Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/atomic-long.h | 6
-rw-r--r--  include/asm-generic/bitops/lock.h | 14
-rw-r--r--  include/asm-generic/bug.h | 27
-rw-r--r--  include/asm-generic/checksum.h | 8
-rw-r--r--  include/asm-generic/mm_hooks.h | 12
-rw-r--r--  include/asm-generic/pgtable.h | 17
-rw-r--r--  include/drm/drmP.h | 30
-rw-r--r--  include/drm/drm_atomic_helper.h | 3
-rw-r--r--  include/drm/drm_crtc.h | 74
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_dp_aux_dev.h | 62
-rw-r--r--  include/drm/drm_fb_helper.h | 6
-rw-r--r--  include/drm/drm_mipi_dsi.h | 26
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 2
-rw-r--r--  include/drm/drm_of.h | 33
-rw-r--r--  include/drm/exynos_drm.h | 101
-rw-r--r--  include/drm/i915_pciids.h | 6
-rw-r--r--  include/dt-bindings/clock/bcm-cygnus.h | 6
-rw-r--r--  include/dt-bindings/clock/exynos5433.h | 9
-rw-r--r--  include/dt-bindings/clock/imx6qdl-clock.h | 16
-rw-r--r--  include/dt-bindings/clock/imx6ul-clock.h | 149
-rw-r--r--  include/dt-bindings/clock/lpc32xx-clock.h | 1
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-ipq4019.h | 158
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-msm8916.h | 1
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-msm8996.h | 11
-rw-r--r--  include/dt-bindings/clock/qcom,mmcc-msm8996.h | 17
-rw-r--r--  include/dt-bindings/clock/r8a7793-clock.h | 2
-rw-r--r--  include/dt-bindings/clock/r8a7794-clock.h | 1
-rw-r--r--  include/dt-bindings/clock/rk3036-cru.h | 2
-rw-r--r--  include/dt-bindings/clock/rk3188-cru-common.h | 1
-rw-r--r--  include/dt-bindings/clock/rk3228-cru.h | 12
-rw-r--r--  include/dt-bindings/memory/mt8173-larb-port.h | 111
-rw-r--r--  include/dt-bindings/pinctrl/stm32f429-pinfunc.h | 1239
-rw-r--r--  include/dt-bindings/power/rk3368-power.h | 28
-rw-r--r--  include/dt-bindings/reset/pistachio-resets.h | 36
-rw-r--r--  include/linux/apple-gmux.h | 50
-rw-r--r--  include/linux/atomic.h | 10
-rw-r--r--  include/linux/bcma/bcma.h | 3
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 42
-rw-r--r--  include/linux/bitmap.h | 10
-rw-r--r--  include/linux/blkdev.h | 5
-rw-r--r--  include/linux/bpf.h | 32
-rw-r--r--  include/linux/buffer_head.h | 10
-rw-r--r--  include/linux/bug.h | 9
-rw-r--r--  include/linux/capability.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 46
-rw-r--r--  include/linux/cgroup.h | 49
-rw-r--r--  include/linux/clk-provider.h | 30
-rw-r--r--  include/linux/clk/at91_pmc.h | 12
-rw-r--r--  include/linux/clk/renesas.h (renamed from include/linux/clk/shmobile.h) | 4
-rw-r--r--  include/linux/clk/ti.h | 8
-rw-r--r--  include/linux/clkdev.h | 3
-rw-r--r--  include/linux/compaction.h | 16
-rw-r--r--  include/linux/compat.h | 21
-rw-r--r--  include/linux/compiler-clang.h | 5
-rw-r--r--  include/linux/cpumask.h | 2
-rw-r--r--  include/linux/cred.h | 5
-rw-r--r--  include/linux/dcache.h | 35
-rw-r--r--  include/linux/device.h | 19
-rw-r--r--  include/linux/dma-attrs.h | 1
-rw-r--r--  include/linux/dma-buf.h | 12
-rw-r--r--  include/linux/dma-mapping.h | 2
-rw-r--r--  include/linux/dqblk_qtree.h | 2
-rw-r--r--  include/linux/efi.h | 85
-rw-r--r--  include/linux/ethtool.h | 102
-rw-r--r--  include/linux/f2fs_fs.h | 34
-rw-r--r--  include/linux/frame.h | 23
-rw-r--r--  include/linux/freezer.h | 2
-rw-r--r--  include/linux/fs.h | 22
-rw-r--r--  include/linux/fscrypto.h | 434
-rw-r--r--  include/linux/fsl/guts.h | 105
-rw-r--r--  include/linux/fsnotify.h | 9
-rw-r--r--  include/linux/fsnotify_backend.h | 9
-rw-r--r--  include/linux/gfp.h | 37
-rw-r--r--  include/linux/hrtimer.h | 12
-rw-r--r--  include/linux/huge_mm.h | 25
-rw-r--r--  include/linux/ieee80211.h | 26
-rw-r--r--  include/linux/if_bridge.h | 4
-rw-r--r--  include/linux/if_team.h | 1
-rw-r--r--  include/linux/igmp.h | 5
-rw-r--r--  include/linux/inet_lro.h | 142
-rw-r--r--  include/linux/io.h | 1
-rw-r--r--  include/linux/ipv6.h | 3
-rw-r--r--  include/linux/kcov.h | 29
-rw-r--r--  include/linux/kernel.h | 31
-rw-r--r--  include/linux/kernfs.h | 11
-rw-r--r--  include/linux/kfifo.h | 2
-rw-r--r--  include/linux/list_bl.h | 4
-rw-r--r--  include/linux/mISDNif.h | 2
-rw-r--r--  include/linux/mbus.h | 3
-rw-r--r--  include/linux/memcontrol.h | 44
-rw-r--r--  include/linux/mfd/tmio.h | 4
-rw-r--r--  include/linux/mlx4/driver.h | 3
-rw-r--r--  include/linux/mlx5/device.h | 23
-rw-r--r--  include/linux/mlx5/driver.h | 48
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 146
-rw-r--r--  include/linux/mlx5/port.h | 87
-rw-r--r--  include/linux/mlx5/vport.h | 7
-rw-r--r--  include/linux/mm.h | 167
-rw-r--r--  include/linux/mman.h | 6
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/dw_mmc.h | 12
-rw-r--r--  include/linux/mmc/tmio.h | 5
-rw-r--r--  include/linux/mmzone.h | 8
-rw-r--r--  include/linux/mtd/bbm.h | 1
-rw-r--r--  include/linux/mtd/inftl.h | 1
-rw-r--r--  include/linux/mtd/map.h | 7
-rw-r--r--  include/linux/mtd/mtd.h | 6
-rw-r--r--  include/linux/mtd/nand.h | 10
-rw-r--r--  include/linux/mtd/nand_bch.h | 8
-rw-r--r--  include/linux/mtd/nftl.h | 1
-rw-r--r--  include/linux/mtd/spi-nor.h | 2
-rw-r--r--  include/linux/namei.h | 1
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/netdev_features.h | 3
-rw-r--r--  include/linux/netdevice.h | 327
-rw-r--r--  include/linux/netfilter.h | 29
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 2
-rw-r--r--  include/linux/netfilter/x_tables.h | 6
-rw-r--r--  include/linux/netfilter_arp/arp_tables.h | 9
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h | 9
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h | 9
-rw-r--r--  include/linux/netlink.h | 10
-rw-r--r--  include/linux/nfs4.h | 19
-rw-r--r--  include/linux/nmi.h | 1
-rw-r--r--  include/linux/nsproxy.h | 2
-rw-r--r--  include/linux/of.h | 18
-rw-r--r--  include/linux/omap-gpmc.h | 5
-rw-r--r--  include/linux/page-flags-layout.h | 2
-rw-r--r--  include/linux/page-flags.h | 32
-rw-r--r--  include/linux/page_ref.h | 173
-rw-r--r--  include/linux/pagemap.h | 19
-rw-r--r--  include/linux/pci.h | 9
-rw-r--r--  include/linux/perf/arm_pmu.h | 2
-rw-r--r--  include/linux/perf_event.h | 18
-rw-r--r--  include/linux/phy.h | 3
-rw-r--r--  include/linux/phy_fixed.h | 5
-rw-r--r--  include/linux/pkeys.h | 33
-rw-r--r--  include/linux/platform_data/brcmfmac-sdio.h | 135
-rw-r--r--  include/linux/platform_data/brcmfmac.h | 185
-rw-r--r--  include/linux/platform_data/microread.h | 35
-rw-r--r--  include/linux/platform_data/mmp_dma.h | 1
-rw-r--r--  include/linux/platform_data/mtd-nand-s3c2410.h | 1
-rw-r--r--  include/linux/poll.h | 2
-rw-r--r--  include/linux/proc_ns.h | 4
-rw-r--r--  include/linux/psci.h | 3
-rw-r--r--  include/linux/qed/common_hsi.h | 36
-rw-r--r--  include/linux/qed/eth_common.h | 171
-rw-r--r--  include/linux/qed/qed_chain.h | 4
-rw-r--r--  include/linux/qed/qed_eth_if.h | 14
-rw-r--r--  include/linux/qed/qed_if.h | 14
-rw-r--r--  include/linux/quicklist.h | 2
-rw-r--r--  include/linux/quota.h | 5
-rw-r--r--  include/linux/quotaops.h | 3
-rw-r--r--  include/linux/radix-tree.h | 28
-rw-r--r--  include/linux/rculist.h | 21
-rw-r--r--  include/linux/reset-controller.h | 2
-rw-r--r--  include/linux/rfkill-gpio.h | 37
-rw-r--r--  include/linux/rfkill.h | 18
-rw-r--r--  include/linux/rio.h | 98
-rw-r--r--  include/linux/rio_drv.h | 15
-rw-r--r--  include/linux/rio_mport_cdev.h | 271
-rw-r--r--  include/linux/rio_regs.h | 3
-rw-r--r--  include/linux/rmap.h | 6
-rw-r--r--  include/linux/sched.h | 23
-rw-r--r--  include/linux/sched/sysctl.h | 21
-rw-r--r--  include/linux/scpi_protocol.h | 3
-rw-r--r--  include/linux/skbuff.h | 69
-rw-r--r--  include/linux/soc/samsung/exynos-pmu.h | 24
-rw-r--r--  include/linux/soc/samsung/exynos-regs-pmu.h | 693
-rw-r--r--  include/linux/soc/ti/ti-msgmgr.h | 35
-rw-r--r--  include/linux/socket.h | 7
-rw-r--r--  include/linux/stmmac.h | 17
-rw-r--r--  include/linux/string.h | 8
-rw-r--r--  include/linux/sunrpc/auth.h | 7
-rw-r--r--  include/linux/sunrpc/clnt.h | 18
-rw-r--r--  include/linux/sunrpc/rpc_rdma.h | 12
-rw-r--r--  include/linux/sunrpc/sched.h | 32
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 20
-rw-r--r--  include/linux/sunrpc/xprt.h | 22
-rw-r--r--  include/linux/sunrpc/xprtmultipath.h | 69
-rw-r--r--  include/linux/sunrpc/xprtrdma.h | 2
-rw-r--r--  include/linux/syscalls.h | 6
-rw-r--r--  include/linux/tcp.h | 14
-rw-r--r--  include/linux/thermal.h | 20
-rw-r--r--  include/linux/tick.h | 2
-rw-r--r--  include/linux/trace_events.h | 7
-rw-r--r--  include/linux/unaligned/access_ok.h | 24
-rw-r--r--  include/linux/vga_switcheroo.h | 36
-rw-r--r--  include/linux/virtio.h | 23
-rw-r--r--  include/linux/virtio_ring.h | 35
-rw-r--r--  include/linux/vm_event_item.h | 2
-rw-r--r--  include/linux/watchdog.h | 43
-rw-r--r--  include/media/videobuf2-dma-contig.h | 11
-rw-r--r--  include/misc/cxl.h | 8
-rw-r--r--  include/net/6lowpan.h | 32
-rw-r--r--  include/net/act_api.h | 84
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/bluetooth/hci.h | 1
-rw-r--r--  include/net/bluetooth/hci_core.h | 3
-rw-r--r--  include/net/bond_3ad.h | 1
-rw-r--r--  include/net/bonding.h | 1
-rw-r--r--  include/net/cfg80211.h | 8
-rw-r--r--  include/net/checksum.h | 17
-rw-r--r--  include/net/codel.h | 4
-rw-r--r--  include/net/devlink.h | 140
-rw-r--r--  include/net/dsa.h | 15
-rw-r--r--  include/net/dst.h | 12
-rw-r--r--  include/net/dst_cache.h | 97
-rw-r--r--  include/net/dst_metadata.h | 6
-rw-r--r--  include/net/flow.h | 2
-rw-r--r--  include/net/flow_dissector.h | 13
-rw-r--r--  include/net/genetlink.h | 4
-rw-r--r--  include/net/hwbm.h | 28
-rw-r--r--  include/net/inet6_connection_sock.h | 1
-rw-r--r--  include/net/inet6_hashtables.h | 13
-rw-r--r--  include/net/inet_frag.h | 1
-rw-r--r--  include/net/inet_hashtables.h | 25
-rw-r--r--  include/net/ip.h | 8
-rw-r--r--  include/net/ip6_checksum.h | 3
-rw-r--r--  include/net/ip6_tunnel.h | 14
-rw-r--r--  include/net/ip_tunnels.h | 71
-rw-r--r--  include/net/ip_vs.h | 17
-rw-r--r--  include/net/ipv6.h | 14
-rw-r--r--  include/net/kcm.h | 226
-rw-r--r--  include/net/l3mdev.h | 4
-rw-r--r--  include/net/lwtunnel.h | 4
-rw-r--r--  include/net/mac80211.h | 198
-rw-r--r--  include/net/mac802154.h | 15
-rw-r--r--  include/net/netfilter/nft_masq.h | 4
-rw-r--r--  include/net/netns/ipv4.h | 19
-rw-r--r--  include/net/netns/ipv6.h | 3
-rw-r--r--  include/net/phonet/phonet.h | 2
-rw-r--r--  include/net/ping.h | 3
-rw-r--r--  include/net/pkt_cls.h | 65
-rw-r--r--  include/net/raw.h | 2
-rw-r--r--  include/net/route.h | 5
-rw-r--r--  include/net/sch_generic.h | 26
-rw-r--r--  include/net/sctp/sctp.h | 10
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sctp/structs.h | 13
-rw-r--r--  include/net/sock.h | 6
-rw-r--r--  include/net/tc_act/tc_gact.h | 16
-rw-r--r--  include/net/tc_act/tc_ife.h | 61
-rw-r--r--  include/net/tc_act/tc_skbedit.h | 16
-rw-r--r--  include/net/tcp.h | 53
-rw-r--r--  include/net/udp.h | 3
-rw-r--r--  include/net/udp_tunnel.h | 6
-rw-r--r--  include/net/vxlan.h | 187
-rw-r--r--  include/rdma/ib_addr.h | 14
-rw-r--r--  include/rdma/ib_verbs.h | 29
-rw-r--r--  include/rdma/opa_port_info.h | 2
-rw-r--r--  include/rdma/rdma_vt.h | 481
-rw-r--r--  include/rdma/rdmavt_cq.h | 99
-rw-r--r--  include/rdma/rdmavt_mr.h | 139
-rw-r--r--  include/rdma/rdmavt_qp.h | 446
-rw-r--r--  include/rxrpc/packet.h | 15
-rw-r--r--  include/soc/fsl/qe/qe.h | 2
-rw-r--r--  include/soc/mediatek/smi.h | 58
-rw-r--r--  include/target/target_core_base.h | 11
-rw-r--r--  include/target/target_core_fabric.h | 6
-rw-r--r--  include/trace/events/compaction.h | 55
-rw-r--r--  include/trace/events/f2fs.h | 12
-rw-r--r--  include/trace/events/fib6.h | 2
-rw-r--r--  include/trace/events/kmem.h | 42
-rw-r--r--  include/trace/events/mmflags.h | 23
-rw-r--r--  include/trace/events/page_ref.h | 134
-rw-r--r--  include/trace/events/sunvnet.h | 139
-rw-r--r--  include/trace/events/thermal.h | 16
-rw-r--r--  include/trace/events/tlb.h | 4
-rw-r--r--  include/trace/events/writeback.h | 121
-rw-r--r--  include/uapi/asm-generic/siginfo.h | 17
-rw-r--r--  include/uapi/asm-generic/socket.h | 2
-rw-r--r--  include/uapi/drm/drm.h | 1
-rw-r--r--  include/uapi/drm/drm_mode.h | 15
-rw-r--r--  include/uapi/drm/exynos_drm.h | 43
-rw-r--r--  include/uapi/drm/i915_drm.h | 43
-rw-r--r--  include/uapi/drm/msm_drm.h | 2
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  include/uapi/linux/audit.h | 1
-rw-r--r--  include/uapi/linux/bpf.h | 52
-rw-r--r--  include/uapi/linux/byteorder/big_endian.h | 24
-rw-r--r--  include/uapi/linux/byteorder/little_endian.h | 24
-rw-r--r--  include/uapi/linux/devlink.h | 72
-rw-r--r--  include/uapi/linux/dma-buf.h | 40
-rw-r--r--  include/uapi/linux/dqblk_xfs.h | 1
-rw-r--r--  include/uapi/linux/elf-em.h | 3
-rw-r--r--  include/uapi/linux/ethtool.h | 437
-rw-r--r--  include/uapi/linux/fs.h | 21
-rw-r--r--  include/uapi/linux/genetlink.h | 1
-rw-r--r--  include/uapi/linux/if.h | 2
-rw-r--r--  include/uapi/linux/if_bridge.h | 37
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  include/uapi/linux/if_link.h | 52
-rw-r--r--  include/uapi/linux/if_macsec.h | 161
-rw-r--r--  include/uapi/linux/ip.h | 2
-rw-r--r--  include/uapi/linux/ipv6.h | 3
-rw-r--r--  include/uapi/linux/kcm.h | 40
-rw-r--r--  include/uapi/linux/kcov.h | 10
-rw-r--r--  include/uapi/linux/kernel.h | 1
-rw-r--r--  include/uapi/linux/mroute6.h | 9
-rw-r--r--  include/uapi/linux/netconf.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nf_conntrack_common.h | 12
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 6
-rw-r--r--  include/uapi/linux/netlink.h | 4
-rw-r--r--  include/uapi/linux/netlink_diag.h | 2
-rw-r--r--  include/uapi/linux/nl80211.h | 14
-rw-r--r--  include/uapi/linux/openvswitch.h | 49
-rw-r--r--  include/uapi/linux/pkt_cls.h | 3
-rw-r--r--  include/uapi/linux/quota.h | 14
-rw-r--r--  include/uapi/linux/rfkill.h | 2
-rw-r--r--  include/uapi/linux/sched.h | 3
-rw-r--r--  include/uapi/linux/swab.h | 10
-rw-r--r--  include/uapi/linux/target_core_user.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_ife.h | 38
-rw-r--r--  include/uapi/linux/tcp.h | 5
-rw-r--r--  include/uapi/linux/vhost.h | 6
-rw-r--r--  include/uapi/linux/virtio_balloon.h | 3
-rw-r--r--  include/uapi/linux/virtio_blk.h | 6
-rw-r--r--  include/uapi/misc/cxl.h | 24
-rw-r--r--  include/uapi/rdma/hfi/hfi1_user.h | 14
-rw-r--r--  include/uapi/rdma/rdma_netlink.h | 1
-rw-r--r--  include/video/omap-panel-data.h | 15
-rw-r--r--  include/video/omapdss.h | 89
-rw-r--r--  include/xen/interface/io/netif.h | 861
325 files changed, 11285 insertions, 2062 deletions
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index eb1973bad80b..5e1f345b58dd 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -98,14 +98,14 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release)
#define atomic_long_xchg(v, new) \
(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-static inline void atomic_long_inc(atomic_long_t *l)
+static __always_inline void atomic_long_inc(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
ATOMIC_LONG_PFX(_inc)(v);
}
-static inline void atomic_long_dec(atomic_long_t *l)
+static __always_inline void atomic_long_dec(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
@@ -113,7 +113,7 @@ static inline void atomic_long_dec(atomic_long_t *l)
}
#define ATOMIC_LONG_OP(op) \
-static inline void \
+static __always_inline void \
atomic_long_##op(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index c30266e94806..8ef0ccbf8167 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do { \
* @nr: the bit to set
* @addr: the address to start counting from
*
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
*/
#define __clear_bit_unlock(nr, addr) \
do { \
- smp_mb(); \
- __clear_bit(nr, addr); \
+ smp_mb__before_atomic(); \
+ clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
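A minimal sketch (not part of this patch) of the bit-lock pattern the updated comment describes, assuming the usual test_and_set_bit_lock() helper from this same header: a caller that owns every other bit in the word while the lock is held can pair it with __clear_bit_unlock().

#include <linux/bitops.h>

/* Hypothetical example: bit 0 of 'example_flags' is a lock protecting the word. */
#define EXAMPLE_LOCK_BIT	0

static unsigned long example_flags;

static bool example_trylock(void)
{
	/* Acquire: returns true if the lock bit was previously clear. */
	return !test_and_set_bit_lock(EXAMPLE_LOCK_BIT, &example_flags);
}

static void example_unlock(void)
{
	/* Release: only the lock holder touches example_flags meanwhile. */
	__clear_bit_unlock(EXAMPLE_LOCK_BIT, &example_flags);
}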
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 630dd2372238..f90588abbfd4 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -81,6 +81,12 @@ extern void warn_slowpath_null(const char *file, const int line);
do { printk(arg); __WARN_TAINT(taint); } while (0)
#endif
+/* used internally by panic.c */
+struct warn_args;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
+
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
@@ -110,9 +116,10 @@ extern void warn_slowpath_null(const char *file, const int line);
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
- if (unlikely(__ret_warn_once)) \
- if (WARN_ON(!__warned)) \
- __warned = true; \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
unlikely(__ret_warn_once); \
})
@@ -120,9 +127,10 @@ extern void warn_slowpath_null(const char *file, const int line);
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
- if (unlikely(__ret_warn_once)) \
- if (WARN(!__warned, format)) \
- __warned = true; \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ WARN(1, format); \
+ } \
unlikely(__ret_warn_once); \
})
@@ -130,9 +138,10 @@ extern void warn_slowpath_null(const char *file, const int line);
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
- if (unlikely(__ret_warn_once)) \
- if (WARN_TAINT(!__warned, taint, format)) \
- __warned = true; \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ WARN_TAINT(1, taint, format); \
+ } \
unlikely(__ret_warn_once); \
})
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 59811df58c5b..3150cbd8eb21 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -65,14 +65,14 @@ static inline __sum16 csum_fold(__wsum csum)
* returns a 16-bit checksum, already complemented
*/
extern __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum);
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum);
#endif
#ifndef csum_tcpudp_magic
static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 866aa461efa5..cc5d9a1405df 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -26,4 +26,16 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
{
}
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ /* by default, allow everything */
+ return true;
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+ /* by default, allow everything */
+ return true;
+}
#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index c370b261c720..9401f4819891 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -783,6 +783,23 @@ static inline int pmd_clear_huge(pmd_t *pmd)
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise also, it can help optimize normal TLB flush in
+ * THP regime. Stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single thp flush will
+ * invalidate the entire TLB which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#endif
+#endif
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d7162cf1c3e1..3c8422c69572 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -283,6 +283,7 @@ struct drm_ioctl_desc {
struct drm_pending_event {
struct drm_event *event;
struct list_head link;
+ struct list_head pending_link;
struct drm_file *file_priv;
pid_t pid; /* pid of requester, no guarantee it's valid by the time
we deliver the event, for tracing only */
@@ -346,6 +347,7 @@ struct drm_file {
struct list_head blobs;
wait_queue_head_t event_wait;
+ struct list_head pending_event_list;
struct list_head event_list;
int event_space;
@@ -919,15 +921,25 @@ extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
- /* Device support (drm_fops.h) */
-extern int drm_open(struct inode *inode, struct file *filp);
-extern ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset);
-extern int drm_release(struct inode *inode, struct file *filp);
-extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
-
- /* Mapping support (drm_vm.h) */
-extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+/* File Operations (drm_fops.c) */
+int drm_open(struct inode *inode, struct file *filp);
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
+int drm_release(struct inode *inode, struct file *filp);
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+int drm_event_reserve_init_locked(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e);
+int drm_event_reserve_init(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e);
+void drm_event_cancel_free(struct drm_device *dev,
+ struct drm_pending_event *p);
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_noop(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index fe5efada9d68..9054598c9a7a 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -146,6 +146,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
+void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t start, uint32_t size);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c5b4b81a831b..e0170bf80bb0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -305,12 +305,20 @@ struct drm_plane_helper_funcs;
* @mode_changed: crtc_state->mode or crtc_state->enable has been changed
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
+ * @color_mgmt_changed: color management properties have changed (degamma or
+ * gamma LUT or CSC matrix)
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
+ * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
+ * @degamma_lut: Lookup table for converting framebuffer pixel data
+ * before applying the conversion matrix
+ * @ctm: Transformation matrix
+ * @gamma_lut: Lookup table for converting pixel data after the
+ * conversion matrix
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
@@ -332,6 +340,7 @@ struct drm_crtc_state {
bool mode_changed : 1;
bool active_changed : 1;
bool connectors_changed : 1;
+ bool color_mgmt_changed : 1;
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
@@ -341,6 +350,7 @@ struct drm_crtc_state {
u32 plane_mask;
u32 connector_mask;
+ u32 encoder_mask;
/* last_vblank_count: for vblank waits before cleanup */
u32 last_vblank_count;
@@ -353,6 +363,11 @@ struct drm_crtc_state {
/* blob property to expose current mode to atomic userspace */
struct drm_property_blob *mode_blob;
+ /* blob property to expose color management to userspace */
+ struct drm_property_blob *degamma_lut;
+ struct drm_property_blob *ctm;
+ struct drm_property_blob *gamma_lut;
+
struct drm_pending_vblank_event *event;
struct drm_atomic_state *state;
@@ -755,7 +770,7 @@ struct drm_crtc {
int x, y;
const struct drm_crtc_funcs *funcs;
- /* CRTC gamma size for reporting to userspace */
+ /* Legacy FB CRTC gamma size for reporting to userspace */
uint32_t gamma_size;
uint16_t *gamma_store;
@@ -1582,6 +1597,8 @@ struct drm_bridge_funcs {
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
+ *
+ * The disable callback is optional.
*/
void (*disable)(struct drm_bridge *bridge);
@@ -1598,6 +1615,8 @@ struct drm_bridge_funcs {
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding it is no longer running when this callback is
* called.
+ *
+ * The post_disable callback is optional.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -1626,6 +1645,8 @@ struct drm_bridge_funcs {
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
+ *
+ * The pre_enable callback is optional.
*/
void (*pre_enable)(struct drm_bridge *bridge);
@@ -1643,6 +1664,8 @@ struct drm_bridge_funcs {
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
+ *
+ * The enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
};
@@ -1675,6 +1698,7 @@ struct drm_bridge {
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
@@ -1688,6 +1712,7 @@ struct drm_atomic_state {
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
+ bool legacy_set_config : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
@@ -2024,6 +2049,15 @@ struct drm_mode_config_funcs {
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
+ * @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
+ * gamma
+ * @degamma_lut_size_property: size of the degamma LUT as supported by the
+ * driver (read-only)
+ * @ctm_property: Matrix used to convert colors after the lookup in the
+ * degamma LUT
+ * @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
+ * the gamma space of the connected screen (read-only)
+ * @gamma_lut_size_property: size of the gamma LUT as supported by the driver
* @preferred_depth: preferred RGB pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
* @async_page_flip: does this device support async flips on the primary plane?
@@ -2126,6 +2160,13 @@ struct drm_mode_config {
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
+ /* Optional color correction properties */
+ struct drm_property *degamma_lut_property;
+ struct drm_property *degamma_lut_size_property;
+ struct drm_property *ctm_property;
+ struct drm_property *gamma_lut_property;
+ struct drm_property *gamma_lut_size_property;
+
/* properties for virtual machine layout */
struct drm_property *suggested_x_property;
struct drm_property *suggested_y_property;
@@ -2155,6 +2196,17 @@ struct drm_mode_config {
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+ list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+ for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -2231,6 +2283,7 @@ int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
+extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2288,6 +2341,8 @@ extern void drm_property_destroy_user_blobs(struct drm_device *dev,
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
extern struct edid *drm_edid_duplicate(const struct edid *edid);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_config_init(struct drm_device *dev);
@@ -2488,6 +2543,8 @@ extern int drm_format_num_planes(uint32_t format);
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern int drm_format_plane_width(int width, uint32_t format, int plane);
+extern int drm_format_plane_height(int height, uint32_t format, int plane);
extern const char *drm_get_format_name(uint32_t format);
extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations);
@@ -2536,6 +2593,21 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
return mo ? obj_to_property(mo) : NULL;
}
+/*
+ * Extract a degamma/gamma LUT value provided by user and round it to the
+ * precision supported by the hardware.
+ */
+static inline uint32_t drm_color_lut_extract(uint32_t user_input,
+ uint32_t bit_precision)
+{
+ uint32_t val = user_input + (1 << (16 - bit_precision - 1));
+ uint32_t max = 0xffff >> (16 - bit_precision);
+
+ val >>= 16 - bit_precision;
+
+ return clamp_val(val, 0, max);
+}
+
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
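A short illustration (not part of this patch) of what the drm_color_lut_extract() helper added above computes: rounding a 16-bit userspace LUT entry to a hypothetical 10-bit hardware precision.

#include <drm/drm_crtc.h>

static uint32_t example_lut_to_10bit(uint32_t user_val)
{
	/*
	 * With bit_precision = 10: 0xffff clamps to 0x3ff and
	 * 0x8000 rounds to 0x200.
	 */
	return drm_color_lut_extract(user_val, 10);
}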
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4b37afa2b73b..97fa894d4ee2 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -48,6 +48,9 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
+extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ int degamma_lut_size,
+ int gamma_lut_size);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
diff --git a/include/drm/drm_dp_aux_dev.h b/include/drm/drm_dp_aux_dev.h
new file mode 100644
index 000000000000..1b76d990d8ab
--- /dev/null
+++ b/include/drm/drm_dp_aux_dev.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rafael Antognolli <rafael.antognolli@intel.com>
+ *
+ */
+
+#ifndef DRM_DP_AUX_DEV
+#define DRM_DP_AUX_DEV
+
+#include <drm/drm_dp_helper.h>
+
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+
+int drm_dp_aux_dev_init(void);
+void drm_dp_aux_dev_exit(void);
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_dp_aux_dev_init(void)
+{
+ return 0;
+}
+
+static inline void drm_dp_aux_dev_exit(void)
+{
+}
+
+static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index d8a40dff0d1d..062723bdcabe 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -219,6 +219,7 @@ struct drm_fb_helper {
};
#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fb_helper_modinit(void);
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
@@ -283,6 +284,11 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#else
+static inline int drm_fb_helper_modinit(void)
+{
+ return 0;
+}
+
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 1b3b1f8c8cdf..7a9840f8b38e 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -96,14 +96,17 @@ struct mipi_dsi_host_ops {
* struct mipi_dsi_host - DSI host device
* @dev: driver model device node for this DSI host
* @ops: DSI host operations
+ * @list: list management
*/
struct mipi_dsi_host {
struct device *dev;
const struct mipi_dsi_host_ops *ops;
+ struct list_head list;
};
int mipi_dsi_host_register(struct mipi_dsi_host *host);
void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
/* DSI mode flags */
@@ -139,10 +142,28 @@ enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB565,
};
+#define DSI_DEV_NAME_SIZE 20
+
+/**
+ * struct mipi_dsi_device_info - template for creating a mipi_dsi_device
+ * @type: DSI peripheral chip type
+ * @channel: DSI virtual channel assigned to peripheral
+ * @node: pointer to OF device node or NULL
+ *
+ * This is populated and passed to mipi_dsi_device_new to create a new
+ * DSI device
+ */
+struct mipi_dsi_device_info {
+ char type[DSI_DEV_NAME_SIZE];
+ u32 channel;
+ struct device_node *node;
+};
+
/**
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
* @lanes: number of active data lanes
@@ -152,6 +173,7 @@ struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -188,6 +210,10 @@ static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
return -EINVAL;
}
+struct mipi_dsi_device *
+mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
+void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
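A minimal usage sketch (not part of this patch) of the new registration API: a hypothetical host driver creates a DSI peripheral from a mipi_dsi_device_info template; the names "example-panel" and example_host_add_peripheral() are made up for illustration. The returned device can later be torn down with mipi_dsi_device_unregister().

#include <drm/drm_mipi_dsi.h>

static struct mipi_dsi_device *
example_host_add_peripheral(struct mipi_dsi_host *host, struct device_node *np)
{
	struct mipi_dsi_device_info info = {
		.type = "example-panel",	/* hypothetical chip type */
		.channel = 0,			/* DSI virtual channel */
		.node = np,			/* may be NULL for non-DT setups */
	};

	return mipi_dsi_device_register_full(host, &info);
}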
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index a126a0d7aed4..b61c2d45192e 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -439,7 +439,7 @@ struct drm_encoder_helper_funcs {
* can be modified by this callback and does not need to match mode.
*
* This function is used by both legacy CRTC helpers and atomic helpers.
- * With atomic helpers it is optional.
+ * This hook is optional.
*
* NOTE:
*
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 8544665ee4f4..3fd87b386ed7 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -1,9 +1,12 @@
#ifndef __DRM_OF_H__
#define __DRM_OF_H__
+#include <linux/of_graph.h>
+
struct component_master_ops;
struct device;
struct drm_device;
+struct drm_encoder;
struct device_node;
#ifdef CONFIG_OF
@@ -12,6 +15,9 @@ extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
extern int drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops);
+extern int drm_of_encoder_active_endpoint(struct device_node *node,
+ struct drm_encoder *encoder,
+ struct of_endpoint *endpoint);
#else
static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
@@ -26,6 +32,33 @@ drm_of_component_probe(struct device *dev,
{
return -EINVAL;
}
+
+static inline int drm_of_encoder_active_endpoint(struct device_node *node,
+ struct drm_encoder *encoder,
+ struct of_endpoint *endpoint)
+{
+ return -EINVAL;
+}
#endif
+static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
+ struct drm_encoder *encoder)
+{
+ struct of_endpoint endpoint;
+ int ret = drm_of_encoder_active_endpoint(node, encoder,
+ &endpoint);
+
+ return ret ?: endpoint.id;
+}
+
+static inline int drm_of_encoder_active_port_id(struct device_node *node,
+ struct drm_encoder *encoder)
+{
+ struct of_endpoint endpoint;
+ int ret = drm_of_encoder_active_endpoint(node, encoder,
+ &endpoint);
+
+ return ret ?: endpoint.port;
+}
+
#endif /* __DRM_OF_H__ */
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
deleted file mode 100644
index cb65fa14acfc..000000000000
--- a/include/drm/exynos_drm.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* exynos_drm.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#ifndef _EXYNOS_DRM_H_
-#define _EXYNOS_DRM_H_
-
-#include <uapi/drm/exynos_drm.h>
-#include <video/videomode.h>
-
-/**
- * A structure for lcd panel information.
- *
- * @timing: default video mode for initializing
- * @width_mm: physical size of lcd width.
- * @height_mm: physical size of lcd height.
- */
-struct exynos_drm_panel_info {
- struct videomode vm;
- u32 width_mm;
- u32 height_mm;
-};
-
-/**
- * Platform Specific Structure for DRM based FIMD.
- *
- * @panel: default panel info for initializing
- * @default_win: default window layer number to be used for UI.
- * @bpp: default bit per pixel.
- */
-struct exynos_drm_fimd_pdata {
- struct exynos_drm_panel_info panel;
- u32 vidcon0;
- u32 vidcon1;
- unsigned int default_win;
- unsigned int bpp;
-};
-
-/**
- * Platform Specific Structure for DRM based HDMI.
- *
- * @hdmi_dev: device point to specific hdmi driver.
- * @mixer_dev: device point to specific mixer driver.
- *
- * this structure is used for common hdmi driver and each device object
- * would be used to access specific device driver(hdmi or mixer driver)
- */
-struct exynos_drm_common_hdmi_pd {
- struct device *hdmi_dev;
- struct device *mixer_dev;
-};
-
-/**
- * Platform Specific Structure for DRM based HDMI core.
- *
- * @is_v13: set if hdmi version 13 is.
- * @cfg_hpd: function pointer to configure hdmi hotplug detection pin
- * @get_hpd: function pointer to get value of hdmi hotplug detection pin
- */
-struct exynos_drm_hdmi_pdata {
- bool is_v13;
- void (*cfg_hpd)(bool external);
- int (*get_hpd)(void);
-};
-
-/**
- * Platform Specific Structure for DRM based IPP.
- *
- * @inv_pclk: if set 1. invert pixel clock
- * @inv_vsync: if set 1. invert vsync signal for wb
- * @inv_href: if set 1. invert href signal
- * @inv_hsync: if set 1. invert hsync signal for wb
- */
-struct exynos_drm_ipp_pol {
- unsigned int inv_pclk;
- unsigned int inv_vsync;
- unsigned int inv_href;
- unsigned int inv_hsync;
-};
-
-/**
- * Platform Specific Structure for DRM based FIMC.
- *
- * @pol: current hardware block polarity settings.
- * @clk_rate: current hardware clock rate.
- */
-struct exynos_drm_fimc_pdata {
- struct exynos_drm_ipp_pol pol;
- int clk_rate;
-};
-
-#endif /* _EXYNOS_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index f97020904717..9094599a1150 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -277,7 +277,9 @@
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
@@ -296,7 +298,9 @@
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
- INTEL_VGA_DEVICE(0x5A84, info)
+ INTEL_VGA_DEVICE(0x1A85, info), \
+ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
+ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
#define INTEL_KBL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h
index 32fbc475087a..62ac5d782a00 100644
--- a/include/dt-bindings/clock/bcm-cygnus.h
+++ b/include/dt-bindings/clock/bcm-cygnus.h
@@ -65,4 +65,10 @@
#define BCM_CYGNUS_ASIU_ADC_CLK 1
#define BCM_CYGNUS_ASIU_PWM_CLK 2
+/* AUDIO clock ID */
+#define BCM_CYGNUS_AUDIOPLL 0
+#define BCM_CYGNUS_AUDIOPLL_CH0 1
+#define BCM_CYGNUS_AUDIOPLL_CH1 2
+#define BCM_CYGNUS_AUDIOPLL_CH2 3
+
#endif /* _CLOCK_BCM_CYGNUS_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 5bd80d5ecd0f..8e024fea26e7 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -765,7 +765,12 @@
#define CLK_SCLK_RGB_VCLK 109
#define CLK_SCLK_RGB_TV_VCLK 110
-#define DISP_NR_CLK 111
+#define CLK_PHYCLK_HDMIPHY_PIXEL_CLKO_PHY 111
+#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY 112
+
+#define CLK_PCLK_DECON 113
+
+#define DISP_NR_CLK 114
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
@@ -1298,7 +1303,7 @@
#define CLK_MOUT_ACLK_LITE_C_B 13
#define CLK_MOUT_ACLK_LITE_C_A 14
-#define CLK_DIV_SCLK_ISP_WPWM 15
+#define CLK_DIV_SCLK_ISP_MPWM 15
#define CLK_DIV_PCLK_CAM1_83 16
#define CLK_DIV_PCLK_CAM1_166 17
#define CLK_DIV_PCLK_DBG_CAM1 18
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 77985cc43316..29050337d9d5 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -255,6 +255,20 @@
#define IMX6QDL_CLK_CAAM_ACLK 242
#define IMX6QDL_CLK_CAAM_IPG 243
#define IMX6QDL_CLK_SPDIF_GCLK 244
-#define IMX6QDL_CLK_END 245
+#define IMX6QDL_CLK_UART_SEL 245
+#define IMX6QDL_CLK_IPG_PER_SEL 246
+#define IMX6QDL_CLK_ECSPI_SEL 247
+#define IMX6QDL_CLK_CAN_SEL 248
+#define IMX6QDL_CLK_MMDC_CH1_AXI_CG 249
+#define IMX6QDL_CLK_PRE0 250
+#define IMX6QDL_CLK_PRE1 251
+#define IMX6QDL_CLK_PRE2 252
+#define IMX6QDL_CLK_PRE3 253
+#define IMX6QDL_CLK_PRG0_AXI 254
+#define IMX6QDL_CLK_PRG1_AXI 255
+#define IMX6QDL_CLK_PRG0_APB 256
+#define IMX6QDL_CLK_PRG1_APB 257
+#define IMX6QDL_CLK_PRE_AXI 258
+#define IMX6QDL_CLK_END 259
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index c343894ce603..fd8aee8f64ae 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -21,13 +21,13 @@
#define IMX6UL_PLL5_BYPASS_SRC 8
#define IMX6UL_PLL6_BYPASS_SRC 9
#define IMX6UL_PLL7_BYPASS_SRC 10
-#define IMX6UL_CLK_PLL1 11
-#define IMX6UL_CLK_PLL2 12
-#define IMX6UL_CLK_PLL3 13
-#define IMX6UL_CLK_PLL4 14
-#define IMX6UL_CLK_PLL5 15
-#define IMX6UL_CLK_PLL6 16
-#define IMX6UL_CLK_PLL7 17
+#define IMX6UL_CLK_PLL1 11
+#define IMX6UL_CLK_PLL2 12
+#define IMX6UL_CLK_PLL3 13
+#define IMX6UL_CLK_PLL4 14
+#define IMX6UL_CLK_PLL5 15
+#define IMX6UL_CLK_PLL6 16
+#define IMX6UL_CLK_PLL7 17
#define IMX6UL_PLL1_BYPASS 18
#define IMX6UL_PLL2_BYPASS 19
#define IMX6UL_PLL3_BYPASS 20
@@ -37,7 +37,7 @@
#define IMX6UL_PLL7_BYPASS 24
#define IMX6UL_CLK_PLL1_SYS 25
#define IMX6UL_CLK_PLL2_BUS 26
-#define IMX6UL_CLK_PLL3_USB_OTG 27
+#define IMX6UL_CLK_PLL3_USB_OTG 27
#define IMX6UL_CLK_PLL4_AUDIO 28
#define IMX6UL_CLK_PLL5_VIDEO 29
#define IMX6UL_CLK_PLL6_ENET 30
@@ -66,7 +66,7 @@
#define IMX6UL_CLK_PLL2_198M 53
#define IMX6UL_CLK_PLL3_80M 54
#define IMX6UL_CLK_PLL3_60M 55
-#define IMX6UL_CLK_STEP 56
+#define IMX6UL_CLK_STEP 56
#define IMX6UL_CLK_PLL1_SW 57
#define IMX6UL_CLK_AXI_ALT_SEL 58
#define IMX6UL_CLK_AXI_SEL 59
@@ -78,7 +78,7 @@
#define IMX6UL_CLK_USDHC2_SEL 65
#define IMX6UL_CLK_BCH_SEL 66
#define IMX6UL_CLK_GPMI_SEL 67
-#define IMX6UL_CLK_EIM_SLOW_SEL 68
+#define IMX6UL_CLK_EIM_SLOW_SEL 68
#define IMX6UL_CLK_SPDIF_SEL 69
#define IMX6UL_CLK_SAI1_SEL 70
#define IMX6UL_CLK_SAI2_SEL 71
@@ -105,9 +105,9 @@
#define IMX6UL_CLK_LDB_DI1_DIV_SEL 92
#define IMX6UL_CLK_ARM 93
#define IMX6UL_CLK_PERIPH_CLK2 94
-#define IMX6UL_CLK_PERIPH2_CLK2 95
+#define IMX6UL_CLK_PERIPH2_CLK2 95
#define IMX6UL_CLK_AHB 96
-#define IMX6UL_CLK_MMDC_PODF 97
+#define IMX6UL_CLK_MMDC_PODF 97
#define IMX6UL_CLK_AXI_PODF 98
#define IMX6UL_CLK_PERCLK 99
#define IMX6UL_CLK_IPG 100
@@ -133,16 +133,16 @@
#define IMX6UL_CLK_CAN_PODF 120
#define IMX6UL_CLK_ECSPI_PODF 121
#define IMX6UL_CLK_UART_PODF 122
-#define IMX6UL_CLK_ADC1 123
-#define IMX6UL_CLK_ADC2 124
+#define IMX6UL_CLK_ADC1 123
+#define IMX6UL_CLK_ADC2 124
#define IMX6UL_CLK_AIPSTZ1 125
#define IMX6UL_CLK_AIPSTZ2 126
#define IMX6UL_CLK_AIPSTZ3 127
#define IMX6UL_CLK_APBHDMA 128
#define IMX6UL_CLK_ASRC_IPG 129
#define IMX6UL_CLK_ASRC_MEM 130
-#define IMX6UL_CLK_GPMI_BCH_APB 131
-#define IMX6UL_CLK_GPMI_BCH 132
+#define IMX6UL_CLK_GPMI_BCH_APB 131
+#define IMX6UL_CLK_GPMI_BCH 132
#define IMX6UL_CLK_GPMI_IO 133
#define IMX6UL_CLK_GPMI_APB 134
#define IMX6UL_CLK_CAAM_MEM 135
@@ -154,7 +154,7 @@
#define IMX6UL_CLK_ECSPI3 141
#define IMX6UL_CLK_ECSPI4 142
#define IMX6UL_CLK_EIM 143
-#define IMX6UL_CLK_ENET 144
+#define IMX6UL_CLK_ENET 144
#define IMX6UL_CLK_ENET_AHB 145
#define IMX6UL_CLK_EPIT1 146
#define IMX6UL_CLK_EPIT2 147
@@ -166,63 +166,63 @@
#define IMX6UL_CLK_GPT1_SERIAL 153
#define IMX6UL_CLK_GPT2_BUS 154
#define IMX6UL_CLK_GPT2_SERIAL 155
-#define IMX6UL_CLK_I2C1 156
-#define IMX6UL_CLK_I2C2 157
-#define IMX6UL_CLK_I2C3 158
-#define IMX6UL_CLK_I2C4 159
-#define IMX6UL_CLK_IOMUXC 160
-#define IMX6UL_CLK_LCDIF_APB 161
-#define IMX6UL_CLK_LCDIF_PIX 162
-#define IMX6UL_CLK_MMDC_P0_FAST 163
-#define IMX6UL_CLK_MMDC_P0_IPG 164
-#define IMX6UL_CLK_OCOTP 165
-#define IMX6UL_CLK_OCRAM 166
-#define IMX6UL_CLK_PWM1 167
-#define IMX6UL_CLK_PWM2 168
-#define IMX6UL_CLK_PWM3 169
-#define IMX6UL_CLK_PWM4 170
-#define IMX6UL_CLK_PWM5 171
-#define IMX6UL_CLK_PWM6 172
-#define IMX6UL_CLK_PWM7 173
-#define IMX6UL_CLK_PWM8 174
-#define IMX6UL_CLK_PXP 175
-#define IMX6UL_CLK_QSPI 176
-#define IMX6UL_CLK_ROM 177
-#define IMX6UL_CLK_SAI1 178
-#define IMX6UL_CLK_SAI1_IPG 179
-#define IMX6UL_CLK_SAI2 180
-#define IMX6UL_CLK_SAI2_IPG 181
-#define IMX6UL_CLK_SAI3 182
-#define IMX6UL_CLK_SAI3_IPG 183
-#define IMX6UL_CLK_SDMA 184
-#define IMX6UL_CLK_SIM 185
-#define IMX6UL_CLK_SIM_S 186
-#define IMX6UL_CLK_SPBA 187
-#define IMX6UL_CLK_SPDIF 188
-#define IMX6UL_CLK_UART1_IPG 189
-#define IMX6UL_CLK_UART1_SERIAL 190
-#define IMX6UL_CLK_UART2_IPG 191
-#define IMX6UL_CLK_UART2_SERIAL 192
-#define IMX6UL_CLK_UART3_IPG 193
-#define IMX6UL_CLK_UART3_SERIAL 194
-#define IMX6UL_CLK_UART4_IPG 195
-#define IMX6UL_CLK_UART4_SERIAL 196
-#define IMX6UL_CLK_UART5_IPG 197
-#define IMX6UL_CLK_UART5_SERIAL 198
-#define IMX6UL_CLK_UART6_IPG 199
-#define IMX6UL_CLK_UART6_SERIAL 200
-#define IMX6UL_CLK_UART7_IPG 201
-#define IMX6UL_CLK_UART7_SERIAL 202
-#define IMX6UL_CLK_UART8_IPG 203
-#define IMX6UL_CLK_UART8_SERIAL 204
-#define IMX6UL_CLK_USBOH3 205
-#define IMX6UL_CLK_USDHC1 206
-#define IMX6UL_CLK_USDHC2 207
-#define IMX6UL_CLK_WDOG1 208
-#define IMX6UL_CLK_WDOG2 209
-#define IMX6UL_CLK_WDOG3 210
+#define IMX6UL_CLK_I2C1 156
+#define IMX6UL_CLK_I2C2 157
+#define IMX6UL_CLK_I2C3 158
+#define IMX6UL_CLK_I2C4 159
+#define IMX6UL_CLK_IOMUXC 160
+#define IMX6UL_CLK_LCDIF_APB 161
+#define IMX6UL_CLK_LCDIF_PIX 162
+#define IMX6UL_CLK_MMDC_P0_FAST 163
+#define IMX6UL_CLK_MMDC_P0_IPG 164
+#define IMX6UL_CLK_OCOTP 165
+#define IMX6UL_CLK_OCRAM 166
+#define IMX6UL_CLK_PWM1 167
+#define IMX6UL_CLK_PWM2 168
+#define IMX6UL_CLK_PWM3 169
+#define IMX6UL_CLK_PWM4 170
+#define IMX6UL_CLK_PWM5 171
+#define IMX6UL_CLK_PWM6 172
+#define IMX6UL_CLK_PWM7 173
+#define IMX6UL_CLK_PWM8 174
+#define IMX6UL_CLK_PXP 175
+#define IMX6UL_CLK_QSPI 176
+#define IMX6UL_CLK_ROM 177
+#define IMX6UL_CLK_SAI1 178
+#define IMX6UL_CLK_SAI1_IPG 179
+#define IMX6UL_CLK_SAI2 180
+#define IMX6UL_CLK_SAI2_IPG 181
+#define IMX6UL_CLK_SAI3 182
+#define IMX6UL_CLK_SAI3_IPG 183
+#define IMX6UL_CLK_SDMA 184
+#define IMX6UL_CLK_SIM 185
+#define IMX6UL_CLK_SIM_S 186
+#define IMX6UL_CLK_SPBA 187
+#define IMX6UL_CLK_SPDIF 188
+#define IMX6UL_CLK_UART1_IPG 189
+#define IMX6UL_CLK_UART1_SERIAL 190
+#define IMX6UL_CLK_UART2_IPG 191
+#define IMX6UL_CLK_UART2_SERIAL 192
+#define IMX6UL_CLK_UART3_IPG 193
+#define IMX6UL_CLK_UART3_SERIAL 194
+#define IMX6UL_CLK_UART4_IPG 195
+#define IMX6UL_CLK_UART4_SERIAL 196
+#define IMX6UL_CLK_UART5_IPG 197
+#define IMX6UL_CLK_UART5_SERIAL 198
+#define IMX6UL_CLK_UART6_IPG 199
+#define IMX6UL_CLK_UART6_SERIAL 200
+#define IMX6UL_CLK_UART7_IPG 201
+#define IMX6UL_CLK_UART7_SERIAL 202
+#define IMX6UL_CLK_UART8_IPG 203
+#define IMX6UL_CLK_UART8_SERIAL 204
+#define IMX6UL_CLK_USBOH3 205
+#define IMX6UL_CLK_USDHC1 206
+#define IMX6UL_CLK_USDHC2 207
+#define IMX6UL_CLK_WDOG1 208
+#define IMX6UL_CLK_WDOG2 209
+#define IMX6UL_CLK_WDOG3 210
#define IMX6UL_CLK_LDB_DI0 211
-#define IMX6UL_CLK_AXI 212
+#define IMX6UL_CLK_AXI 212
#define IMX6UL_CLK_SPDIF_GCLK 213
#define IMX6UL_CLK_GPT_3M 214
#define IMX6UL_CLK_SIM2 215
@@ -234,7 +234,8 @@
#define IMX6UL_CLK_CSI_SEL 221
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
+#define IMX6UL_CLK_KPP 224
-#define IMX6UL_CLK_END 224
+#define IMX6UL_CLK_END 225
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/lpc32xx-clock.h b/include/dt-bindings/clock/lpc32xx-clock.h
index bcb1c9a73519..d41b6fea1450 100644
--- a/include/dt-bindings/clock/lpc32xx-clock.h
+++ b/include/dt-bindings/clock/lpc32xx-clock.h
@@ -47,6 +47,7 @@
#define LPC32XX_CLK_PWM1 32
#define LPC32XX_CLK_PWM2 33
#define LPC32XX_CLK_ADC 34
+#define LPC32XX_CLK_HCLK_PLL 35
/* LPC32XX USB clocks */
#define LPC32XX_USB_CLK_I2C 1
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
new file mode 100644
index 000000000000..6240e5b0e900
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
@@ -0,0 +1,158 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+#ifndef __QCOM_CLK_IPQ4019_H__
+#define __QCOM_CLK_IPQ4019_H__
+
+#define GCC_DUMMY_CLK 0
+#define AUDIO_CLK_SRC 1
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 5
+#define BLSP1_UART1_APPS_CLK_SRC 6
+#define BLSP1_UART2_APPS_CLK_SRC 7
+#define GCC_USB3_MOCK_UTMI_CLK_SRC 8
+#define GCC_APPS_CLK_SRC 9
+#define GCC_APPS_AHB_CLK_SRC 10
+#define GP1_CLK_SRC 11
+#define GP2_CLK_SRC 12
+#define GP3_CLK_SRC 13
+#define SDCC1_APPS_CLK_SRC 14
+#define FEPHY_125M_DLY_CLK_SRC 15
+#define WCSS2G_CLK_SRC 16
+#define WCSS5G_CLK_SRC 17
+#define GCC_APSS_AHB_CLK 18
+#define GCC_AUDIO_AHB_CLK 19
+#define GCC_AUDIO_PWM_CLK 20
+#define GCC_BLSP1_AHB_CLK 21
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 23
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 24
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART2_APPS_CLK 27
+#define GCC_DCD_XO_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP2_CLK 30
+#define GCC_GP3_CLK 31
+#define GCC_BOOT_ROM_AHB_CLK 32
+#define GCC_CRYPTO_AHB_CLK 33
+#define GCC_CRYPTO_AXI_CLK 34
+#define GCC_CRYPTO_CLK 35
+#define GCC_ESS_CLK 36
+#define GCC_IMEM_AXI_CLK 37
+#define GCC_IMEM_CFG_AHB_CLK 38
+#define GCC_PCIE_AHB_CLK 39
+#define GCC_PCIE_AXI_M_CLK 40
+#define GCC_PCIE_AXI_S_CLK 41
+#define GCC_PCNOC_AHB_CLK 42
+#define GCC_PRNG_AHB_CLK 43
+#define GCC_QPIC_AHB_CLK 44
+#define GCC_QPIC_CLK 45
+#define GCC_SDCC1_AHB_CLK 46
+#define GCC_SDCC1_APPS_CLK 47
+#define GCC_SNOC_PCNOC_AHB_CLK 48
+#define GCC_SYS_NOC_125M_CLK 49
+#define GCC_SYS_NOC_AXI_CLK 50
+#define GCC_TCSR_AHB_CLK 51
+#define GCC_TLMM_AHB_CLK 52
+#define GCC_USB2_MASTER_CLK 53
+#define GCC_USB2_SLEEP_CLK 54
+#define GCC_USB2_MOCK_UTMI_CLK 55
+#define GCC_USB3_MASTER_CLK 56
+#define GCC_USB3_SLEEP_CLK 57
+#define GCC_USB3_MOCK_UTMI_CLK 58
+#define GCC_WCSS2G_CLK 59
+#define GCC_WCSS2G_REF_CLK 60
+#define GCC_WCSS2G_RTC_CLK 61
+#define GCC_WCSS5G_CLK 62
+#define GCC_WCSS5G_REF_CLK 63
+#define GCC_WCSS5G_RTC_CLK 64
+
+#define WIFI0_CPU_INIT_RESET 0
+#define WIFI0_RADIO_SRIF_RESET 1
+#define WIFI0_RADIO_WARM_RESET 2
+#define WIFI0_RADIO_COLD_RESET 3
+#define WIFI0_CORE_WARM_RESET 4
+#define WIFI0_CORE_COLD_RESET 5
+#define WIFI1_CPU_INIT_RESET 6
+#define WIFI1_RADIO_SRIF_RESET 7
+#define WIFI1_RADIO_WARM_RESET 8
+#define WIFI1_RADIO_COLD_RESET 9
+#define WIFI1_CORE_WARM_RESET 10
+#define WIFI1_CORE_COLD_RESET 11
+#define USB3_UNIPHY_PHY_ARES 12
+#define USB3_HSPHY_POR_ARES 13
+#define USB3_HSPHY_S_ARES 14
+#define USB2_HSPHY_POR_ARES 15
+#define USB2_HSPHY_S_ARES 16
+#define PCIE_PHY_AHB_ARES 17
+#define PCIE_AHB_ARES 18
+#define PCIE_PWR_ARES 19
+#define PCIE_PIPE_STICKY_ARES 20
+#define PCIE_AXI_M_STICKY_ARES 21
+#define PCIE_PHY_ARES 22
+#define PCIE_PARF_XPU_ARES 23
+#define PCIE_AXI_S_XPU_ARES 24
+#define PCIE_AXI_M_VMIDMT_ARES 25
+#define PCIE_PIPE_ARES 26
+#define PCIE_AXI_S_ARES 27
+#define PCIE_AXI_M_ARES 28
+#define ESS_RESET 29
+#define GCC_BLSP1_BCR 30
+#define GCC_BLSP1_QUP1_BCR 31
+#define GCC_BLSP1_UART1_BCR 32
+#define GCC_BLSP1_QUP2_BCR 33
+#define GCC_BLSP1_UART2_BCR 34
+#define GCC_BIMC_BCR 35
+#define GCC_TLMM_BCR 36
+#define GCC_IMEM_BCR 37
+#define GCC_ESS_BCR 38
+#define GCC_PRNG_BCR 39
+#define GCC_BOOT_ROM_BCR 40
+#define GCC_CRYPTO_BCR 41
+#define GCC_SDCC1_BCR 42
+#define GCC_SEC_CTRL_BCR 43
+#define GCC_AUDIO_BCR 44
+#define GCC_QPIC_BCR 45
+#define GCC_PCIE_BCR 46
+#define GCC_USB2_BCR 47
+#define GCC_USB2_PHY_BCR 48
+#define GCC_USB3_BCR 49
+#define GCC_USB3_PHY_BCR 50
+#define GCC_SYSTEM_NOC_BCR 51
+#define GCC_PCNOC_BCR 52
+#define GCC_DCD_BCR 53
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 54
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 55
+#define GCC_SNOC_BUS_TIMEOUT2_BCR 56
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67
+#define GCC_TCSR_BCR 68
+#define GCC_QDSS_BCR 69
+#define GCC_MPM_BCR 70
+#define GCC_SPDM_BCR 71
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h
index 257e2fbedd94..28a27a4ed3c3 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8916.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h
@@ -174,6 +174,7 @@
#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157
#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158
#define GCC_CODEC_DIGCODEC_CLK 159
+#define GCC_MSS_Q6_BIMC_AXI_CLK 160

/* Indexes for GDSCs */
#define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h
index 888e75ce8fec..6f814db11c7e 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h
@@ -336,4 +336,15 @@
#define GCC_MSS_Q6_BCR 99
#define GCC_QREFS_VBG_CAL_BCR 100

+/* Indexes for GDSCs */
+#define AGGRE0_NOC_GDSC 0
+#define HLOS1_VOTE_AGGRE0_NOC_GDSC 1
+#define HLOS1_VOTE_LPASS_ADSP_GDSC 2
+#define HLOS1_VOTE_LPASS_CORE_GDSC 3
+#define USB30_GDSC 4
+#define PCIE0_GDSC 5
+#define PCIE1_GDSC 6
+#define PCIE2_GDSC 7
+#define UFS_GDSC 8
+
#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
index 9b81ca65fcec..7d3a7fa1a1bd 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
@@ -282,4 +282,21 @@
#define FD_BCR 58
#define MMSS_SPDM_RM_BCR 59

+/* Indexes for GDSCs */
+#define MMAGIC_VIDEO_GDSC 0
+#define MMAGIC_MDSS_GDSC 1
+#define MMAGIC_CAMSS_GDSC 2
+#define GPU_GDSC 3
+#define VENUS_GDSC 4
+#define VENUS_CORE0_GDSC 5
+#define VENUS_CORE1_GDSC 6
+#define CAMSS_GDSC 7
+#define VFE0_GDSC 8
+#define VFE1_GDSC 9
+#define JPEG_GDSC 10
+#define CPP_GDSC 11
+#define FD_GDSC 12
+#define MDSS_GDSC 13
+#define GPU_GX_GDSC 14
+
#endif
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
index 1579e07f96a3..efcbc594fe82 100644
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -145,6 +145,8 @@
#define R8A7793_CLK_SCU_ALL 17
#define R8A7793_CLK_SCU_DVC1 18
#define R8A7793_CLK_SCU_DVC0 19
+#define R8A7793_CLK_SCU_CTU1_MIX1 20
+#define R8A7793_CLK_SCU_CTU0_MIX0 21
#define R8A7793_CLK_SCU_SRC9 22
#define R8A7793_CLK_SCU_SRC8 23
#define R8A7793_CLK_SCU_SRC7 24
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index a7a7e0370968..f843de6bf377 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -84,6 +84,7 @@
/* MSTP8 */
#define R8A7794_CLK_VIN1 10
#define R8A7794_CLK_VIN0 11
+#define R8A7794_CLK_ETHERAVB 12
#define R8A7794_CLK_ETHER 13

/* MSTP9 */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index ebc7a7b43f52..de44109a3a04 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -54,6 +54,7 @@
#define SCLK_PVTM_VIDEO 125
#define SCLK_MAC 151
#define SCLK_MACREF 152
+#define SCLK_MACPLL 153
#define SCLK_SFC 160

/* aclk gates */
@@ -92,6 +93,7 @@
#define HCLK_SDMMC 456
#define HCLK_SDIO 457
#define HCLK_EMMC 459
+#define HCLK_MAC 460
#define HCLK_I2S 462
#define HCLK_LCDC 465
#define HCLK_ROM 467
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index 8df77a7c030b..4f53e70f68ee 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -55,6 +55,7 @@
#define SCLK_TIMER6 90
#define SCLK_JTAG 91
#define SCLK_SMC 92
+#define SCLK_TSADC 93

#define DCLK_LCDC0 190
#define DCLK_LCDC1 191
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index a78dd891e24a..5d43ed9b05ad 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -29,6 +29,7 @@
#define SCLK_SDMMC 68
#define SCLK_SDIO 69
#define SCLK_EMMC 71
+#define SCLK_TSADC 72
#define SCLK_UART0 77
#define SCLK_UART1 78
#define SCLK_UART2 79
@@ -49,10 +50,17 @@
#define SCLK_SDMMC_SAMPLE 118
#define SCLK_SDIO_SAMPLE 119
#define SCLK_EMMC_SAMPLE 121
+#define SCLK_VOP 122
+#define SCLK_HDMI_HDCP 123
+
+/* dclk gates */
+#define DCLK_VOP 190
+#define DCLK_HDMI_PHY 191

/* aclk gates */
#define ACLK_DMAC 194
#define ACLK_PERI 210
+#define ACLK_VOP 211

/* pclk gates */
#define PCLK_GPIO0 320
@@ -68,11 +76,15 @@
#define PCLK_UART0 341
#define PCLK_UART1 342
#define PCLK_UART2 343
+#define PCLK_TSADC 344
#define PCLK_PWM 350
#define PCLK_TIMER 353
#define PCLK_PERI 363
+#define PCLK_HDMI_CTRL 364
+#define PCLK_HDMI_PHY 365

/* hclk gates */
+#define HCLK_VOP 452
#define HCLK_NANDC 453
#define HCLK_SDMMC 456
#define HCLK_SDIO 457
diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h
new file mode 100644
index 000000000000..5fef5d1f8f82
--- /dev/null
+++ b/include/dt-bindings/memory/mt8173-larb-port.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DTS_IOMMU_PORT_MT8173_H
+#define __DTS_IOMMU_PORT_MT8173_H
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+/* Local arbiter ID */
+#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x7)
+/* PortID within the local arbiter */
+#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+#define M4U_LARB5_ID 5
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7)
+
+/* larb1 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9)
+
+/* larb2 */
+#define M4U_PORT_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_IMGO_D MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_RB MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_RP MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_WR MTK_M4U_ID(M4U_LARB2_ID, 20)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 14)
+
+/* larb4 */
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 3)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 4)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 5)
+
+/* larb5 */
+#define M4U_PORT_VENC_RCPU_SET2 MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_VENC_REC_FRM_SET2 MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_VENC_REF_LUMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_VENC_REC_CHROMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 3)
+#define M4U_PORT_VENC_BSDMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 4)
+#define M4U_PORT_VENC_CUR_LUMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 5)
+#define M4U_PORT_VENC_CUR_CHROMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 6)
+#define M4U_PORT_VENC_RD_COMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 7)
+#define M4U_PORT_VENC_SV_COMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 8)
+
+#endif
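
A quick illustration of the MTK_M4U_ID encoding introduced by mt8173-larb-port.h above: the larb (local arbiter) index sits in bits 5-7 of a port ID and the per-arbiter port number in bits 0-4. The user-space C sketch below is not part of the patch; it restates the three macros so it is self-contained and checks one entry, M4U_PORT_IMGI = MTK_M4U_ID(M4U_LARB2_ID, 11), by hand.

#include <assert.h>
#include <stdio.h>

/* Restated from mt8173-larb-port.h so this example stands alone. */
#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))
#define MTK_M4U_TO_LARB(id)	(((id) >> 5) & 0x7)
#define MTK_M4U_TO_PORT(id)	((id) & 0x1f)

int main(void)
{
	unsigned int id = MTK_M4U_ID(2, 11);	/* M4U_PORT_IMGI: larb2, port 11 */

	assert(id == ((2 << 5) | 11));		/* 75 */
	assert(MTK_M4U_TO_LARB(id) == 2);	/* recover the local arbiter */
	assert(MTK_M4U_TO_PORT(id) == 11);	/* recover the port within it */
	printf("id=%u larb=%u port=%u\n",
	       id, MTK_M4U_TO_LARB(id), MTK_M4U_TO_PORT(id));
	return 0;
}
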
diff --git a/include/dt-bindings/pinctrl/stm32f429-pinfunc.h b/include/dt-bindings/pinctrl/stm32f429-pinfunc.h
new file mode 100644
index 000000000000..26f18798d949
--- /dev/null
+++ b/include/dt-bindings/pinctrl/stm32f429-pinfunc.h
@@ -0,0 +1,1239 @@
+#ifndef _DT_BINDINGS_STM32F429_PINFUNC_H
+#define _DT_BINDINGS_STM32F429_PINFUNC_H
+
+#define STM32F429_PA0_FUNC_GPIO 0x0
+#define STM32F429_PA0_FUNC_TIM2_CH1_TIM2_ETR 0x2
+#define STM32F429_PA0_FUNC_TIM5_CH1 0x3
+#define STM32F429_PA0_FUNC_TIM8_ETR 0x4
+#define STM32F429_PA0_FUNC_USART2_CTS 0x8
+#define STM32F429_PA0_FUNC_UART4_TX 0x9
+#define STM32F429_PA0_FUNC_ETH_MII_CRS 0xc
+#define STM32F429_PA0_FUNC_EVENTOUT 0x10
+#define STM32F429_PA0_FUNC_ANALOG 0x11
+
+#define STM32F429_PA1_FUNC_GPIO 0x100
+#define STM32F429_PA1_FUNC_TIM2_CH2 0x102
+#define STM32F429_PA1_FUNC_TIM5_CH2 0x103
+#define STM32F429_PA1_FUNC_USART2_RTS 0x108
+#define STM32F429_PA1_FUNC_UART4_RX 0x109
+#define STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK 0x10c
+#define STM32F429_PA1_FUNC_EVENTOUT 0x110
+#define STM32F429_PA1_FUNC_ANALOG 0x111
+
+#define STM32F429_PA2_FUNC_GPIO 0x200
+#define STM32F429_PA2_FUNC_TIM2_CH3 0x202
+#define STM32F429_PA2_FUNC_TIM5_CH3 0x203
+#define STM32F429_PA2_FUNC_TIM9_CH1 0x204
+#define STM32F429_PA2_FUNC_USART2_TX 0x208
+#define STM32F429_PA2_FUNC_ETH_MDIO 0x20c
+#define STM32F429_PA2_FUNC_EVENTOUT 0x210
+#define STM32F429_PA2_FUNC_ANALOG 0x211
+
+#define STM32F429_PA3_FUNC_GPIO 0x300
+#define STM32F429_PA3_FUNC_TIM2_CH4 0x302
+#define STM32F429_PA3_FUNC_TIM5_CH4 0x303
+#define STM32F429_PA3_FUNC_TIM9_CH2 0x304
+#define STM32F429_PA3_FUNC_USART2_RX 0x308
+#define STM32F429_PA3_FUNC_OTG_HS_ULPI_D0 0x30b
+#define STM32F429_PA3_FUNC_ETH_MII_COL 0x30c
+#define STM32F429_PA3_FUNC_LCD_B5 0x30f
+#define STM32F429_PA3_FUNC_EVENTOUT 0x310
+#define STM32F429_PA3_FUNC_ANALOG 0x311
+
+#define STM32F429_PA4_FUNC_GPIO 0x400
+#define STM32F429_PA4_FUNC_SPI1_NSS 0x406
+#define STM32F429_PA4_FUNC_SPI3_NSS_I2S3_WS 0x407
+#define STM32F429_PA4_FUNC_USART2_CK 0x408
+#define STM32F429_PA4_FUNC_OTG_HS_SOF 0x40d
+#define STM32F429_PA4_FUNC_DCMI_HSYNC 0x40e
+#define STM32F429_PA4_FUNC_LCD_VSYNC 0x40f
+#define STM32F429_PA4_FUNC_EVENTOUT 0x410
+#define STM32F429_PA4_FUNC_ANALOG 0x411
+
+#define STM32F429_PA5_FUNC_GPIO 0x500
+#define STM32F429_PA5_FUNC_TIM2_CH1_TIM2_ETR 0x502
+#define STM32F429_PA5_FUNC_TIM8_CH1N 0x504
+#define STM32F429_PA5_FUNC_SPI1_SCK 0x506
+#define STM32F429_PA5_FUNC_OTG_HS_ULPI_CK 0x50b
+#define STM32F429_PA5_FUNC_EVENTOUT 0x510
+#define STM32F429_PA5_FUNC_ANALOG 0x511
+
+#define STM32F429_PA6_FUNC_GPIO 0x600
+#define STM32F429_PA6_FUNC_TIM1_BKIN 0x602
+#define STM32F429_PA6_FUNC_TIM3_CH1 0x603
+#define STM32F429_PA6_FUNC_TIM8_BKIN 0x604
+#define STM32F429_PA6_FUNC_SPI1_MISO 0x606
+#define STM32F429_PA6_FUNC_TIM13_CH1 0x60a
+#define STM32F429_PA6_FUNC_DCMI_PIXCLK 0x60e
+#define STM32F429_PA6_FUNC_LCD_G2 0x60f
+#define STM32F429_PA6_FUNC_EVENTOUT 0x610
+#define STM32F429_PA6_FUNC_ANALOG 0x611
+
+#define STM32F429_PA7_FUNC_GPIO 0x700
+#define STM32F429_PA7_FUNC_TIM1_CH1N 0x702
+#define STM32F429_PA7_FUNC_TIM3_CH2 0x703
+#define STM32F429_PA7_FUNC_TIM8_CH1N 0x704
+#define STM32F429_PA7_FUNC_SPI1_MOSI 0x706
+#define STM32F429_PA7_FUNC_TIM14_CH1 0x70a
+#define STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV 0x70c
+#define STM32F429_PA7_FUNC_EVENTOUT 0x710
+#define STM32F429_PA7_FUNC_ANALOG 0x711
+
+#define STM32F429_PA8_FUNC_GPIO 0x800
+#define STM32F429_PA8_FUNC_MCO1 0x801
+#define STM32F429_PA8_FUNC_TIM1_CH1 0x802
+#define STM32F429_PA8_FUNC_I2C3_SCL 0x805
+#define STM32F429_PA8_FUNC_USART1_CK 0x808
+#define STM32F429_PA8_FUNC_OTG_FS_SOF 0x80b
+#define STM32F429_PA8_FUNC_LCD_R6 0x80f
+#define STM32F429_PA8_FUNC_EVENTOUT 0x810
+#define STM32F429_PA8_FUNC_ANALOG 0x811
+
+#define STM32F429_PA9_FUNC_GPIO 0x900
+#define STM32F429_PA9_FUNC_TIM1_CH2 0x902
+#define STM32F429_PA9_FUNC_I2C3_SMBA 0x905
+#define STM32F429_PA9_FUNC_USART1_TX 0x908
+#define STM32F429_PA9_FUNC_DCMI_D0 0x90e
+#define STM32F429_PA9_FUNC_EVENTOUT 0x910
+#define STM32F429_PA9_FUNC_ANALOG 0x911
+
+#define STM32F429_PA10_FUNC_GPIO 0xa00
+#define STM32F429_PA10_FUNC_TIM1_CH3 0xa02
+#define STM32F429_PA10_FUNC_USART1_RX 0xa08
+#define STM32F429_PA10_FUNC_OTG_FS_ID 0xa0b
+#define STM32F429_PA10_FUNC_DCMI_D1 0xa0e
+#define STM32F429_PA10_FUNC_EVENTOUT 0xa10
+#define STM32F429_PA10_FUNC_ANALOG 0xa11
+
+#define STM32F429_PA11_FUNC_GPIO 0xb00
+#define STM32F429_PA11_FUNC_TIM1_CH4 0xb02
+#define STM32F429_PA11_FUNC_USART1_CTS 0xb08
+#define STM32F429_PA11_FUNC_CAN1_RX 0xb0a
+#define STM32F429_PA11_FUNC_OTG_FS_DM 0xb0b
+#define STM32F429_PA11_FUNC_LCD_R4 0xb0f
+#define STM32F429_PA11_FUNC_EVENTOUT 0xb10
+#define STM32F429_PA11_FUNC_ANALOG 0xb11
+
+#define STM32F429_PA12_FUNC_GPIO 0xc00
+#define STM32F429_PA12_FUNC_TIM1_ETR 0xc02
+#define STM32F429_PA12_FUNC_USART1_RTS 0xc08
+#define STM32F429_PA12_FUNC_CAN1_TX 0xc0a
+#define STM32F429_PA12_FUNC_OTG_FS_DP 0xc0b
+#define STM32F429_PA12_FUNC_LCD_R5 0xc0f
+#define STM32F429_PA12_FUNC_EVENTOUT 0xc10
+#define STM32F429_PA12_FUNC_ANALOG 0xc11
+
+#define STM32F429_PA13_FUNC_GPIO 0xd00
+#define STM32F429_PA13_FUNC_JTMS_SWDIO 0xd01
+#define STM32F429_PA13_FUNC_EVENTOUT 0xd10
+#define STM32F429_PA13_FUNC_ANALOG 0xd11
+
+#define STM32F429_PA14_FUNC_GPIO 0xe00
+#define STM32F429_PA14_FUNC_JTCK_SWCLK 0xe01
+#define STM32F429_PA14_FUNC_EVENTOUT 0xe10
+#define STM32F429_PA14_FUNC_ANALOG 0xe11
+
+#define STM32F429_PA15_FUNC_GPIO 0xf00
+#define STM32F429_PA15_FUNC_JTDI 0xf01
+#define STM32F429_PA15_FUNC_TIM2_CH1_TIM2_ETR 0xf02
+#define STM32F429_PA15_FUNC_SPI1_NSS 0xf06
+#define STM32F429_PA15_FUNC_SPI3_NSS_I2S3_WS 0xf07
+#define STM32F429_PA15_FUNC_EVENTOUT 0xf10
+#define STM32F429_PA15_FUNC_ANALOG 0xf11
+
+
+
+#define STM32F429_PB0_FUNC_GPIO 0x1000
+#define STM32F429_PB0_FUNC_TIM1_CH2N 0x1002
+#define STM32F429_PB0_FUNC_TIM3_CH3 0x1003
+#define STM32F429_PB0_FUNC_TIM8_CH2N 0x1004
+#define STM32F429_PB0_FUNC_LCD_R3 0x100a
+#define STM32F429_PB0_FUNC_OTG_HS_ULPI_D1 0x100b
+#define STM32F429_PB0_FUNC_ETH_MII_RXD2 0x100c
+#define STM32F429_PB0_FUNC_EVENTOUT 0x1010
+#define STM32F429_PB0_FUNC_ANALOG 0x1011
+
+#define STM32F429_PB1_FUNC_GPIO 0x1100
+#define STM32F429_PB1_FUNC_TIM1_CH3N 0x1102
+#define STM32F429_PB1_FUNC_TIM3_CH4 0x1103
+#define STM32F429_PB1_FUNC_TIM8_CH3N 0x1104
+#define STM32F429_PB1_FUNC_LCD_R6 0x110a
+#define STM32F429_PB1_FUNC_OTG_HS_ULPI_D2 0x110b
+#define STM32F429_PB1_FUNC_ETH_MII_RXD3 0x110c
+#define STM32F429_PB1_FUNC_EVENTOUT 0x1110
+#define STM32F429_PB1_FUNC_ANALOG 0x1111
+
+#define STM32F429_PB2_FUNC_GPIO 0x1200
+#define STM32F429_PB2_FUNC_EVENTOUT 0x1210
+#define STM32F429_PB2_FUNC_ANALOG 0x1211
+
+#define STM32F429_PB3_FUNC_GPIO 0x1300
+#define STM32F429_PB3_FUNC_JTDO_TRACESWO 0x1301
+#define STM32F429_PB3_FUNC_TIM2_CH2 0x1302
+#define STM32F429_PB3_FUNC_SPI1_SCK 0x1306
+#define STM32F429_PB3_FUNC_SPI3_SCK_I2S3_CK 0x1307
+#define STM32F429_PB3_FUNC_EVENTOUT 0x1310
+#define STM32F429_PB3_FUNC_ANALOG 0x1311
+
+#define STM32F429_PB4_FUNC_GPIO 0x1400
+#define STM32F429_PB4_FUNC_NJTRST 0x1401
+#define STM32F429_PB4_FUNC_TIM3_CH1 0x1403
+#define STM32F429_PB4_FUNC_SPI1_MISO 0x1406
+#define STM32F429_PB4_FUNC_SPI3_MISO 0x1407
+#define STM32F429_PB4_FUNC_I2S3EXT_SD 0x1408
+#define STM32F429_PB4_FUNC_EVENTOUT 0x1410
+#define STM32F429_PB4_FUNC_ANALOG 0x1411
+
+#define STM32F429_PB5_FUNC_GPIO 0x1500
+#define STM32F429_PB5_FUNC_TIM3_CH2 0x1503
+#define STM32F429_PB5_FUNC_I2C1_SMBA 0x1505
+#define STM32F429_PB5_FUNC_SPI1_MOSI 0x1506
+#define STM32F429_PB5_FUNC_SPI3_MOSI_I2S3_SD 0x1507
+#define STM32F429_PB5_FUNC_CAN2_RX 0x150a
+#define STM32F429_PB5_FUNC_OTG_HS_ULPI_D7 0x150b
+#define STM32F429_PB5_FUNC_ETH_PPS_OUT 0x150c
+#define STM32F429_PB5_FUNC_FMC_SDCKE1 0x150d
+#define STM32F429_PB5_FUNC_DCMI_D10 0x150e
+#define STM32F429_PB5_FUNC_EVENTOUT 0x1510
+#define STM32F429_PB5_FUNC_ANALOG 0x1511
+
+#define STM32F429_PB6_FUNC_GPIO 0x1600
+#define STM32F429_PB6_FUNC_TIM4_CH1 0x1603
+#define STM32F429_PB6_FUNC_I2C1_SCL 0x1605
+#define STM32F429_PB6_FUNC_USART1_TX 0x1608
+#define STM32F429_PB6_FUNC_CAN2_TX 0x160a
+#define STM32F429_PB6_FUNC_FMC_SDNE1 0x160d
+#define STM32F429_PB6_FUNC_DCMI_D5 0x160e
+#define STM32F429_PB6_FUNC_EVENTOUT 0x1610
+#define STM32F429_PB6_FUNC_ANALOG 0x1611
+
+#define STM32F429_PB7_FUNC_GPIO 0x1700
+#define STM32F429_PB7_FUNC_TIM4_CH2 0x1703
+#define STM32F429_PB7_FUNC_I2C1_SDA 0x1705
+#define STM32F429_PB7_FUNC_USART1_RX 0x1708
+#define STM32F429_PB7_FUNC_FMC_NL 0x170d
+#define STM32F429_PB7_FUNC_DCMI_VSYNC 0x170e
+#define STM32F429_PB7_FUNC_EVENTOUT 0x1710
+#define STM32F429_PB7_FUNC_ANALOG 0x1711
+
+#define STM32F429_PB8_FUNC_GPIO 0x1800
+#define STM32F429_PB8_FUNC_TIM4_CH3 0x1803
+#define STM32F429_PB8_FUNC_TIM10_CH1 0x1804
+#define STM32F429_PB8_FUNC_I2C1_SCL 0x1805
+#define STM32F429_PB8_FUNC_CAN1_RX 0x180a
+#define STM32F429_PB8_FUNC_ETH_MII_TXD3 0x180c
+#define STM32F429_PB8_FUNC_SDIO_D4 0x180d
+#define STM32F429_PB8_FUNC_DCMI_D6 0x180e
+#define STM32F429_PB8_FUNC_LCD_B6 0x180f
+#define STM32F429_PB8_FUNC_EVENTOUT 0x1810
+#define STM32F429_PB8_FUNC_ANALOG 0x1811
+
+#define STM32F429_PB9_FUNC_GPIO 0x1900
+#define STM32F429_PB9_FUNC_TIM4_CH4 0x1903
+#define STM32F429_PB9_FUNC_TIM11_CH1 0x1904
+#define STM32F429_PB9_FUNC_I2C1_SDA 0x1905
+#define STM32F429_PB9_FUNC_SPI2_NSS_I2S2_WS 0x1906
+#define STM32F429_PB9_FUNC_CAN1_TX 0x190a
+#define STM32F429_PB9_FUNC_SDIO_D5 0x190d
+#define STM32F429_PB9_FUNC_DCMI_D7 0x190e
+#define STM32F429_PB9_FUNC_LCD_B7 0x190f
+#define STM32F429_PB9_FUNC_EVENTOUT 0x1910
+#define STM32F429_PB9_FUNC_ANALOG 0x1911
+
+#define STM32F429_PB10_FUNC_GPIO 0x1a00
+#define STM32F429_PB10_FUNC_TIM2_CH3 0x1a02
+#define STM32F429_PB10_FUNC_I2C2_SCL 0x1a05
+#define STM32F429_PB10_FUNC_SPI2_SCK_I2S2_CK 0x1a06
+#define STM32F429_PB10_FUNC_USART3_TX 0x1a08
+#define STM32F429_PB10_FUNC_OTG_HS_ULPI_D3 0x1a0b
+#define STM32F429_PB10_FUNC_ETH_MII_RX_ER 0x1a0c
+#define STM32F429_PB10_FUNC_LCD_G4 0x1a0f
+#define STM32F429_PB10_FUNC_EVENTOUT 0x1a10
+#define STM32F429_PB10_FUNC_ANALOG 0x1a11
+
+#define STM32F429_PB11_FUNC_GPIO 0x1b00
+#define STM32F429_PB11_FUNC_TIM2_CH4 0x1b02
+#define STM32F429_PB11_FUNC_I2C2_SDA 0x1b05
+#define STM32F429_PB11_FUNC_USART3_RX 0x1b08
+#define STM32F429_PB11_FUNC_OTG_HS_ULPI_D4 0x1b0b
+#define STM32F429_PB11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN 0x1b0c
+#define STM32F429_PB11_FUNC_LCD_G5 0x1b0f
+#define STM32F429_PB11_FUNC_EVENTOUT 0x1b10
+#define STM32F429_PB11_FUNC_ANALOG 0x1b11
+
+#define STM32F429_PB12_FUNC_GPIO 0x1c00
+#define STM32F429_PB12_FUNC_TIM1_BKIN 0x1c02
+#define STM32F429_PB12_FUNC_I2C2_SMBA 0x1c05
+#define STM32F429_PB12_FUNC_SPI2_NSS_I2S2_WS 0x1c06
+#define STM32F429_PB12_FUNC_USART3_CK 0x1c08
+#define STM32F429_PB12_FUNC_CAN2_RX 0x1c0a
+#define STM32F429_PB12_FUNC_OTG_HS_ULPI_D5 0x1c0b
+#define STM32F429_PB12_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0 0x1c0c
+#define STM32F429_PB12_FUNC_OTG_HS_ID 0x1c0d
+#define STM32F429_PB12_FUNC_EVENTOUT 0x1c10
+#define STM32F429_PB12_FUNC_ANALOG 0x1c11
+
+#define STM32F429_PB13_FUNC_GPIO 0x1d00
+#define STM32F429_PB13_FUNC_TIM1_CH1N 0x1d02
+#define STM32F429_PB13_FUNC_SPI2_SCK_I2S2_CK 0x1d06
+#define STM32F429_PB13_FUNC_USART3_CTS 0x1d08
+#define STM32F429_PB13_FUNC_CAN2_TX 0x1d0a
+#define STM32F429_PB13_FUNC_OTG_HS_ULPI_D6 0x1d0b
+#define STM32F429_PB13_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1 0x1d0c
+#define STM32F429_PB13_FUNC_EVENTOUT 0x1d10
+#define STM32F429_PB13_FUNC_ANALOG 0x1d11
+
+#define STM32F429_PB14_FUNC_GPIO 0x1e00
+#define STM32F429_PB14_FUNC_TIM1_CH2N 0x1e02
+#define STM32F429_PB14_FUNC_TIM8_CH2N 0x1e04
+#define STM32F429_PB14_FUNC_SPI2_MISO 0x1e06
+#define STM32F429_PB14_FUNC_I2S2EXT_SD 0x1e07
+#define STM32F429_PB14_FUNC_USART3_RTS 0x1e08
+#define STM32F429_PB14_FUNC_TIM12_CH1 0x1e0a
+#define STM32F429_PB14_FUNC_OTG_HS_DM 0x1e0d
+#define STM32F429_PB14_FUNC_EVENTOUT 0x1e10
+#define STM32F429_PB14_FUNC_ANALOG 0x1e11
+
+#define STM32F429_PB15_FUNC_GPIO 0x1f00
+#define STM32F429_PB15_FUNC_RTC_REFIN 0x1f01
+#define STM32F429_PB15_FUNC_TIM1_CH3N 0x1f02
+#define STM32F429_PB15_FUNC_TIM8_CH3N 0x1f04
+#define STM32F429_PB15_FUNC_SPI2_MOSI_I2S2_SD 0x1f06
+#define STM32F429_PB15_FUNC_TIM12_CH2 0x1f0a
+#define STM32F429_PB15_FUNC_OTG_HS_DP 0x1f0d
+#define STM32F429_PB15_FUNC_EVENTOUT 0x1f10
+#define STM32F429_PB15_FUNC_ANALOG 0x1f11
+
+
+
+#define STM32F429_PC0_FUNC_GPIO 0x2000
+#define STM32F429_PC0_FUNC_OTG_HS_ULPI_STP 0x200b
+#define STM32F429_PC0_FUNC_FMC_SDNWE 0x200d
+#define STM32F429_PC0_FUNC_EVENTOUT 0x2010
+#define STM32F429_PC0_FUNC_ANALOG 0x2011
+
+#define STM32F429_PC1_FUNC_GPIO 0x2100
+#define STM32F429_PC1_FUNC_ETH_MDC 0x210c
+#define STM32F429_PC1_FUNC_EVENTOUT 0x2110
+#define STM32F429_PC1_FUNC_ANALOG 0x2111
+
+#define STM32F429_PC2_FUNC_GPIO 0x2200
+#define STM32F429_PC2_FUNC_SPI2_MISO 0x2206
+#define STM32F429_PC2_FUNC_I2S2EXT_SD 0x2207
+#define STM32F429_PC2_FUNC_OTG_HS_ULPI_DIR 0x220b
+#define STM32F429_PC2_FUNC_ETH_MII_TXD2 0x220c
+#define STM32F429_PC2_FUNC_FMC_SDNE0 0x220d
+#define STM32F429_PC2_FUNC_EVENTOUT 0x2210
+#define STM32F429_PC2_FUNC_ANALOG 0x2211
+
+#define STM32F429_PC3_FUNC_GPIO 0x2300
+#define STM32F429_PC3_FUNC_SPI2_MOSI_I2S2_SD 0x2306
+#define STM32F429_PC3_FUNC_OTG_HS_ULPI_NXT 0x230b
+#define STM32F429_PC3_FUNC_ETH_MII_TX_CLK 0x230c
+#define STM32F429_PC3_FUNC_FMC_SDCKE0 0x230d
+#define STM32F429_PC3_FUNC_EVENTOUT 0x2310
+#define STM32F429_PC3_FUNC_ANALOG 0x2311
+
+#define STM32F429_PC4_FUNC_GPIO 0x2400
+#define STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0 0x240c
+#define STM32F429_PC4_FUNC_EVENTOUT 0x2410
+#define STM32F429_PC4_FUNC_ANALOG 0x2411
+
+#define STM32F429_PC5_FUNC_GPIO 0x2500
+#define STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1 0x250c
+#define STM32F429_PC5_FUNC_EVENTOUT 0x2510
+#define STM32F429_PC5_FUNC_ANALOG 0x2511
+
+#define STM32F429_PC6_FUNC_GPIO 0x2600
+#define STM32F429_PC6_FUNC_TIM3_CH1 0x2603
+#define STM32F429_PC6_FUNC_TIM8_CH1 0x2604
+#define STM32F429_PC6_FUNC_I2S2_MCK 0x2606
+#define STM32F429_PC6_FUNC_USART6_TX 0x2609
+#define STM32F429_PC6_FUNC_SDIO_D6 0x260d
+#define STM32F429_PC6_FUNC_DCMI_D0 0x260e
+#define STM32F429_PC6_FUNC_LCD_HSYNC 0x260f
+#define STM32F429_PC6_FUNC_EVENTOUT 0x2610
+#define STM32F429_PC6_FUNC_ANALOG 0x2611
+
+#define STM32F429_PC7_FUNC_GPIO 0x2700
+#define STM32F429_PC7_FUNC_TIM3_CH2 0x2703
+#define STM32F429_PC7_FUNC_TIM8_CH2 0x2704
+#define STM32F429_PC7_FUNC_I2S3_MCK 0x2707
+#define STM32F429_PC7_FUNC_USART6_RX 0x2709
+#define STM32F429_PC7_FUNC_SDIO_D7 0x270d
+#define STM32F429_PC7_FUNC_DCMI_D1 0x270e
+#define STM32F429_PC7_FUNC_LCD_G6 0x270f
+#define STM32F429_PC7_FUNC_EVENTOUT 0x2710
+#define STM32F429_PC7_FUNC_ANALOG 0x2711
+
+#define STM32F429_PC8_FUNC_GPIO 0x2800
+#define STM32F429_PC8_FUNC_TIM3_CH3 0x2803
+#define STM32F429_PC8_FUNC_TIM8_CH3 0x2804
+#define STM32F429_PC8_FUNC_USART6_CK 0x2809
+#define STM32F429_PC8_FUNC_SDIO_D0 0x280d
+#define STM32F429_PC8_FUNC_DCMI_D2 0x280e
+#define STM32F429_PC8_FUNC_EVENTOUT 0x2810
+#define STM32F429_PC8_FUNC_ANALOG 0x2811
+
+#define STM32F429_PC9_FUNC_GPIO 0x2900
+#define STM32F429_PC9_FUNC_MCO2 0x2901
+#define STM32F429_PC9_FUNC_TIM3_CH4 0x2903
+#define STM32F429_PC9_FUNC_TIM8_CH4 0x2904
+#define STM32F429_PC9_FUNC_I2C3_SDA 0x2905
+#define STM32F429_PC9_FUNC_I2S_CKIN 0x2906
+#define STM32F429_PC9_FUNC_SDIO_D1 0x290d
+#define STM32F429_PC9_FUNC_DCMI_D3 0x290e
+#define STM32F429_PC9_FUNC_EVENTOUT 0x2910
+#define STM32F429_PC9_FUNC_ANALOG 0x2911
+
+#define STM32F429_PC10_FUNC_GPIO 0x2a00
+#define STM32F429_PC10_FUNC_SPI3_SCK_I2S3_CK 0x2a07
+#define STM32F429_PC10_FUNC_USART3_TX 0x2a08
+#define STM32F429_PC10_FUNC_UART4_TX 0x2a09
+#define STM32F429_PC10_FUNC_SDIO_D2 0x2a0d
+#define STM32F429_PC10_FUNC_DCMI_D8 0x2a0e
+#define STM32F429_PC10_FUNC_LCD_R2 0x2a0f
+#define STM32F429_PC10_FUNC_EVENTOUT 0x2a10
+#define STM32F429_PC10_FUNC_ANALOG 0x2a11
+
+#define STM32F429_PC11_FUNC_GPIO 0x2b00
+#define STM32F429_PC11_FUNC_I2S3EXT_SD 0x2b06
+#define STM32F429_PC11_FUNC_SPI3_MISO 0x2b07
+#define STM32F429_PC11_FUNC_USART3_RX 0x2b08
+#define STM32F429_PC11_FUNC_UART4_RX 0x2b09
+#define STM32F429_PC11_FUNC_SDIO_D3 0x2b0d
+#define STM32F429_PC11_FUNC_DCMI_D4 0x2b0e
+#define STM32F429_PC11_FUNC_EVENTOUT 0x2b10
+#define STM32F429_PC11_FUNC_ANALOG 0x2b11
+
+#define STM32F429_PC12_FUNC_GPIO 0x2c00
+#define STM32F429_PC12_FUNC_SPI3_MOSI_I2S3_SD 0x2c07
+#define STM32F429_PC12_FUNC_USART3_CK 0x2c08
+#define STM32F429_PC12_FUNC_UART5_TX 0x2c09
+#define STM32F429_PC12_FUNC_SDIO_CK 0x2c0d
+#define STM32F429_PC12_FUNC_DCMI_D9 0x2c0e
+#define STM32F429_PC12_FUNC_EVENTOUT 0x2c10
+#define STM32F429_PC12_FUNC_ANALOG 0x2c11
+
+#define STM32F429_PC13_FUNC_GPIO 0x2d00
+#define STM32F429_PC13_FUNC_EVENTOUT 0x2d10
+#define STM32F429_PC13_FUNC_ANALOG 0x2d11
+
+#define STM32F429_PC14_FUNC_GPIO 0x2e00
+#define STM32F429_PC14_FUNC_EVENTOUT 0x2e10
+#define STM32F429_PC14_FUNC_ANALOG 0x2e11
+
+#define STM32F429_PC15_FUNC_GPIO 0x2f00
+#define STM32F429_PC15_FUNC_EVENTOUT 0x2f10
+#define STM32F429_PC15_FUNC_ANALOG 0x2f11
+
+
+
+#define STM32F429_PD0_FUNC_GPIO 0x3000
+#define STM32F429_PD0_FUNC_CAN1_RX 0x300a
+#define STM32F429_PD0_FUNC_FMC_D2 0x300d
+#define STM32F429_PD0_FUNC_EVENTOUT 0x3010
+#define STM32F429_PD0_FUNC_ANALOG 0x3011
+
+#define STM32F429_PD1_FUNC_GPIO 0x3100
+#define STM32F429_PD1_FUNC_CAN1_TX 0x310a
+#define STM32F429_PD1_FUNC_FMC_D3 0x310d
+#define STM32F429_PD1_FUNC_EVENTOUT 0x3110
+#define STM32F429_PD1_FUNC_ANALOG 0x3111
+
+#define STM32F429_PD2_FUNC_GPIO 0x3200
+#define STM32F429_PD2_FUNC_TIM3_ETR 0x3203
+#define STM32F429_PD2_FUNC_UART5_RX 0x3209
+#define STM32F429_PD2_FUNC_SDIO_CMD 0x320d
+#define STM32F429_PD2_FUNC_DCMI_D11 0x320e
+#define STM32F429_PD2_FUNC_EVENTOUT 0x3210
+#define STM32F429_PD2_FUNC_ANALOG 0x3211
+
+#define STM32F429_PD3_FUNC_GPIO 0x3300
+#define STM32F429_PD3_FUNC_SPI2_SCK_I2S2_CK 0x3306
+#define STM32F429_PD3_FUNC_USART2_CTS 0x3308
+#define STM32F429_PD3_FUNC_FMC_CLK 0x330d
+#define STM32F429_PD3_FUNC_DCMI_D5 0x330e
+#define STM32F429_PD3_FUNC_LCD_G7 0x330f
+#define STM32F429_PD3_FUNC_EVENTOUT 0x3310
+#define STM32F429_PD3_FUNC_ANALOG 0x3311
+
+#define STM32F429_PD4_FUNC_GPIO 0x3400
+#define STM32F429_PD4_FUNC_USART2_RTS 0x3408
+#define STM32F429_PD4_FUNC_FMC_NOE 0x340d
+#define STM32F429_PD4_FUNC_EVENTOUT 0x3410
+#define STM32F429_PD4_FUNC_ANALOG 0x3411
+
+#define STM32F429_PD5_FUNC_GPIO 0x3500
+#define STM32F429_PD5_FUNC_USART2_TX 0x3508
+#define STM32F429_PD5_FUNC_FMC_NWE 0x350d
+#define STM32F429_PD5_FUNC_EVENTOUT 0x3510
+#define STM32F429_PD5_FUNC_ANALOG 0x3511
+
+#define STM32F429_PD6_FUNC_GPIO 0x3600
+#define STM32F429_PD6_FUNC_SPI3_MOSI_I2S3_SD 0x3606
+#define STM32F429_PD6_FUNC_SAI1_SD_A 0x3607
+#define STM32F429_PD6_FUNC_USART2_RX 0x3608
+#define STM32F429_PD6_FUNC_FMC_NWAIT 0x360d
+#define STM32F429_PD6_FUNC_DCMI_D10 0x360e
+#define STM32F429_PD6_FUNC_LCD_B2 0x360f
+#define STM32F429_PD6_FUNC_EVENTOUT 0x3610
+#define STM32F429_PD6_FUNC_ANALOG 0x3611
+
+#define STM32F429_PD7_FUNC_GPIO 0x3700
+#define STM32F429_PD7_FUNC_USART2_CK 0x3708
+#define STM32F429_PD7_FUNC_FMC_NE1_FMC_NCE2 0x370d
+#define STM32F429_PD7_FUNC_EVENTOUT 0x3710
+#define STM32F429_PD7_FUNC_ANALOG 0x3711
+
+#define STM32F429_PD8_FUNC_GPIO 0x3800
+#define STM32F429_PD8_FUNC_USART3_TX 0x3808
+#define STM32F429_PD8_FUNC_FMC_D13 0x380d
+#define STM32F429_PD8_FUNC_EVENTOUT 0x3810
+#define STM32F429_PD8_FUNC_ANALOG 0x3811
+
+#define STM32F429_PD9_FUNC_GPIO 0x3900
+#define STM32F429_PD9_FUNC_USART3_RX 0x3908
+#define STM32F429_PD9_FUNC_FMC_D14 0x390d
+#define STM32F429_PD9_FUNC_EVENTOUT 0x3910
+#define STM32F429_PD9_FUNC_ANALOG 0x3911
+
+#define STM32F429_PD10_FUNC_GPIO 0x3a00
+#define STM32F429_PD10_FUNC_USART3_CK 0x3a08
+#define STM32F429_PD10_FUNC_FMC_D15 0x3a0d
+#define STM32F429_PD10_FUNC_LCD_B3 0x3a0f
+#define STM32F429_PD10_FUNC_EVENTOUT 0x3a10
+#define STM32F429_PD10_FUNC_ANALOG 0x3a11
+
+#define STM32F429_PD11_FUNC_GPIO 0x3b00
+#define STM32F429_PD11_FUNC_USART3_CTS 0x3b08
+#define STM32F429_PD11_FUNC_FMC_A16 0x3b0d
+#define STM32F429_PD11_FUNC_EVENTOUT 0x3b10
+#define STM32F429_PD11_FUNC_ANALOG 0x3b11
+
+#define STM32F429_PD12_FUNC_GPIO 0x3c00
+#define STM32F429_PD12_FUNC_TIM4_CH1 0x3c03
+#define STM32F429_PD12_FUNC_USART3_RTS 0x3c08
+#define STM32F429_PD12_FUNC_FMC_A17 0x3c0d
+#define STM32F429_PD12_FUNC_EVENTOUT 0x3c10
+#define STM32F429_PD12_FUNC_ANALOG 0x3c11
+
+#define STM32F429_PD13_FUNC_GPIO 0x3d00
+#define STM32F429_PD13_FUNC_TIM4_CH2 0x3d03
+#define STM32F429_PD13_FUNC_FMC_A18 0x3d0d
+#define STM32F429_PD13_FUNC_EVENTOUT 0x3d10
+#define STM32F429_PD13_FUNC_ANALOG 0x3d11
+
+#define STM32F429_PD14_FUNC_GPIO 0x3e00
+#define STM32F429_PD14_FUNC_TIM4_CH3 0x3e03
+#define STM32F429_PD14_FUNC_FMC_D0 0x3e0d
+#define STM32F429_PD14_FUNC_EVENTOUT 0x3e10
+#define STM32F429_PD14_FUNC_ANALOG 0x3e11
+
+#define STM32F429_PD15_FUNC_GPIO 0x3f00
+#define STM32F429_PD15_FUNC_TIM4_CH4 0x3f03
+#define STM32F429_PD15_FUNC_FMC_D1 0x3f0d
+#define STM32F429_PD15_FUNC_EVENTOUT 0x3f10
+#define STM32F429_PD15_FUNC_ANALOG 0x3f11
+
+
+
+#define STM32F429_PE0_FUNC_GPIO 0x4000
+#define STM32F429_PE0_FUNC_TIM4_ETR 0x4003
+#define STM32F429_PE0_FUNC_UART8_RX 0x4009
+#define STM32F429_PE0_FUNC_FMC_NBL0 0x400d
+#define STM32F429_PE0_FUNC_DCMI_D2 0x400e
+#define STM32F429_PE0_FUNC_EVENTOUT 0x4010
+#define STM32F429_PE0_FUNC_ANALOG 0x4011
+
+#define STM32F429_PE1_FUNC_GPIO 0x4100
+#define STM32F429_PE1_FUNC_UART8_TX 0x4109
+#define STM32F429_PE1_FUNC_FMC_NBL1 0x410d
+#define STM32F429_PE1_FUNC_DCMI_D3 0x410e
+#define STM32F429_PE1_FUNC_EVENTOUT 0x4110
+#define STM32F429_PE1_FUNC_ANALOG 0x4111
+
+#define STM32F429_PE2_FUNC_GPIO 0x4200
+#define STM32F429_PE2_FUNC_TRACECLK 0x4201
+#define STM32F429_PE2_FUNC_SPI4_SCK 0x4206
+#define STM32F429_PE2_FUNC_SAI1_MCLK_A 0x4207
+#define STM32F429_PE2_FUNC_ETH_MII_TXD3 0x420c
+#define STM32F429_PE2_FUNC_FMC_A23 0x420d
+#define STM32F429_PE2_FUNC_EVENTOUT 0x4210
+#define STM32F429_PE2_FUNC_ANALOG 0x4211
+
+#define STM32F429_PE3_FUNC_GPIO 0x4300
+#define STM32F429_PE3_FUNC_TRACED0 0x4301
+#define STM32F429_PE3_FUNC_SAI1_SD_B 0x4307
+#define STM32F429_PE3_FUNC_FMC_A19 0x430d
+#define STM32F429_PE3_FUNC_EVENTOUT 0x4310
+#define STM32F429_PE3_FUNC_ANALOG 0x4311
+
+#define STM32F429_PE4_FUNC_GPIO 0x4400
+#define STM32F429_PE4_FUNC_TRACED1 0x4401
+#define STM32F429_PE4_FUNC_SPI4_NSS 0x4406
+#define STM32F429_PE4_FUNC_SAI1_FS_A 0x4407
+#define STM32F429_PE4_FUNC_FMC_A20 0x440d
+#define STM32F429_PE4_FUNC_DCMI_D4 0x440e
+#define STM32F429_PE4_FUNC_LCD_B0 0x440f
+#define STM32F429_PE4_FUNC_EVENTOUT 0x4410
+#define STM32F429_PE4_FUNC_ANALOG 0x4411
+
+#define STM32F429_PE5_FUNC_GPIO 0x4500
+#define STM32F429_PE5_FUNC_TRACED2 0x4501
+#define STM32F429_PE5_FUNC_TIM9_CH1 0x4504
+#define STM32F429_PE5_FUNC_SPI4_MISO 0x4506
+#define STM32F429_PE5_FUNC_SAI1_SCK_A 0x4507
+#define STM32F429_PE5_FUNC_FMC_A21 0x450d
+#define STM32F429_PE5_FUNC_DCMI_D6 0x450e
+#define STM32F429_PE5_FUNC_LCD_G0 0x450f
+#define STM32F429_PE5_FUNC_EVENTOUT 0x4510
+#define STM32F429_PE5_FUNC_ANALOG 0x4511
+
+#define STM32F429_PE6_FUNC_GPIO 0x4600
+#define STM32F429_PE6_FUNC_TRACED3 0x4601
+#define STM32F429_PE6_FUNC_TIM9_CH2 0x4604
+#define STM32F429_PE6_FUNC_SPI4_MOSI 0x4606
+#define STM32F429_PE6_FUNC_SAI1_SD_A 0x4607
+#define STM32F429_PE6_FUNC_FMC_A22 0x460d
+#define STM32F429_PE6_FUNC_DCMI_D7 0x460e
+#define STM32F429_PE6_FUNC_LCD_G1 0x460f
+#define STM32F429_PE6_FUNC_EVENTOUT 0x4610
+#define STM32F429_PE6_FUNC_ANALOG 0x4611
+
+#define STM32F429_PE7_FUNC_GPIO 0x4700
+#define STM32F429_PE7_FUNC_TIM1_ETR 0x4702
+#define STM32F429_PE7_FUNC_UART7_RX 0x4709
+#define STM32F429_PE7_FUNC_FMC_D4 0x470d
+#define STM32F429_PE7_FUNC_EVENTOUT 0x4710
+#define STM32F429_PE7_FUNC_ANALOG 0x4711
+
+#define STM32F429_PE8_FUNC_GPIO 0x4800
+#define STM32F429_PE8_FUNC_TIM1_CH1N 0x4802
+#define STM32F429_PE8_FUNC_UART7_TX 0x4809
+#define STM32F429_PE8_FUNC_FMC_D5 0x480d
+#define STM32F429_PE8_FUNC_EVENTOUT 0x4810
+#define STM32F429_PE8_FUNC_ANALOG 0x4811
+
+#define STM32F429_PE9_FUNC_GPIO 0x4900
+#define STM32F429_PE9_FUNC_TIM1_CH1 0x4902
+#define STM32F429_PE9_FUNC_FMC_D6 0x490d
+#define STM32F429_PE9_FUNC_EVENTOUT 0x4910
+#define STM32F429_PE9_FUNC_ANALOG 0x4911
+
+#define STM32F429_PE10_FUNC_GPIO 0x4a00
+#define STM32F429_PE10_FUNC_TIM1_CH2N 0x4a02
+#define STM32F429_PE10_FUNC_FMC_D7 0x4a0d
+#define STM32F429_PE10_FUNC_EVENTOUT 0x4a10
+#define STM32F429_PE10_FUNC_ANALOG 0x4a11
+
+#define STM32F429_PE11_FUNC_GPIO 0x4b00
+#define STM32F429_PE11_FUNC_TIM1_CH2 0x4b02
+#define STM32F429_PE11_FUNC_SPI4_NSS 0x4b06
+#define STM32F429_PE11_FUNC_FMC_D8 0x4b0d
+#define STM32F429_PE11_FUNC_LCD_G3 0x4b0f
+#define STM32F429_PE11_FUNC_EVENTOUT 0x4b10
+#define STM32F429_PE11_FUNC_ANALOG 0x4b11
+
+#define STM32F429_PE12_FUNC_GPIO 0x4c00
+#define STM32F429_PE12_FUNC_TIM1_CH3N 0x4c02
+#define STM32F429_PE12_FUNC_SPI4_SCK 0x4c06
+#define STM32F429_PE12_FUNC_FMC_D9 0x4c0d
+#define STM32F429_PE12_FUNC_LCD_B4 0x4c0f
+#define STM32F429_PE12_FUNC_EVENTOUT 0x4c10
+#define STM32F429_PE12_FUNC_ANALOG 0x4c11
+
+#define STM32F429_PE13_FUNC_GPIO 0x4d00
+#define STM32F429_PE13_FUNC_TIM1_CH3 0x4d02
+#define STM32F429_PE13_FUNC_SPI4_MISO 0x4d06
+#define STM32F429_PE13_FUNC_FMC_D10 0x4d0d
+#define STM32F429_PE13_FUNC_LCD_DE 0x4d0f
+#define STM32F429_PE13_FUNC_EVENTOUT 0x4d10
+#define STM32F429_PE13_FUNC_ANALOG 0x4d11
+
+#define STM32F429_PE14_FUNC_GPIO 0x4e00
+#define STM32F429_PE14_FUNC_TIM1_CH4 0x4e02
+#define STM32F429_PE14_FUNC_SPI4_MOSI 0x4e06
+#define STM32F429_PE14_FUNC_FMC_D11 0x4e0d
+#define STM32F429_PE14_FUNC_LCD_CLK 0x4e0f
+#define STM32F429_PE14_FUNC_EVENTOUT 0x4e10
+#define STM32F429_PE14_FUNC_ANALOG 0x4e11
+
+#define STM32F429_PE15_FUNC_GPIO 0x4f00
+#define STM32F429_PE15_FUNC_TIM1_BKIN 0x4f02
+#define STM32F429_PE15_FUNC_FMC_D12 0x4f0d
+#define STM32F429_PE15_FUNC_LCD_R7 0x4f0f
+#define STM32F429_PE15_FUNC_EVENTOUT 0x4f10
+#define STM32F429_PE15_FUNC_ANALOG 0x4f11
+
+
+
+#define STM32F429_PF0_FUNC_GPIO 0x5000
+#define STM32F429_PF0_FUNC_I2C2_SDA 0x5005
+#define STM32F429_PF0_FUNC_FMC_A0 0x500d
+#define STM32F429_PF0_FUNC_EVENTOUT 0x5010
+#define STM32F429_PF0_FUNC_ANALOG 0x5011
+
+#define STM32F429_PF1_FUNC_GPIO 0x5100
+#define STM32F429_PF1_FUNC_I2C2_SCL 0x5105
+#define STM32F429_PF1_FUNC_FMC_A1 0x510d
+#define STM32F429_PF1_FUNC_EVENTOUT 0x5110
+#define STM32F429_PF1_FUNC_ANALOG 0x5111
+
+#define STM32F429_PF2_FUNC_GPIO 0x5200
+#define STM32F429_PF2_FUNC_I2C2_SMBA 0x5205
+#define STM32F429_PF2_FUNC_FMC_A2 0x520d
+#define STM32F429_PF2_FUNC_EVENTOUT 0x5210
+#define STM32F429_PF2_FUNC_ANALOG 0x5211
+
+#define STM32F429_PF3_FUNC_GPIO 0x5300
+#define STM32F429_PF3_FUNC_FMC_A3 0x530d
+#define STM32F429_PF3_FUNC_EVENTOUT 0x5310
+#define STM32F429_PF3_FUNC_ANALOG 0x5311
+
+#define STM32F429_PF4_FUNC_GPIO 0x5400
+#define STM32F429_PF4_FUNC_FMC_A4 0x540d
+#define STM32F429_PF4_FUNC_EVENTOUT 0x5410
+#define STM32F429_PF4_FUNC_ANALOG 0x5411
+
+#define STM32F429_PF5_FUNC_GPIO 0x5500
+#define STM32F429_PF5_FUNC_FMC_A5 0x550d
+#define STM32F429_PF5_FUNC_EVENTOUT 0x5510
+#define STM32F429_PF5_FUNC_ANALOG 0x5511
+
+#define STM32F429_PF6_FUNC_GPIO 0x5600
+#define STM32F429_PF6_FUNC_TIM10_CH1 0x5604
+#define STM32F429_PF6_FUNC_SPI5_NSS 0x5606
+#define STM32F429_PF6_FUNC_SAI1_SD_B 0x5607
+#define STM32F429_PF6_FUNC_UART7_RX 0x5609
+#define STM32F429_PF6_FUNC_FMC_NIORD 0x560d
+#define STM32F429_PF6_FUNC_EVENTOUT 0x5610
+#define STM32F429_PF6_FUNC_ANALOG 0x5611
+
+#define STM32F429_PF7_FUNC_GPIO 0x5700
+#define STM32F429_PF7_FUNC_TIM11_CH1 0x5704
+#define STM32F429_PF7_FUNC_SPI5_SCK 0x5706
+#define STM32F429_PF7_FUNC_SAI1_MCLK_B 0x5707
+#define STM32F429_PF7_FUNC_UART7_TX 0x5709
+#define STM32F429_PF7_FUNC_FMC_NREG 0x570d
+#define STM32F429_PF7_FUNC_EVENTOUT 0x5710
+#define STM32F429_PF7_FUNC_ANALOG 0x5711
+
+#define STM32F429_PF8_FUNC_GPIO 0x5800
+#define STM32F429_PF8_FUNC_SPI5_MISO 0x5806
+#define STM32F429_PF8_FUNC_SAI1_SCK_B 0x5807
+#define STM32F429_PF8_FUNC_TIM13_CH1 0x580a
+#define STM32F429_PF8_FUNC_FMC_NIOWR 0x580d
+#define STM32F429_PF8_FUNC_EVENTOUT 0x5810
+#define STM32F429_PF8_FUNC_ANALOG 0x5811
+
+#define STM32F429_PF9_FUNC_GPIO 0x5900
+#define STM32F429_PF9_FUNC_SPI5_MOSI 0x5906
+#define STM32F429_PF9_FUNC_SAI1_FS_B 0x5907
+#define STM32F429_PF9_FUNC_TIM14_CH1 0x590a
+#define STM32F429_PF9_FUNC_FMC_CD 0x590d
+#define STM32F429_PF9_FUNC_EVENTOUT 0x5910
+#define STM32F429_PF9_FUNC_ANALOG 0x5911
+
+#define STM32F429_PF10_FUNC_GPIO 0x5a00
+#define STM32F429_PF10_FUNC_FMC_INTR 0x5a0d
+#define STM32F429_PF10_FUNC_DCMI_D11 0x5a0e
+#define STM32F429_PF10_FUNC_LCD_DE 0x5a0f
+#define STM32F429_PF10_FUNC_EVENTOUT 0x5a10
+#define STM32F429_PF10_FUNC_ANALOG 0x5a11
+
+#define STM32F429_PF11_FUNC_GPIO 0x5b00
+#define STM32F429_PF11_FUNC_SPI5_MOSI 0x5b06
+#define STM32F429_PF11_FUNC_FMC_SDNRAS 0x5b0d
+#define STM32F429_PF11_FUNC_DCMI_D12 0x5b0e
+#define STM32F429_PF11_FUNC_EVENTOUT 0x5b10
+#define STM32F429_PF11_FUNC_ANALOG 0x5b11
+
+#define STM32F429_PF12_FUNC_GPIO 0x5c00
+#define STM32F429_PF12_FUNC_FMC_A6 0x5c0d
+#define STM32F429_PF12_FUNC_EVENTOUT 0x5c10
+#define STM32F429_PF12_FUNC_ANALOG 0x5c11
+
+#define STM32F429_PF13_FUNC_GPIO 0x5d00
+#define STM32F429_PF13_FUNC_FMC_A7 0x5d0d
+#define STM32F429_PF13_FUNC_EVENTOUT 0x5d10
+#define STM32F429_PF13_FUNC_ANALOG 0x5d11
+
+#define STM32F429_PF14_FUNC_GPIO 0x5e00
+#define STM32F429_PF14_FUNC_FMC_A8 0x5e0d
+#define STM32F429_PF14_FUNC_EVENTOUT 0x5e10
+#define STM32F429_PF14_FUNC_ANALOG 0x5e11
+
+#define STM32F429_PF15_FUNC_GPIO 0x5f00
+#define STM32F429_PF15_FUNC_FMC_A9 0x5f0d
+#define STM32F429_PF15_FUNC_EVENTOUT 0x5f10
+#define STM32F429_PF15_FUNC_ANALOG 0x5f11
+
+
+
+#define STM32F429_PG0_FUNC_GPIO 0x6000
+#define STM32F429_PG0_FUNC_FMC_A10 0x600d
+#define STM32F429_PG0_FUNC_EVENTOUT 0x6010
+#define STM32F429_PG0_FUNC_ANALOG 0x6011
+
+#define STM32F429_PG1_FUNC_GPIO 0x6100
+#define STM32F429_PG1_FUNC_FMC_A11 0x610d
+#define STM32F429_PG1_FUNC_EVENTOUT 0x6110
+#define STM32F429_PG1_FUNC_ANALOG 0x6111
+
+#define STM32F429_PG2_FUNC_GPIO 0x6200
+#define STM32F429_PG2_FUNC_FMC_A12 0x620d
+#define STM32F429_PG2_FUNC_EVENTOUT 0x6210
+#define STM32F429_PG2_FUNC_ANALOG 0x6211
+
+#define STM32F429_PG3_FUNC_GPIO 0x6300
+#define STM32F429_PG3_FUNC_FMC_A13 0x630d
+#define STM32F429_PG3_FUNC_EVENTOUT 0x6310
+#define STM32F429_PG3_FUNC_ANALOG 0x6311
+
+#define STM32F429_PG4_FUNC_GPIO 0x6400
+#define STM32F429_PG4_FUNC_FMC_A14_FMC_BA0 0x640d
+#define STM32F429_PG4_FUNC_EVENTOUT 0x6410
+#define STM32F429_PG4_FUNC_ANALOG 0x6411
+
+#define STM32F429_PG5_FUNC_GPIO 0x6500
+#define STM32F429_PG5_FUNC_FMC_A15_FMC_BA1 0x650d
+#define STM32F429_PG5_FUNC_EVENTOUT 0x6510
+#define STM32F429_PG5_FUNC_ANALOG 0x6511
+
+#define STM32F429_PG6_FUNC_GPIO 0x6600
+#define STM32F429_PG6_FUNC_FMC_INT2 0x660d
+#define STM32F429_PG6_FUNC_DCMI_D12 0x660e
+#define STM32F429_PG6_FUNC_LCD_R7 0x660f
+#define STM32F429_PG6_FUNC_EVENTOUT 0x6610
+#define STM32F429_PG6_FUNC_ANALOG 0x6611
+
+#define STM32F429_PG7_FUNC_GPIO 0x6700
+#define STM32F429_PG7_FUNC_USART6_CK 0x6709
+#define STM32F429_PG7_FUNC_FMC_INT3 0x670d
+#define STM32F429_PG7_FUNC_DCMI_D13 0x670e
+#define STM32F429_PG7_FUNC_LCD_CLK 0x670f
+#define STM32F429_PG7_FUNC_EVENTOUT 0x6710
+#define STM32F429_PG7_FUNC_ANALOG 0x6711
+
+#define STM32F429_PG8_FUNC_GPIO 0x6800
+#define STM32F429_PG8_FUNC_SPI6_NSS 0x6806
+#define STM32F429_PG8_FUNC_USART6_RTS 0x6809
+#define STM32F429_PG8_FUNC_ETH_PPS_OUT 0x680c
+#define STM32F429_PG8_FUNC_FMC_SDCLK 0x680d
+#define STM32F429_PG8_FUNC_EVENTOUT 0x6810
+#define STM32F429_PG8_FUNC_ANALOG 0x6811
+
+#define STM32F429_PG9_FUNC_GPIO 0x6900
+#define STM32F429_PG9_FUNC_USART6_RX 0x6909
+#define STM32F429_PG9_FUNC_FMC_NE2_FMC_NCE3 0x690d
+#define STM32F429_PG9_FUNC_DCMI_VSYNC 0x690e
+#define STM32F429_PG9_FUNC_EVENTOUT 0x6910
+#define STM32F429_PG9_FUNC_ANALOG 0x6911
+
+#define STM32F429_PG10_FUNC_GPIO 0x6a00
+#define STM32F429_PG10_FUNC_LCD_G3 0x6a0a
+#define STM32F429_PG10_FUNC_FMC_NCE4_1_FMC_NE3 0x6a0d
+#define STM32F429_PG10_FUNC_DCMI_D2 0x6a0e
+#define STM32F429_PG10_FUNC_LCD_B2 0x6a0f
+#define STM32F429_PG10_FUNC_EVENTOUT 0x6a10
+#define STM32F429_PG10_FUNC_ANALOG 0x6a11
+
+#define STM32F429_PG11_FUNC_GPIO 0x6b00
+#define STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN 0x6b0c
+#define STM32F429_PG11_FUNC_FMC_NCE4_2 0x6b0d
+#define STM32F429_PG11_FUNC_DCMI_D3 0x6b0e
+#define STM32F429_PG11_FUNC_LCD_B3 0x6b0f
+#define STM32F429_PG11_FUNC_EVENTOUT 0x6b10
+#define STM32F429_PG11_FUNC_ANALOG 0x6b11
+
+#define STM32F429_PG12_FUNC_GPIO 0x6c00
+#define STM32F429_PG12_FUNC_SPI6_MISO 0x6c06
+#define STM32F429_PG12_FUNC_USART6_RTS 0x6c09
+#define STM32F429_PG12_FUNC_LCD_B4 0x6c0a
+#define STM32F429_PG12_FUNC_FMC_NE4 0x6c0d
+#define STM32F429_PG12_FUNC_LCD_B1 0x6c0f
+#define STM32F429_PG12_FUNC_EVENTOUT 0x6c10
+#define STM32F429_PG12_FUNC_ANALOG 0x6c11
+
+#define STM32F429_PG13_FUNC_GPIO 0x6d00
+#define STM32F429_PG13_FUNC_SPI6_SCK 0x6d06
+#define STM32F429_PG13_FUNC_USART6_CTS 0x6d09
+#define STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0 0x6d0c
+#define STM32F429_PG13_FUNC_FMC_A24 0x6d0d
+#define STM32F429_PG13_FUNC_EVENTOUT 0x6d10
+#define STM32F429_PG13_FUNC_ANALOG 0x6d11
+
+#define STM32F429_PG14_FUNC_GPIO 0x6e00
+#define STM32F429_PG14_FUNC_SPI6_MOSI 0x6e06
+#define STM32F429_PG14_FUNC_USART6_TX 0x6e09
+#define STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1 0x6e0c
+#define STM32F429_PG14_FUNC_FMC_A25 0x6e0d
+#define STM32F429_PG14_FUNC_EVENTOUT 0x6e10
+#define STM32F429_PG14_FUNC_ANALOG 0x6e11
+
+#define STM32F429_PG15_FUNC_GPIO 0x6f00
+#define STM32F429_PG15_FUNC_USART6_CTS 0x6f09
+#define STM32F429_PG15_FUNC_FMC_SDNCAS 0x6f0d
+#define STM32F429_PG15_FUNC_DCMI_D13 0x6f0e
+#define STM32F429_PG15_FUNC_EVENTOUT 0x6f10
+#define STM32F429_PG15_FUNC_ANALOG 0x6f11
+
+
+
+#define STM32F429_PH0_FUNC_GPIO 0x7000
+#define STM32F429_PH0_FUNC_EVENTOUT 0x7010
+#define STM32F429_PH0_FUNC_ANALOG 0x7011
+
+#define STM32F429_PH1_FUNC_GPIO 0x7100
+#define STM32F429_PH1_FUNC_EVENTOUT 0x7110
+#define STM32F429_PH1_FUNC_ANALOG 0x7111
+
+#define STM32F429_PH2_FUNC_GPIO 0x7200
+#define STM32F429_PH2_FUNC_ETH_MII_CRS 0x720c
+#define STM32F429_PH2_FUNC_FMC_SDCKE0 0x720d
+#define STM32F429_PH2_FUNC_LCD_R0 0x720f
+#define STM32F429_PH2_FUNC_EVENTOUT 0x7210
+#define STM32F429_PH2_FUNC_ANALOG 0x7211
+
+#define STM32F429_PH3_FUNC_GPIO 0x7300
+#define STM32F429_PH3_FUNC_ETH_MII_COL 0x730c
+#define STM32F429_PH3_FUNC_FMC_SDNE0 0x730d
+#define STM32F429_PH3_FUNC_LCD_R1 0x730f
+#define STM32F429_PH3_FUNC_EVENTOUT 0x7310
+#define STM32F429_PH3_FUNC_ANALOG 0x7311
+
+#define STM32F429_PH4_FUNC_GPIO 0x7400
+#define STM32F429_PH4_FUNC_I2C2_SCL 0x7405
+#define STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT 0x740b
+#define STM32F429_PH4_FUNC_EVENTOUT 0x7410
+#define STM32F429_PH4_FUNC_ANALOG 0x7411
+
+#define STM32F429_PH5_FUNC_GPIO 0x7500
+#define STM32F429_PH5_FUNC_I2C2_SDA 0x7505
+#define STM32F429_PH5_FUNC_SPI5_NSS 0x7506
+#define STM32F429_PH5_FUNC_FMC_SDNWE 0x750d
+#define STM32F429_PH5_FUNC_EVENTOUT 0x7510
+#define STM32F429_PH5_FUNC_ANALOG 0x7511
+
+#define STM32F429_PH6_FUNC_GPIO 0x7600
+#define STM32F429_PH6_FUNC_I2C2_SMBA 0x7605
+#define STM32F429_PH6_FUNC_SPI5_SCK 0x7606
+#define STM32F429_PH6_FUNC_TIM12_CH1 0x760a
+#define STM32F429_PH6_FUNC_ETH_MII_RXD2 0x760c
+#define STM32F429_PH6_FUNC_FMC_SDNE1 0x760d
+#define STM32F429_PH6_FUNC_DCMI_D8 0x760e
+#define STM32F429_PH6_FUNC_EVENTOUT 0x7610
+#define STM32F429_PH6_FUNC_ANALOG 0x7611
+
+#define STM32F429_PH7_FUNC_GPIO 0x7700
+#define STM32F429_PH7_FUNC_I2C3_SCL 0x7705
+#define STM32F429_PH7_FUNC_SPI5_MISO 0x7706
+#define STM32F429_PH7_FUNC_ETH_MII_RXD3 0x770c
+#define STM32F429_PH7_FUNC_FMC_SDCKE1 0x770d
+#define STM32F429_PH7_FUNC_DCMI_D9 0x770e
+#define STM32F429_PH7_FUNC_EVENTOUT 0x7710
+#define STM32F429_PH7_FUNC_ANALOG 0x7711
+
+#define STM32F429_PH8_FUNC_GPIO 0x7800
+#define STM32F429_PH8_FUNC_I2C3_SDA 0x7805
+#define STM32F429_PH8_FUNC_FMC_D16 0x780d
+#define STM32F429_PH8_FUNC_DCMI_HSYNC 0x780e
+#define STM32F429_PH8_FUNC_LCD_R2 0x780f
+#define STM32F429_PH8_FUNC_EVENTOUT 0x7810
+#define STM32F429_PH8_FUNC_ANALOG 0x7811
+
+#define STM32F429_PH9_FUNC_GPIO 0x7900
+#define STM32F429_PH9_FUNC_I2C3_SMBA 0x7905
+#define STM32F429_PH9_FUNC_TIM12_CH2 0x790a
+#define STM32F429_PH9_FUNC_FMC_D17 0x790d
+#define STM32F429_PH9_FUNC_DCMI_D0 0x790e
+#define STM32F429_PH9_FUNC_LCD_R3 0x790f
+#define STM32F429_PH9_FUNC_EVENTOUT 0x7910
+#define STM32F429_PH9_FUNC_ANALOG 0x7911
+
+#define STM32F429_PH10_FUNC_GPIO 0x7a00
+#define STM32F429_PH10_FUNC_TIM5_CH1 0x7a03
+#define STM32F429_PH10_FUNC_FMC_D18 0x7a0d
+#define STM32F429_PH10_FUNC_DCMI_D1 0x7a0e
+#define STM32F429_PH10_FUNC_LCD_R4 0x7a0f
+#define STM32F429_PH10_FUNC_EVENTOUT 0x7a10
+#define STM32F429_PH10_FUNC_ANALOG 0x7a11
+
+#define STM32F429_PH11_FUNC_GPIO 0x7b00
+#define STM32F429_PH11_FUNC_TIM5_CH2 0x7b03
+#define STM32F429_PH11_FUNC_FMC_D19 0x7b0d
+#define STM32F429_PH11_FUNC_DCMI_D2 0x7b0e
+#define STM32F429_PH11_FUNC_LCD_R5 0x7b0f
+#define STM32F429_PH11_FUNC_EVENTOUT 0x7b10
+#define STM32F429_PH11_FUNC_ANALOG 0x7b11
+
+#define STM32F429_PH12_FUNC_GPIO 0x7c00
+#define STM32F429_PH12_FUNC_TIM5_CH3 0x7c03
+#define STM32F429_PH12_FUNC_FMC_D20 0x7c0d
+#define STM32F429_PH12_FUNC_DCMI_D3 0x7c0e
+#define STM32F429_PH12_FUNC_LCD_R6 0x7c0f
+#define STM32F429_PH12_FUNC_EVENTOUT 0x7c10
+#define STM32F429_PH12_FUNC_ANALOG 0x7c11
+
+#define STM32F429_PH13_FUNC_GPIO 0x7d00
+#define STM32F429_PH13_FUNC_TIM8_CH1N 0x7d04
+#define STM32F429_PH13_FUNC_CAN1_TX 0x7d0a
+#define STM32F429_PH13_FUNC_FMC_D21 0x7d0d
+#define STM32F429_PH13_FUNC_LCD_G2 0x7d0f
+#define STM32F429_PH13_FUNC_EVENTOUT 0x7d10
+#define STM32F429_PH13_FUNC_ANALOG 0x7d11
+
+#define STM32F429_PH14_FUNC_GPIO 0x7e00
+#define STM32F429_PH14_FUNC_TIM8_CH2N 0x7e04
+#define STM32F429_PH14_FUNC_FMC_D22 0x7e0d
+#define STM32F429_PH14_FUNC_DCMI_D4 0x7e0e
+#define STM32F429_PH14_FUNC_LCD_G3 0x7e0f
+#define STM32F429_PH14_FUNC_EVENTOUT 0x7e10
+#define STM32F429_PH14_FUNC_ANALOG 0x7e11
+
+#define STM32F429_PH15_FUNC_GPIO 0x7f00
+#define STM32F429_PH15_FUNC_TIM8_CH3N 0x7f04
+#define STM32F429_PH15_FUNC_FMC_D23 0x7f0d
+#define STM32F429_PH15_FUNC_DCMI_D11 0x7f0e
+#define STM32F429_PH15_FUNC_LCD_G4 0x7f0f
+#define STM32F429_PH15_FUNC_EVENTOUT 0x7f10
+#define STM32F429_PH15_FUNC_ANALOG 0x7f11
+
+
+
+#define STM32F429_PI0_FUNC_GPIO 0x8000
+#define STM32F429_PI0_FUNC_TIM5_CH4 0x8003
+#define STM32F429_PI0_FUNC_SPI2_NSS_I2S2_WS 0x8006
+#define STM32F429_PI0_FUNC_FMC_D24 0x800d
+#define STM32F429_PI0_FUNC_DCMI_D13 0x800e
+#define STM32F429_PI0_FUNC_LCD_G5 0x800f
+#define STM32F429_PI0_FUNC_EVENTOUT 0x8010
+#define STM32F429_PI0_FUNC_ANALOG 0x8011
+
+#define STM32F429_PI1_FUNC_GPIO 0x8100
+#define STM32F429_PI1_FUNC_SPI2_SCK_I2S2_CK 0x8106
+#define STM32F429_PI1_FUNC_FMC_D25 0x810d
+#define STM32F429_PI1_FUNC_DCMI_D8 0x810e
+#define STM32F429_PI1_FUNC_LCD_G6 0x810f
+#define STM32F429_PI1_FUNC_EVENTOUT 0x8110
+#define STM32F429_PI1_FUNC_ANALOG 0x8111
+
+#define STM32F429_PI2_FUNC_GPIO 0x8200
+#define STM32F429_PI2_FUNC_TIM8_CH4 0x8204
+#define STM32F429_PI2_FUNC_SPI2_MISO 0x8206
+#define STM32F429_PI2_FUNC_I2S2EXT_SD 0x8207
+#define STM32F429_PI2_FUNC_FMC_D26 0x820d
+#define STM32F429_PI2_FUNC_DCMI_D9 0x820e
+#define STM32F429_PI2_FUNC_LCD_G7 0x820f
+#define STM32F429_PI2_FUNC_EVENTOUT 0x8210
+#define STM32F429_PI2_FUNC_ANALOG 0x8211
+
+#define STM32F429_PI3_FUNC_GPIO 0x8300
+#define STM32F429_PI3_FUNC_TIM8_ETR 0x8304
+#define STM32F429_PI3_FUNC_SPI2_MOSI_I2S2_SD 0x8306
+#define STM32F429_PI3_FUNC_FMC_D27 0x830d
+#define STM32F429_PI3_FUNC_DCMI_D10 0x830e
+#define STM32F429_PI3_FUNC_EVENTOUT 0x8310
+#define STM32F429_PI3_FUNC_ANALOG 0x8311
+
+#define STM32F429_PI4_FUNC_GPIO 0x8400
+#define STM32F429_PI4_FUNC_TIM8_BKIN 0x8404
+#define STM32F429_PI4_FUNC_FMC_NBL2 0x840d
+#define STM32F429_PI4_FUNC_DCMI_D5 0x840e
+#define STM32F429_PI4_FUNC_LCD_B4 0x840f
+#define STM32F429_PI4_FUNC_EVENTOUT 0x8410
+#define STM32F429_PI4_FUNC_ANALOG 0x8411
+
+#define STM32F429_PI5_FUNC_GPIO 0x8500
+#define STM32F429_PI5_FUNC_TIM8_CH1 0x8504
+#define STM32F429_PI5_FUNC_FMC_NBL3 0x850d
+#define STM32F429_PI5_FUNC_DCMI_VSYNC 0x850e
+#define STM32F429_PI5_FUNC_LCD_B5 0x850f
+#define STM32F429_PI5_FUNC_EVENTOUT 0x8510
+#define STM32F429_PI5_FUNC_ANALOG 0x8511
+
+#define STM32F429_PI6_FUNC_GPIO 0x8600
+#define STM32F429_PI6_FUNC_TIM8_CH2 0x8604
+#define STM32F429_PI6_FUNC_FMC_D28 0x860d
+#define STM32F429_PI6_FUNC_DCMI_D6 0x860e
+#define STM32F429_PI6_FUNC_LCD_B6 0x860f
+#define STM32F429_PI6_FUNC_EVENTOUT 0x8610
+#define STM32F429_PI6_FUNC_ANALOG 0x8611
+
+#define STM32F429_PI7_FUNC_GPIO 0x8700
+#define STM32F429_PI7_FUNC_TIM8_CH3 0x8704
+#define STM32F429_PI7_FUNC_FMC_D29 0x870d
+#define STM32F429_PI7_FUNC_DCMI_D7 0x870e
+#define STM32F429_PI7_FUNC_LCD_B7 0x870f
+#define STM32F429_PI7_FUNC_EVENTOUT 0x8710
+#define STM32F429_PI7_FUNC_ANALOG 0x8711
+
+#define STM32F429_PI8_FUNC_GPIO 0x8800
+#define STM32F429_PI8_FUNC_EVENTOUT 0x8810
+#define STM32F429_PI8_FUNC_ANALOG 0x8811
+
+#define STM32F429_PI9_FUNC_GPIO 0x8900
+#define STM32F429_PI9_FUNC_CAN1_RX 0x890a
+#define STM32F429_PI9_FUNC_FMC_D30 0x890d
+#define STM32F429_PI9_FUNC_LCD_VSYNC 0x890f
+#define STM32F429_PI9_FUNC_EVENTOUT 0x8910
+#define STM32F429_PI9_FUNC_ANALOG 0x8911
+
+#define STM32F429_PI10_FUNC_GPIO 0x8a00
+#define STM32F429_PI10_FUNC_ETH_MII_RX_ER 0x8a0c
+#define STM32F429_PI10_FUNC_FMC_D31 0x8a0d
+#define STM32F429_PI10_FUNC_LCD_HSYNC 0x8a0f
+#define STM32F429_PI10_FUNC_EVENTOUT 0x8a10
+#define STM32F429_PI10_FUNC_ANALOG 0x8a11
+
+#define STM32F429_PI11_FUNC_GPIO 0x8b00
+#define STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR 0x8b0b
+#define STM32F429_PI11_FUNC_EVENTOUT 0x8b10
+#define STM32F429_PI11_FUNC_ANALOG 0x8b11
+
+#define STM32F429_PI12_FUNC_GPIO 0x8c00
+#define STM32F429_PI12_FUNC_LCD_HSYNC 0x8c0f
+#define STM32F429_PI12_FUNC_EVENTOUT 0x8c10
+#define STM32F429_PI12_FUNC_ANALOG 0x8c11
+
+#define STM32F429_PI13_FUNC_GPIO 0x8d00
+#define STM32F429_PI13_FUNC_LCD_VSYNC 0x8d0f
+#define STM32F429_PI13_FUNC_EVENTOUT 0x8d10
+#define STM32F429_PI13_FUNC_ANALOG 0x8d11
+
+#define STM32F429_PI14_FUNC_GPIO 0x8e00
+#define STM32F429_PI14_FUNC_LCD_CLK 0x8e0f
+#define STM32F429_PI14_FUNC_EVENTOUT 0x8e10
+#define STM32F429_PI14_FUNC_ANALOG 0x8e11
+
+#define STM32F429_PI15_FUNC_GPIO 0x8f00
+#define STM32F429_PI15_FUNC_LCD_R0 0x8f0f
+#define STM32F429_PI15_FUNC_EVENTOUT 0x8f10
+#define STM32F429_PI15_FUNC_ANALOG 0x8f11
+
+
+
+#define STM32F429_PJ0_FUNC_GPIO 0x9000
+#define STM32F429_PJ0_FUNC_LCD_R1 0x900f
+#define STM32F429_PJ0_FUNC_EVENTOUT 0x9010
+#define STM32F429_PJ0_FUNC_ANALOG 0x9011
+
+#define STM32F429_PJ1_FUNC_GPIO 0x9100
+#define STM32F429_PJ1_FUNC_LCD_R2 0x910f
+#define STM32F429_PJ1_FUNC_EVENTOUT 0x9110
+#define STM32F429_PJ1_FUNC_ANALOG 0x9111
+
+#define STM32F429_PJ2_FUNC_GPIO 0x9200
+#define STM32F429_PJ2_FUNC_LCD_R3 0x920f
+#define STM32F429_PJ2_FUNC_EVENTOUT 0x9210
+#define STM32F429_PJ2_FUNC_ANALOG 0x9211
+
+#define STM32F429_PJ3_FUNC_GPIO 0x9300
+#define STM32F429_PJ3_FUNC_LCD_R4 0x930f
+#define STM32F429_PJ3_FUNC_EVENTOUT 0x9310
+#define STM32F429_PJ3_FUNC_ANALOG 0x9311
+
+#define STM32F429_PJ4_FUNC_GPIO 0x9400
+#define STM32F429_PJ4_FUNC_LCD_R5 0x940f
+#define STM32F429_PJ4_FUNC_EVENTOUT 0x9410
+#define STM32F429_PJ4_FUNC_ANALOG 0x9411
+
+#define STM32F429_PJ5_FUNC_GPIO 0x9500
+#define STM32F429_PJ5_FUNC_LCD_R6 0x950f
+#define STM32F429_PJ5_FUNC_EVENTOUT 0x9510
+#define STM32F429_PJ5_FUNC_ANALOG 0x9511
+
+#define STM32F429_PJ6_FUNC_GPIO 0x9600
+#define STM32F429_PJ6_FUNC_LCD_R7 0x960f
+#define STM32F429_PJ6_FUNC_EVENTOUT 0x9610
+#define STM32F429_PJ6_FUNC_ANALOG 0x9611
+
+#define STM32F429_PJ7_FUNC_GPIO 0x9700
+#define STM32F429_PJ7_FUNC_LCD_G0 0x970f
+#define STM32F429_PJ7_FUNC_EVENTOUT 0x9710
+#define STM32F429_PJ7_FUNC_ANALOG 0x9711
+
+#define STM32F429_PJ8_FUNC_GPIO 0x9800
+#define STM32F429_PJ8_FUNC_LCD_G1 0x980f
+#define STM32F429_PJ8_FUNC_EVENTOUT 0x9810
+#define STM32F429_PJ8_FUNC_ANALOG 0x9811
+
+#define STM32F429_PJ9_FUNC_GPIO 0x9900
+#define STM32F429_PJ9_FUNC_LCD_G2 0x990f
+#define STM32F429_PJ9_FUNC_EVENTOUT 0x9910
+#define STM32F429_PJ9_FUNC_ANALOG 0x9911
+
+#define STM32F429_PJ10_FUNC_GPIO 0x9a00
+#define STM32F429_PJ10_FUNC_LCD_G3 0x9a0f
+#define STM32F429_PJ10_FUNC_EVENTOUT 0x9a10
+#define STM32F429_PJ10_FUNC_ANALOG 0x9a11
+
+#define STM32F429_PJ11_FUNC_GPIO 0x9b00
+#define STM32F429_PJ11_FUNC_LCD_G4 0x9b0f
+#define STM32F429_PJ11_FUNC_EVENTOUT 0x9b10
+#define STM32F429_PJ11_FUNC_ANALOG 0x9b11
+
+#define STM32F429_PJ12_FUNC_GPIO 0x9c00
+#define STM32F429_PJ12_FUNC_LCD_B0 0x9c0f
+#define STM32F429_PJ12_FUNC_EVENTOUT 0x9c10
+#define STM32F429_PJ12_FUNC_ANALOG 0x9c11
+
+#define STM32F429_PJ13_FUNC_GPIO 0x9d00
+#define STM32F429_PJ13_FUNC_LCD_B1 0x9d0f
+#define STM32F429_PJ13_FUNC_EVENTOUT 0x9d10
+#define STM32F429_PJ13_FUNC_ANALOG 0x9d11
+
+#define STM32F429_PJ14_FUNC_GPIO 0x9e00
+#define STM32F429_PJ14_FUNC_LCD_B2 0x9e0f
+#define STM32F429_PJ14_FUNC_EVENTOUT 0x9e10
+#define STM32F429_PJ14_FUNC_ANALOG 0x9e11
+
+#define STM32F429_PJ15_FUNC_GPIO 0x9f00
+#define STM32F429_PJ15_FUNC_LCD_B3 0x9f0f
+#define STM32F429_PJ15_FUNC_EVENTOUT 0x9f10
+#define STM32F429_PJ15_FUNC_ANALOG 0x9f11
+
+
+
+#define STM32F429_PK0_FUNC_GPIO 0xa000
+#define STM32F429_PK0_FUNC_LCD_G5 0xa00f
+#define STM32F429_PK0_FUNC_EVENTOUT 0xa010
+#define STM32F429_PK0_FUNC_ANALOG 0xa011
+
+#define STM32F429_PK1_FUNC_GPIO 0xa100
+#define STM32F429_PK1_FUNC_LCD_G6 0xa10f
+#define STM32F429_PK1_FUNC_EVENTOUT 0xa110
+#define STM32F429_PK1_FUNC_ANALOG 0xa111
+
+#define STM32F429_PK2_FUNC_GPIO 0xa200
+#define STM32F429_PK2_FUNC_LCD_G7 0xa20f
+#define STM32F429_PK2_FUNC_EVENTOUT 0xa210
+#define STM32F429_PK2_FUNC_ANALOG 0xa211
+
+#define STM32F429_PK3_FUNC_GPIO 0xa300
+#define STM32F429_PK3_FUNC_LCD_B4 0xa30f
+#define STM32F429_PK3_FUNC_EVENTOUT 0xa310
+#define STM32F429_PK3_FUNC_ANALOG 0xa311
+
+#define STM32F429_PK4_FUNC_GPIO 0xa400
+#define STM32F429_PK4_FUNC_LCD_B5 0xa40f
+#define STM32F429_PK4_FUNC_EVENTOUT 0xa410
+#define STM32F429_PK4_FUNC_ANALOG 0xa411
+
+#define STM32F429_PK5_FUNC_GPIO 0xa500
+#define STM32F429_PK5_FUNC_LCD_B6 0xa50f
+#define STM32F429_PK5_FUNC_EVENTOUT 0xa510
+#define STM32F429_PK5_FUNC_ANALOG 0xa511
+
+#define STM32F429_PK6_FUNC_GPIO 0xa600
+#define STM32F429_PK6_FUNC_LCD_B7 0xa60f
+#define STM32F429_PK6_FUNC_EVENTOUT 0xa610
+#define STM32F429_PK6_FUNC_ANALOG 0xa611
+
+#define STM32F429_PK7_FUNC_GPIO 0xa700
+#define STM32F429_PK7_FUNC_LCD_DE 0xa70f
+#define STM32F429_PK7_FUNC_EVENTOUT 0xa710
+#define STM32F429_PK7_FUNC_ANALOG 0xa711
+
+#endif /* _DT_BINDINGS_STM32F429_PINFUNC_H */
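
The values above appear to follow a simple <pin><mode> encoding: the upper byte is the bank/pin index (port J is bank 9, so PJ1 is 0x91) and the lower byte selects the mode (0 for GPIO, 1..16 for AF0..AF15, 17 for analog; LCD_R2 on PJ1 is AF14, hence 0x0f). A minimal decoding sketch, assuming that encoding holds; these helper macros are illustrative and not part of the header:

/* Illustrative only, assuming the <pin><mode> encoding described above. */
#define EXAMPLE_STM32_PIN(val)   (((val) >> 8) & 0xff)  /* 0x91 -> bank 9 (port J), pin 1 */
#define EXAMPLE_STM32_MODE(val)  ((val) & 0xff)         /* 0 = GPIO, n = AF(n - 1), 17 = analog */
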
diff --git a/include/dt-bindings/power/rk3368-power.h b/include/dt-bindings/power/rk3368-power.h
new file mode 100644
index 000000000000..93633d57ed84
--- /dev/null
+++ b/include/dt-bindings/power/rk3368-power.h
@@ -0,0 +1,28 @@
+#ifndef __DT_BINDINGS_POWER_RK3368_POWER_H__
+#define __DT_BINDINGS_POWER_RK3368_POWER_H__
+
+/* VD_CORE */
+#define RK3368_PD_A53_L0 0
+#define RK3368_PD_A53_L1 1
+#define RK3368_PD_A53_L2 2
+#define RK3368_PD_A53_L3 3
+#define RK3368_PD_SCU_L 4
+#define RK3368_PD_A53_B0 5
+#define RK3368_PD_A53_B1 6
+#define RK3368_PD_A53_B2 7
+#define RK3368_PD_A53_B3 8
+#define RK3368_PD_SCU_B 9
+
+/* VD_LOGIC */
+#define RK3368_PD_BUS 10
+#define RK3368_PD_PERI 11
+#define RK3368_PD_VIO 12
+#define RK3368_PD_ALIVE 13
+#define RK3368_PD_VIDEO 14
+#define RK3368_PD_GPU_0 15
+#define RK3368_PD_GPU_1 16
+
+/* VD_PMU */
+#define RK3368_PD_PMU 17
+
+#endif
diff --git a/include/dt-bindings/reset/pistachio-resets.h b/include/dt-bindings/reset/pistachio-resets.h
new file mode 100644
index 000000000000..60a189b1faef
--- /dev/null
+++ b/include/dt-bindings/reset/pistachio-resets.h
@@ -0,0 +1,36 @@
+/*
+ * This header provides constants for the reset controller
+ * present in the Pistachio SoC
+ */
+
+#ifndef _PISTACHIO_RESETS_H
+#define _PISTACHIO_RESETS_H
+
+#define PISTACHIO_RESET_I2C0 0
+#define PISTACHIO_RESET_I2C1 1
+#define PISTACHIO_RESET_I2C2 2
+#define PISTACHIO_RESET_I2C3 3
+#define PISTACHIO_RESET_I2S_IN 4
+#define PISTACHIO_RESET_PRL_OUT 5
+#define PISTACHIO_RESET_SPDIF_OUT 6
+#define PISTACHIO_RESET_SPI 7
+#define PISTACHIO_RESET_PWM_PDM 8
+#define PISTACHIO_RESET_UART0 9
+#define PISTACHIO_RESET_UART1 10
+#define PISTACHIO_RESET_QSPI 11
+#define PISTACHIO_RESET_MDC 12
+#define PISTACHIO_RESET_SDHOST 13
+#define PISTACHIO_RESET_ETHERNET 14
+#define PISTACHIO_RESET_IR 15
+#define PISTACHIO_RESET_HASH 16
+#define PISTACHIO_RESET_TIMER 17
+#define PISTACHIO_RESET_I2S_OUT 18
+#define PISTACHIO_RESET_SPDIF_IN 19
+#define PISTACHIO_RESET_EVT 20
+#define PISTACHIO_RESET_USB_H 21
+#define PISTACHIO_RESET_USB_PR 22
+#define PISTACHIO_RESET_USB_PHY_PR 23
+#define PISTACHIO_RESET_USB_PHY_PON 24
+#define PISTACHIO_RESET_MAX 24
+
+#endif
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
new file mode 100644
index 000000000000..b2d32e01dfe4
--- /dev/null
+++ b/include/linux/apple-gmux.h
@@ -0,0 +1,50 @@
+/*
+ * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
+ * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LINUX_APPLE_GMUX_H
+#define LINUX_APPLE_GMUX_H
+
+#include <linux/acpi.h>
+
+#define GMUX_ACPI_HID "APP000B"
+
+#if IS_ENABLED(CONFIG_APPLE_GMUX)
+
+/**
+ * apple_gmux_present() - detect if gmux is built into the machine
+ *
+ * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
+ * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
+ *
+ * Return: %true if gmux is present and the kernel was configured
+ * with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_present(void)
+{
+ return acpi_dev_present(GMUX_ACPI_HID);
+}
+
+#else /* !CONFIG_APPLE_GMUX */
+
+static inline bool apple_gmux_present(void)
+{
+ return false;
+}
+
+#endif /* !CONFIG_APPLE_GMUX */
+
+#endif /* LINUX_APPLE_GMUX_H */
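
A minimal usage sketch, not part of the patch: a backlight or GPU driver can use apple_gmux_present() to step aside on machines where the gmux owns the backlight. The probe function below is purely illustrative.

static int example_backlight_probe(struct platform_device *pdev)
{
	/* On dual-GPU Macs the gmux drives the backlight, so bow out early. */
	if (apple_gmux_present())
		return -ENODEV;

	/* ... otherwise register a generic backlight device here ... */
	return 0;
}
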
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 6c502cb13c95..df4f369254c0 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -34,20 +34,29 @@
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
+ *
+ * Besides, if an arch has a special barrier for acquire/release, it could
+ * implement its own __atomic_op_* primitives and still use this framework
+ * for building the other variants.
*/
+#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
+#endif
+#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
+#endif
+#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
@@ -56,6 +65,7 @@
smp_mb__after_atomic(); \
__ret; \
})
+#endif
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
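
As a rough illustration of the framework described above (the function name is made up): when an architecture supplies only atomic_add_return_relaxed(), __atomic_op_acquire(atomic_add_return, ...) expands to approximately the following.

static inline int example_atomic_add_return_acquire(int i, atomic_t *v)
{
	int ret = atomic_add_return_relaxed(i, v);

	smp_mb__after_atomic();	/* upgrade the relaxed op to ACQUIRE ordering */
	return ret;
}
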
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3feb1b2d75d8..0367c63f5960 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -151,6 +151,8 @@ struct bcma_host_ops {
#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */
#define BCMA_CORE_USB30_DEV 0x83D
#define BCMA_CORE_ARM_CR4 0x83E
+#define BCMA_CORE_GCI 0x840
+#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */
#define BCMA_CORE_ARM_CA7 0x847
#define BCMA_CORE_SYS_MEM 0x849
#define BCMA_CORE_DEFAULT 0xFFF
@@ -199,6 +201,7 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM4707 1
#define BCMA_PKG_ID_BCM4708 2
#define BCMA_PKG_ID_BCM4709 0
+#define BCMA_CHIP_ID_BCM47094 53030
#define BCMA_CHIP_ID_BCM53018 53018
/* Board types (on PCI usually equals to the subsystem dev id) */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index db51a6ffb7d6..846513c73606 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -217,6 +217,11 @@
#define BCMA_CC_CLKDIV_JTAG_SHIFT 8
#define BCMA_CC_CLKDIV_UART 0x000000FF
#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */
+#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001
+#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002
+#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004
+#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */
+#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040
#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */
#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */
#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */
@@ -351,12 +356,12 @@
#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */
#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */
-#define BCMA_CC_CHIPCTL_ADDR 0x0650
-#define BCMA_CC_CHIPCTL_DATA 0x0654
-#define BCMA_CC_REGCTL_ADDR 0x0658
-#define BCMA_CC_REGCTL_DATA 0x065C
-#define BCMA_CC_PLLCTL_ADDR 0x0660
-#define BCMA_CC_PLLCTL_DATA 0x0664
+#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650
+#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654
+#define BCMA_CC_PMU_REGCTL_ADDR 0x0658
+#define BCMA_CC_PMU_REGCTL_DATA 0x065C
+#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660
+#define BCMA_CC_PMU_PLLCTL_DATA 0x0664
#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */
#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */
#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF
@@ -566,17 +571,16 @@
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
*/
struct bcma_chipcommon_pmu {
+ struct bcma_device *core; /* Can be a separate core or just the ChipCommon one */
u8 rev; /* PMU revision */
u32 crystalfreq; /* The active crystal frequency (in kHz) */
};
-#ifdef CONFIG_BCMA_DRIVER_MIPS
+#ifdef CONFIG_BCMA_PFLASH
struct bcma_pflash {
bool present;
- u8 buswidth;
- u32 window;
- u32 window_size;
};
+#endif
#ifdef CONFIG_BCMA_SFLASH
struct mtd_info;
@@ -600,6 +604,7 @@ struct bcma_nflash {
};
#endif
+#ifdef CONFIG_BCMA_DRIVER_MIPS
struct bcma_serial_port {
void *regs;
unsigned long clockspeed;
@@ -619,8 +624,9 @@ struct bcma_drv_cc {
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
struct bcma_chipcommon_pmu pmu;
-#ifdef CONFIG_BCMA_DRIVER_MIPS
+#ifdef CONFIG_BCMA_PFLASH
struct bcma_pflash pflash;
+#endif
#ifdef CONFIG_BCMA_SFLASH
struct bcma_sflash sflash;
#endif
@@ -628,6 +634,7 @@ struct bcma_drv_cc {
struct bcma_nflash nflash;
#endif
+#ifdef CONFIG_BCMA_DRIVER_MIPS
int nr_serial_ports;
struct bcma_serial_port serial_ports[4];
#endif /* CONFIG_BCMA_DRIVER_MIPS */
@@ -660,6 +667,19 @@ struct bcma_drv_cc_b {
#define bcma_cc_maskset32(cc, offset, mask, set) \
bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
+/* PMU registers access */
+#define bcma_pmu_read32(cc, offset) \
+ bcma_read32((cc)->pmu.core, offset)
+#define bcma_pmu_write32(cc, offset, val) \
+ bcma_write32((cc)->pmu.core, offset, val)
+
+#define bcma_pmu_mask32(cc, offset, mask) \
+ bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask))
+#define bcma_pmu_set32(cc, offset, set) \
+ bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set))
+#define bcma_pmu_maskset32(cc, offset, mask, set) \
+ bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set))
+
extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
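
A hedged sketch of how the new bcma_pmu_*32() accessors pair with the renamed BCMA_CC_PMU_* registers, routing the access through cc->pmu.core; the function name is illustrative.

static void example_pmu_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
	/* Indirect write: select the PLL control word, then write its data. */
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
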
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 9653fdb76a42..e9b0b9ab07e5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -59,6 +59,8 @@
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
+ * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *src (nwords 32b words)
*/
/*
@@ -163,6 +165,14 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
+extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
+ unsigned int nbits,
+ const u32 *buf,
+ unsigned int nwords);
+extern unsigned int bitmap_to_u32array(u32 *buf,
+ unsigned int nwords,
+ const unsigned long *bitmap,
+ unsigned int nbits);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
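
A minimal round-trip sketch for the two new conversion helpers; the function is illustrative, not from the patch.

static void example_bitmap_u32_roundtrip(void)
{
	DECLARE_BITMAP(mask, 64);
	u32 words[2] = { 0xdeadbeef, 0x00c0ffee };

	bitmap_from_u32array(mask, 64, words, 2);	/* mask <- words */
	__set_bit(5, mask);
	bitmap_to_u32array(words, 2, mask, 64);		/* words <- mask */
}
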
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 413c84fbc4ed..7e5d7e018bea 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -499,7 +499,8 @@ struct request_queue {
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+ (1 << QUEUE_FLAG_SAME_COMP) | \
+ (1 << QUEUE_FLAG_POLL))
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
@@ -1029,6 +1030,7 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
@@ -1039,6 +1041,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
/*
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 83d1926c61e4..21ee41b92e8a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/percpu.h>
struct bpf_map;
@@ -36,6 +37,7 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u32 map_flags;
u32 pages;
struct user_struct *user;
const struct bpf_map_ops *ops;
@@ -65,6 +67,7 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+ ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -151,6 +154,7 @@ struct bpf_array {
union {
char value[0] __aligned(8);
void *ptrs[0] __aligned(8);
+ void __percpu *pptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
@@ -161,6 +165,8 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
#ifdef CONFIG_BPF_SYSCALL
+DECLARE_PER_CPU(int, bpf_prog_active);
+
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
@@ -173,6 +179,7 @@ struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
+int bpf_map_precharge_memlock(u32 pages);
extern int sysctl_unprivileged_bpf_disabled;
@@ -182,6 +189,30 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname);
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+ u64 flags);
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+ u64 flags);
+int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+
+/* memcpy that is used with 8-byte aligned pointers, a multiple-of-8 size, and
+ * forced to use 'long' read/writes to try to atomically copy long counters.
+ * Best-effort only. No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ *ldst++ = *lsrc++;
+}
+
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
#else
@@ -213,6 +244,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
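
A hedged sketch of the intended use of bpf_long_memcpy(): snapshotting per-CPU map values into a flat buffer without tearing the long-sized counters. The function is illustrative and assumes size is already rounded up to a multiple of 8.

static void example_copy_percpu_value(void __percpu *pptr, void *value, u32 size)
{
	int cpu;
	u32 off = 0;

	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
}
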
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 89d9aa9e79bf..c67f052cc5e5 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -82,15 +82,15 @@ struct buffer_head {
* and buffer_foo() functions.
*/
#define BUFFER_FNS(bit, name) \
-static inline void set_buffer_##name(struct buffer_head *bh) \
+static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
set_bit(BH_##bit, &(bh)->b_state); \
} \
-static inline void clear_buffer_##name(struct buffer_head *bh) \
+static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
clear_bit(BH_##bit, &(bh)->b_state); \
} \
-static inline int buffer_##name(const struct buffer_head *bh) \
+static __always_inline int buffer_##name(const struct buffer_head *bh) \
{ \
return test_bit(BH_##bit, &(bh)->b_state); \
}
@@ -99,11 +99,11 @@ static inline int buffer_##name(const struct buffer_head *bh) \
* test_set_buffer_foo() and test_clear_buffer_foo()
*/
#define TAS_BUFFER_FNS(bit, name) \
-static inline int test_set_buffer_##name(struct buffer_head *bh) \
+static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{ \
return test_and_set_bit(BH_##bit, &(bh)->b_state); \
} \
-static inline int test_clear_buffer_##name(struct buffer_head *bh) \
+static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{ \
return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
} \
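
For reference, BUFFER_FNS(Uptodate, uptodate) now generates roughly the following sketch; __always_inline keeps these helpers inlined even in size-optimized builds.

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	return test_bit(BH_Uptodate, &bh->b_state);
}
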
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 7f4818673c41..e51b0709e78d 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -20,6 +20,7 @@ struct pt_regs;
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
#define BUILD_BUG() (0)
+#define MAYBE_BUILD_BUG_ON(cond) (0)
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
@@ -83,6 +84,14 @@ struct pt_regs;
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
+#define MAYBE_BUILD_BUG_ON(cond) \
+ do { \
+ if (__builtin_constant_p((cond))) \
+ BUILD_BUG_ON(cond); \
+ else \
+ BUG_ON(cond); \
+ } while (0)
+
#endif /* __CHECKER__ */
#ifdef CONFIG_GENERIC_BUG
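
A minimal usage sketch (illustrative function): a compile-time-constant condition is rejected at build time, while a non-constant one falls back to a runtime BUG_ON().

static void example_check(size_t runtime_len)
{
	MAYBE_BUILD_BUG_ON(sizeof(u64) != 8);		/* constant: build-time check */
	MAYBE_BUILD_BUG_ON(runtime_len > PAGE_SIZE);	/* not constant: runtime BUG_ON() */
}
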
diff --git a/include/linux/capability.h b/include/linux/capability.h
index f314275d4e3f..00690ff92edf 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -40,8 +40,6 @@ struct inode;
struct dentry;
struct user_namespace;
-struct user_namespace *current_user_ns(void);
-
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_init_eff_set;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 789471dba6fb..3e39ae5bc799 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -45,6 +45,7 @@ enum {
CSS_NO_REF = (1 << 0), /* no reference counting for this css */
CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+ CSS_VISIBLE = (1 << 3), /* css is visible to userland */
};
/* bits in struct cgroup flags field */
@@ -190,12 +191,13 @@ struct css_set {
/*
* If this cset is acting as the source of migration the following
- * two fields are set. mg_src_cgrp is the source cgroup of the
- * on-going migration and mg_dst_cset is the destination cset the
- * target tasks on this cset should be migrated to. Protected by
- * cgroup_mutex.
+ * two fields are set. mg_src_cgrp and mg_dst_cgrp are
+ * respectively the source and destination cgroups of the on-going
+ * migration. mg_dst_cset is the destination cset the target tasks
+ * on this cset should be migrated to. Protected by cgroup_mutex.
*/
struct cgroup *mg_src_cgrp;
+ struct cgroup *mg_dst_cgrp;
struct css_set *mg_dst_cset;
/*
@@ -210,6 +212,9 @@ struct css_set {
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
+ /* dead and being drained, ignore for migration */
+ bool dead;
+
/* For RCU-protected deletion */
struct rcu_head rcu_head;
};
@@ -253,13 +258,14 @@ struct cgroup {
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_subsys_mask is the
- * effective one which may have more subsystems enabled.
- * Controller knobs are made available iff it's enabled in
- * ->subtree_control.
+ * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * one which may have more subsystems enabled. Controller knobs
+ * are made available iff it's enabled in ->subtree_control.
*/
- unsigned int subtree_control;
- unsigned int child_subsys_mask;
+ u16 subtree_control;
+ u16 subtree_ss_mask;
+ u16 old_subtree_control;
+ u16 old_subtree_ss_mask;
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
@@ -434,7 +440,6 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
- void (*css_e_css_changed)(struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
@@ -446,7 +451,20 @@ struct cgroup_subsys {
void (*free)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
- int early_init;
+ bool early_init:1;
+
+ /*
+ * If %true, the controller, on the default hierarchy, doesn't show
+ * up in "cgroup.controllers" or "cgroup.subtree_control", is
+ * implicitly enabled on all cgroups on the default hierarchy, and
+ * bypasses the "no internal process" constraint. This is for
+ * utility type controllers that are transparent to userland.
+ *
+ * An implicit controller can be stolen from the default hierarchy
+ * anytime and thus must be okay with offline csses from previous
+ * hierarchies coexisting with csses for the current one.
+ */
+ bool implicit_on_dfl:1;
/*
* If %false, this subsystem is properly hierarchical -
@@ -460,8 +478,8 @@ struct cgroup_subsys {
* cases. Eventually, all subsystems will be made properly
* hierarchical and this will go away.
*/
- bool broken_hierarchy;
- bool warned_broken_hierarchy;
+ bool broken_hierarchy:1;
+ bool warned_broken_hierarchy:1;
/* the following two fields are initialized automatically during boot */
int id;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 2162dca88dc0..a20320c666fd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -17,6 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
+#include <linux/nsproxy.h>
+#include <linux/types.h>
+#include <linux/ns_common.h>
+#include <linux/user_namespace.h>
#include <linux/cgroup-defs.h>
@@ -611,4 +616,48 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
+struct cgroup_namespace {
+ atomic_t count;
+ struct ns_common ns;
+ struct user_namespace *user_ns;
+ struct css_set *root_cset;
+};
+
+extern struct cgroup_namespace init_cgroup_ns;
+
+#ifdef CONFIG_CGROUPS
+
+void free_cgroup_ns(struct cgroup_namespace *ns);
+
+struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
+ struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns);
+
+char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline struct cgroup_namespace *
+copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns)
+{
+ return old_ns;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns)
+ atomic_inc(&ns->count);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns && atomic_dec_and_test(&ns->count))
+ free_cgroup_ns(ns);
+}
+
#endif /* _LINUX_CGROUP_H */
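
A hedged sketch of the reference-counting pattern for the new cgroup namespace; the function name is illustrative.

static void example_print_cgroup_path(struct cgroup *cgrp, struct cgroup_namespace *ns,
				      char *buf, size_t buflen)
{
	get_cgroup_ns(ns);			/* pin the namespace */
	cgroup_path_ns(cgrp, buf, buflen, ns);	/* path relative to ns->root_cset */
	put_cgroup_ns(ns);			/* last put frees via free_cgroup_ns() */
}
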
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1143e38555a4..da95258127aa 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,7 +25,7 @@
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
+#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */
#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
@@ -276,6 +276,8 @@ struct clk_fixed_rate {
u8 flags;
};
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
extern const struct clk_ops clk_fixed_rate_ops;
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
@@ -283,7 +285,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy);
-
+void clk_unregister_fixed_rate(struct clk *clk);
void of_fixed_clk_setup(struct device_node *np);
/**
@@ -314,6 +316,8 @@ struct clk_gate {
spinlock_t *lock;
};
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
#define CLK_GATE_SET_TO_DISABLE BIT(0)
#define CLK_GATE_HIWORD_MASK BIT(1)
@@ -376,6 +380,8 @@ struct clk_divider {
spinlock_t *lock;
};
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
#define CLK_DIVIDER_ONE_BASED BIT(0)
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
#define CLK_DIVIDER_ALLOW_ZERO BIT(2)
@@ -385,6 +391,7 @@ struct clk_divider {
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
+extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
@@ -440,6 +447,8 @@ struct clk_mux {
spinlock_t *lock;
};
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
#define CLK_MUX_HIWORD_MASK BIT(2)
@@ -483,10 +492,13 @@ struct clk_fixed_factor {
unsigned int div;
};
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
extern const struct clk_ops clk_fixed_factor_ops;
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+void clk_unregister_fixed_factor(struct clk *clk);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
@@ -514,6 +526,8 @@ struct clk_fractional_divider {
spinlock_t *lock;
};
+#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
+
extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
@@ -550,6 +564,8 @@ struct clk_multiplier {
spinlock_t *lock;
};
+#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
+
#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
@@ -579,6 +595,8 @@ struct clk_composite {
const struct clk_ops *gate_ops;
};
+#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
+
struct clk *clk_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
@@ -601,13 +619,13 @@ struct clk_gpio {
struct gpio_desc *gpiod;
};
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
-void of_gpio_clk_gate_setup(struct device_node *node);
-
/**
* struct clk_gpio_mux - gpio controlled clock multiplexer
*
@@ -623,8 +641,6 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, unsigned gpio,
bool active_low, unsigned long flags);
-void of_gpio_mux_clk_setup(struct device_node *node);
-
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
@@ -700,7 +716,7 @@ void of_clk_del_provider(struct device_node *np);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
-int of_clk_get_parent_count(struct device_node *np);
+unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
const char *of_clk_get_parent_name(struct device_node *np, int index);
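
A minimal sketch of how a clk_ops callback uses the new container_of helpers; the callback body is illustrative.

static int example_gate_enable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);	/* container_of(hw, struct clk_gate, hw) */

	/* ... set BIT(gate->bit_idx) in the register at gate->reg ... */
	return 0;
}
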
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 1e6932222e11..17f413bbbedf 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -16,18 +16,6 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_pmc_base;
-
-#define at91_pmc_read(field) \
- readl_relaxed(at91_pmc_base + field)
-
-#define at91_pmc_write(field, value) \
- writel_relaxed(value, at91_pmc_base + field)
-#else
-.extern at91_pmc_base
-#endif
-
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/renesas.h
index cb19cc1865ca..7adfd80fbf55 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/renesas.h
@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
-#ifndef __LINUX_CLK_SHMOBILE_H_
-#define __LINUX_CLK_SHMOBILE_H_
+#ifndef __LINUX_CLK_RENESAS_H_
+#define __LINUX_CLK_RENESAS_H_
#include <linux/types.h>
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 9a638601cb09..dc5164a6df29 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -23,8 +23,8 @@
* @mult_div1_reg: register containing the DPLL M and N bitfields
* @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
* @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
- * @clk_bypass: struct clk pointer to the clock's bypass clock input
- * @clk_ref: struct clk pointer to the clock's reference clock input
+ * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input
+ * @clk_ref: struct clk_hw pointer to the clock's reference clock input
* @control_reg: register containing the DPLL mode bitfield
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
* @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
@@ -69,8 +69,8 @@ struct dpll_data {
void __iomem *mult_div1_reg;
u32 mult_mask;
u32 div1_mask;
- struct clk *clk_bypass;
- struct clk *clk_ref;
+ struct clk_hw *clk_bypass;
+ struct clk_hw *clk_ref;
void __iomem *control_reg;
u32 enable_mask;
unsigned long last_rounded_rate;
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 08bffcc466de..c2c04f7cbe8a 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -44,8 +44,7 @@ struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, const char *, struct device *);
-int clk_register_clkdev(struct clk *, const char *, const char *, ...)
- __printf(3, 4);
+int clk_register_clkdev(struct clk *, const char *, const char *);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
#ifdef CONFIG_COMMON_CLK
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4cd4ddf64cc7..d7c8de583a23 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
+extern int kcompactd_run(int nid);
+extern void kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+
#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, int alloc_flags,
@@ -84,6 +88,18 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return true;
}
+static inline int kcompactd_run(int nid)
+{
+ return 0;
+}
+static inline void kcompactd_stop(int nid)
+{
+}
+
+static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+}
+
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
diff --git a/include/linux/compat.h b/include/linux/compat.h
index a76c9172b2eb..f964ef79e0ad 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -5,6 +5,8 @@
* syscall compatibility layer.
*/
+#include <linux/types.h>
+
#ifdef CONFIG_COMPAT
#include <linux/stat.h>
@@ -340,6 +342,12 @@ asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
@@ -713,9 +721,22 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
int, const char __user *);
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else
#define is_compat_task() (0)
+static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+
#endif /* _LINUX_COMPAT_H */
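
A hedged sketch of where the distinction matters: an ioctl implementation should key the user-space ABI off the current syscall rather than the task flavour. The two handlers are assumed helpers, not real functions.

static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (in_compat_syscall())
		return example_compat_ioctl(file, cmd, arg);	/* assumed helper */
	return example_native_ioctl(file, cmd, arg);		/* assumed helper */
}
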
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d1e49d52b640..de179993e039 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -10,3 +10,8 @@
#undef uninitialized_var
#define uninitialized_var(x) x = *(&(x))
#endif
+
+/* same as gcc, this was present in clang-2.6 so we can assume it works
+ * with any version that can compile the kernel
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index fc14275ff34e..40cee6b77a93 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -607,8 +607,6 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
- *
- * This will eventually be a runtime variable, depending on nr_cpu_ids.
*/
static inline size_t cpumask_size(void)
{
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8d70e1361ecd..257db64562e5 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -377,7 +377,10 @@ extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
#define current_user_ns() (current_cred_xxx(user_ns))
#else
-#define current_user_ns() (&init_user_ns)
+static inline struct user_namespace *current_user_ns(void)
+{
+ return &init_user_ns;
+}
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c4b5f4b3f8f8..7cb043d8f4e8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -228,6 +228,8 @@ struct dentry_operations {
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
+
extern seqlock_t rename_lock;
/*
@@ -246,6 +248,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
@@ -272,38 +275,8 @@ extern int have_submounts(struct dentry *);
* This adds the entry to the hash queues.
*/
extern void d_rehash(struct dentry *);
-
-/**
- * d_add - add dentry to hash queues
- * @entry: dentry to add
- * @inode: The inode to attach to this dentry
- *
- * This adds the entry to the hash queues and initializes @inode.
- * The entry was actually filled in earlier during d_alloc().
- */
-static inline void d_add(struct dentry *entry, struct inode *inode)
-{
- d_instantiate(entry, inode);
- d_rehash(entry);
-}
-
-/**
- * d_add_unique - add dentry to hash queues without aliasing
- * @entry: dentry to add
- * @inode: The inode to attach to this dentry
- *
- * This adds the entry to the hash queues and initializes @inode.
- * The entry was actually filled in earlier during d_alloc().
- */
-static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
-{
- struct dentry *res;
-
- res = d_instantiate_unique(entry, inode);
- d_rehash(res != NULL ? res : entry);
- return res;
-}
+extern void d_add(struct dentry *, struct inode *);
extern void dentry_update_name_case(struct dentry *, struct qstr *);
diff --git a/include/linux/device.h b/include/linux/device.h
index 2d0e6e541d52..002c59728dbe 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -70,8 +70,11 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
* @match: Called, perhaps multiple times, whenever a new device or driver
- * is added for this bus. It should return a nonzero value if the
- * given device can be handled by the given driver.
+ * is added for this bus. It should return a positive value if the
+ * given device can be handled by the given driver and zero
+ * otherwise. It may also return an error code if determining that
+ * the driver supports the device is not possible. In case of
+ * -EPROBE_DEFER it will queue the device for deferred probing.
* @uevent: Called when a device is added, removed, or a few other things
* that generate uevents to add the environment variables.
* @probe: Called when a new device or driver add to this bus, and callback
@@ -682,6 +685,18 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+static inline int devm_add_action_or_reset(struct device *dev,
+ void (*action)(void *), void *data)
+{
+ int ret;
+
+ ret = devm_add_action(dev, action, data);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
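
A minimal usage sketch for devm_add_action_or_reset(); the regulator example is illustrative. When registration fails, the action has already been run, so the caller needs no extra unwind path.

static void example_disable_regulator(void *data)
{
	regulator_disable(data);
}

static int example_probe_step(struct device *dev, struct regulator *reg)
{
	int ret = regulator_enable(reg);

	if (ret)
		return ret;
	/* On failure, example_disable_regulator(reg) has already been called. */
	return devm_add_action_or_reset(dev, example_disable_regulator, reg);
}
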
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 99c0be00b47c..5246239a4953 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -18,6 +18,7 @@ enum dma_attr {
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
+ DMA_ATTR_ALLOC_SINGLE_PAGES,
DMA_ATTR_MAX,
};
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f98bd7068d55..532108ea0c1c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done)
- * respectively pin the objet into memory.
+ * respectively pin the object into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
- int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
- enum dma_data_direction);
- void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
- enum dma_data_direction);
+ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+ void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 5e45cf930a3f..9ea9aba28049 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -70,6 +70,8 @@ struct dma_map_ops {
int is_phys;
};
+extern struct dma_map_ops dma_noop_ops;
+
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#define DMA_MASK_NONE 0x0ULL
diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h
index ff8b55359648..0de21e935976 100644
--- a/include/linux/dqblk_qtree.h
+++ b/include/linux/dqblk_qtree.h
@@ -15,6 +15,7 @@
#define QTREE_DEL_REWRITE 6
struct dquot;
+struct kqid;
/* Operations */
struct qtree_fmt_operations {
@@ -52,5 +53,6 @@ static inline int qtree_depth(struct qtree_mem_dqinfo *info)
entries *= epb;
return i;
}
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
#endif /* _LINUX_DQBLK_QTREE_H */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 333d0ca6940f..1626474567ac 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -97,6 +97,7 @@ typedef struct {
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
+#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */
#define EFI_MEMORY_MORE_RELIABLE \
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
@@ -507,10 +508,6 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data);
-typedef efi_status_t
-efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
- u32 attr, unsigned long data_size, void *data);
-
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
typedef void efi_reset_system_t (int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data);
@@ -529,7 +526,9 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
unsigned long count,
u64 *max_size,
int *reset_type);
-typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes,
+ unsigned long size,
+ bool nonblocking);
void efi_native_runtime_setup(void);
@@ -537,67 +536,88 @@ void efi_native_runtime_setup(void);
* EFI Configuration Table and GUID definitions
*/
#define NULL_GUID \
- EFI_GUID( 0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 )
+ EFI_GUID(0x00000000, 0x0000, 0x0000, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
#define MPS_TABLE_GUID \
- EFI_GUID( 0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID \
- EFI_GUID( 0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID \
- EFI_GUID( 0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 )
+ EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \
+ 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID \
- EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID \
- EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
+ EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \
+ 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
#define SAL_SYSTEM_TABLE_GUID \
- EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define HCDP_TABLE_GUID \
- EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
+ EFI_GUID(0xf951938d, 0x620b, 0x42ef, \
+ 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
#define UGA_IO_PROTOCOL_GUID \
- EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 )
+ EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \
+ 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID \
- EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
+ EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
+ 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID \
- EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+ EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \
+ 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID \
- EFI_GUID( 0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0 )
+ EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \
+ 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID \
- EFI_GUID( 0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+ EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \
+ 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
- EFI_GUID( 0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a )
+ EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \
+ 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
#define EFI_UGA_PROTOCOL_GUID \
- EFI_GUID( 0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39 )
+ EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \
+ 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID \
- EFI_GUID( 0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a )
+ EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \
+ 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
#define EFI_FILE_INFO_ID \
- EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+ EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \
+ 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
- EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+ EFI_GUID(0xb122a263, 0x3661, 0x4f68, \
+ 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
#define EFI_FILE_SYSTEM_GUID \
- EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+ EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \
+ 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define DEVICE_TREE_GUID \
- EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
+ EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \
+ 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
#define EFI_PROPERTIES_TABLE_GUID \
- EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
+ EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \
+ 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID \
EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
@@ -855,8 +875,9 @@ extern struct efi {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
- efi_set_variable_nonblocking_t *set_variable_nonblocking;
+ efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
efi_update_capsule_t *update_capsule;
efi_query_capsule_caps_t *query_capsule_caps;
efi_get_next_high_mono_count_t *get_next_high_mono_count;
@@ -888,13 +909,17 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
#ifdef CONFIG_X86
extern void efi_late_init(void);
extern void efi_free_boot_services(void);
-extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern efi_status_t efi_query_variable_store(u32 attributes,
+ unsigned long size,
+ bool nonblocking);
extern void efi_find_mirror(void);
#else
static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
-static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+static inline efi_status_t efi_query_variable_store(u32 attributes,
+ unsigned long size,
+ bool nonblocking)
{
return EFI_SUCCESS;
}
@@ -1095,7 +1120,7 @@ struct efivar_operations {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
- efi_set_variable_nonblocking_t *set_variable_nonblocking;
+ efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
};
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 653dc9c4ebac..e2b7bf27c03e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_ETHTOOL_H
#define _LINUX_ETHTOOL_H
+#include <linux/bitmap.h>
#include <linux/compat.h>
#include <uapi/linux/ethtool.h>
@@ -40,9 +41,6 @@ struct compat_ethtool_rxnfc {
#include <linux/rculist.h>
-extern int __ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd);
-
/**
* enum ethtool_phys_id_state - indicator state for physical identification
* @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
@@ -97,13 +95,70 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
+/* number of link mode bits/ulongs handled internally by kernel */
+#define __ETHTOOL_LINK_MODE_MASK_NBITS \
+ (__ETHTOOL_LINK_MODE_LAST + 1)
+
+/* declare a link mode bitmap */
+#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
+ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/* Drivers must ignore the base.cmd and base.link_mode_masks_nwords
+ * fields; they may overwrite them, but any value written there is
+ * ignored by the core.
+ */
+struct ethtool_link_ksettings {
+ struct ethtool_link_settings base;
+ struct {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
+ } link_modes;
+};
+
+/**
+ * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ */
+#define ethtool_link_ksettings_zero_link_mode(ptr, name) \
+ bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/**
+ * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings
+ * link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bounds checking)
+ */
+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+/**
+ * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bounds checking)
+ *
+ * Returns true/false.
+ */
+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
+ test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+extern int
+__ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings);
+
/**
* struct ethtool_ops - optional netdev operations
- * @get_settings: Get various device settings including Ethernet link
+ * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
+ * API. Get various device settings including Ethernet link
* settings. The @cmd parameter is expected to have been cleared
- * before get_settings is called. Returns a negative error code or
- * zero.
- * @set_settings: Set various device settings including Ethernet link
+ * before get_settings is called. Returns a negative error code
+ * or zero.
+ * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
+ * API. Set various device settings including Ethernet link
* settings. Returns a negative error code or zero.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
@@ -201,6 +256,29 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
+ * It must check that the given queue number is valid. If neither a RX nor
+ * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
+ * queue has this number, set the inapplicable fields to ~0 and return 0.
+ * Returns a negative error code or zero.
+ * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
+ * It must check that the given queue number is valid. If neither a RX nor
+ * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
+ * queue has this number, ignore the inapplicable fields.
+ * Returns a negative error code or zero.
+ * @get_link_ksettings: When defined, takes precedence over the
+ * %get_settings method. Get various device settings
+ * including Ethernet link settings. The %cmd and
+ * %link_mode_masks_nwords fields should be ignored (use
+ * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
+ * change to them will be overwritten by kernel. Returns a
+ * negative error code or zero.
+ * @set_link_ksettings: When defined, takes precedence over the
+ * %set_settings method. Set various device settings including
+ * Ethernet link settings. The %cmd and %link_mode_masks_nwords
+ * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
+ * instead of the latter), any change to them will be overwritten
+ * by the kernel. Returns a negative error code or zero.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -279,7 +357,13 @@ struct ethtool_ops {
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
const struct ethtool_tunable *, const void *);
-
-
+ int (*get_per_queue_coalesce)(struct net_device *, u32,
+ struct ethtool_coalesce *);
+ int (*set_per_queue_coalesce)(struct net_device *, u32,
+ struct ethtool_coalesce *);
+ int (*get_link_ksettings)(struct net_device *,
+ struct ethtool_link_ksettings *);
+ int (*set_link_ksettings)(struct net_device *,
+ const struct ethtool_link_ksettings *);
};
#endif /* _LINUX_ETHTOOL_H */
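
A hedged sketch of a driver-side get_link_ksettings implementation using the new zero/add helpers; the advertised values are illustrative.

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	return 0;
}
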
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index e59c3be92106..9eb215a155e0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,7 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -170,12 +170,12 @@ struct f2fs_extent {
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
-#define ADDRS_PER_PAGE(page, fi) \
- (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode) \
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -345,7 +345,7 @@ struct f2fs_summary {
struct summary_footer {
unsigned char entry_type; /* SUM_TYPE_XXX */
- __u32 check_sum; /* summary checksum */
+ __le32 check_sum; /* summary checksum */
} __packed;
#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -358,6 +358,12 @@ struct summary_footer {
sizeof(struct sit_journal_entry))
#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
sizeof(struct sit_journal_entry))
+
+/* Reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
+
/*
* frequently updated NAT/SIT entries can be stored in the spare area in
* summary blocks
@@ -387,18 +393,28 @@ struct sit_journal {
__u8 reserved[SIT_JOURNAL_RESERVED];
} __packed;
-/* 4KB-sized summary block structure */
-struct f2fs_summary_block {
- struct f2fs_summary entries[ENTRIES_IN_SUM];
+struct f2fs_extra_info {
+ __le64 kbytes_written;
+ __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
+struct f2fs_journal {
union {
__le16 n_nats;
__le16 n_sits;
};
- /* spare area is used by NAT or SIT journals */
+ /* spare area is used by NAT or SIT journals or extra info */
union {
struct nat_journal nat_j;
struct sit_journal sit_j;
+ struct f2fs_extra_info info;
};
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ struct f2fs_journal journal;
struct summary_footer footer;
} __packed;
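Since F2FS_BLKSIZE is a power of two (4096 == 1 << F2FS_BLKSIZE_BITS), the new shift form of F2FS_BLK_ALIGN is equivalent to the old round-up division. A quick stand-alone check of that equivalence (user-space, not kernel code):

#include <assert.h>
#include <stdint.h>

#define F2FS_BLKSIZE        4096
#define F2FS_BLKSIZE_BITS   12

int main(void)
{
        for (uint64_t x = 0; x <= 3 * F2FS_BLKSIZE; x++)
                assert((x + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE ==
                       (x + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS);
        return 0;
}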
diff --git a/include/linux/frame.h b/include/linux/frame.h
new file mode 100644
index 000000000000..e6baaba3f1ae
--- /dev/null
+++ b/include/linux/frame.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_FRAME_H
+#define _LINUX_FRAME_H
+
+#ifdef CONFIG_STACK_VALIDATION
+/*
+ * This macro marks the given function's stack frame as "non-standard", which
+ * tells objtool to ignore the function when doing stack metadata validation.
+ * It should only be used in special cases where you're 100% sure it won't
+ * affect the reliability of frame pointers and kernel stack traces.
+ *
+ * For more information, see tools/objtool/Documentation/stack-validation.txt.
+ */
+#define STACK_FRAME_NON_STANDARD(func) \
+ static void __used __section(__func_stack_frame_non_standard) \
+ *__func_stack_frame_non_standard_##func = func
+
+#else /* !CONFIG_STACK_VALIDATION */
+
+#define STACK_FRAME_NON_STANDARD(func)
+
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* _LINUX_FRAME_H */
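Typical usage is a single annotation next to the function definition; a sketch (my_asm_thunk is a made-up example of a function whose frame objtool cannot model):

#include <linux/frame.h>

static void my_asm_thunk(void)
{
        /* hand-written inline asm that breaks frame-pointer conventions */
}
STACK_FRAME_NON_STANDARD(my_asm_thunk);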
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 6b7fd9cf5ea2..dd03e837ebb7 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
* call this with locks held.
*/
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- unsigned long delta, const enum hrtimer_mode mode)
+ u64 delta, const enum hrtimer_mode mode)
{
int __retval;
freezer_do_not_count();
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e514f76db04f..14a97194b34b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -53,6 +53,8 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct fscrypt_info;
+struct fscrypt_operations;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -70,7 +72,7 @@ extern int sysctl_protected_hardlinks;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
@@ -320,6 +322,7 @@ struct writeback_control;
#define IOCB_EVENTFD (1 << 0)
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
+#define IOCB_HIPRI (1 << 3)
struct kiocb {
struct file *ki_filp;
@@ -678,6 +681,10 @@ struct inode {
struct hlist_head i_fsnotify_marks;
#endif
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ struct fscrypt_info *i_crypt_info;
+#endif
+
void *i_private; /* fs or device private pointer */
};
@@ -1323,6 +1330,8 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
+ const struct fscrypt_operations *s_cop;
+
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1540,11 +1549,6 @@ extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct de
extern int vfs_whiteout(struct inode *, struct dentry *);
/*
- * VFS dentry helper functions.
- */
-extern void dentry_unhash(struct dentry *dentry);
-
-/*
* VFS file helper functions.
*/
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
@@ -1709,9 +1713,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *)
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *);
+ unsigned long, loff_t *, int);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *);
+ unsigned long, loff_t *, int);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
@@ -2259,7 +2263,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
- const char *, int);
+ const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
new file mode 100644
index 000000000000..cd91f75de49b
--- /dev/null
+++ b/include/linux/fscrypto.h
@@ -0,0 +1,434 @@
+/*
+ * General per-file encryption definition
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPTO_H
+#define _LINUX_FSCRYPTO_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_KEY_DERIVATION_NONCE_SIZE 16
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Flags
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE 16
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+#define FS_MAX_KEY_SIZE 64
+
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+ u32 mode;
+ u8 raw[FS_MAX_KEY_SIZE];
+ u32 size;
+} __packed;
+
+struct fscrypt_info {
+ u8 ci_data_mode;
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+ struct key *ci_keyring_key;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define FS_WRITE_PATH_FL 0x00000002
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+ u8 mode; /* Encryption mode for tfm */
+};
+
+struct fscrypt_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+ struct fscrypt_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+static inline int fscrypt_key_size(int mode)
+{
+ switch (mode) {
+ case FS_ENCRYPTION_MODE_AES_256_XTS:
+ return FS_AES_256_XTS_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_256_GCM:
+ return FS_AES_256_GCM_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_256_CBC:
+ return FS_AES_256_CBC_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_256_CTS:
+ return FS_AES_256_CTS_KEY_SIZE;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+#define FS_FNAME_NUM_SCATTER_ENTRIES 4
+#define FS_CRYPTO_BLOCK_SIZE 16
+#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
+/**
+ * This function is used to calculate the disk space required to
+ * store a filename of length l in encrypted symlink format.
+ */
+static inline u32 fscrypt_symlink_data_len(u32 l)
+{
+ if (l < FS_CRYPTO_BLOCK_SIZE)
+ l = FS_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct fscrypt_symlink_data) - 1);
+}
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*prepare_context)(struct inode *);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ int (*dummy_context)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
+{
+ if (size == fscrypt_key_size(mode))
+ return size;
+ return 0;
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return (inode->i_crypt_info != NULL);
+#else
+ return 0;
+#endif
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+extern const struct dentry_operations fscrypt_d_ops;
+#endif
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ d_set_d_op(dentry, &fscrypt_d_ops);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+int fscrypt_initialize(void);
+
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern int fscrypt_decrypt_page(struct page *);
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern void fscrypt_restore_control_page(struct page *);
+extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+ unsigned int);
+/* policy.c */
+extern int fscrypt_process_policy(struct inode *,
+ const struct fscrypt_policy *);
+extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int get_crypt_info(struct inode *);
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+extern void fscrypt_free_filename(struct fscrypt_name *);
+extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+ struct fscrypt_str *);
+#endif
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
+{
+ return;
+}
+
+static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
+ struct page *p)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
+ struct bio *b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_restore_control_page(struct page *p)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+ sector_t s, unsigned int f)
+{
+ return -EOPNOTSUPP;
+}
+
+/* policy.c */
+static inline int fscrypt_notsupp_process_policy(struct inode *i,
+ const struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_get_policy(struct inode *i,
+ struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
+ struct inode *i)
+{
+ return 0;
+}
+
+static inline int fscrypt_notsupp_inherit_context(struct inode *p,
+ struct inode *i, void *v, bool b)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
+ struct fscrypt_info *f)
+{
+ return;
+}
+
+/* fname.c */
+static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (dir->i_sb->s_cop->is_encrypted(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
+{
+ /* never happens */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* _LINUX_FSCRYPTO_H */
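A filesystem opts in by pointing the new super_block->s_cop field at its own fscrypt_operations. A rough sketch of the wiring (all myfs_* names are hypothetical, not part of this patch):

static const struct fscrypt_operations myfs_cryptops = {
        .get_context    = myfs_get_context,
        .set_context    = myfs_set_context,
        .is_encrypted   = myfs_encrypted_inode,
        .empty_dir      = myfs_empty_dir,
        .max_namelen    = myfs_max_namelen,
};

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
        /* ...usual superblock setup... */
        sb->s_cop = &myfs_cryptops;
        return 0;
}

A build without encryption support would instead alias its helpers to the fscrypt_notsupp_* variants above, which is why those stubs return -EOPNOTSUPP rather than BUG().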
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 84d971ff3fba..649e9171a9b3 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -189,4 +189,109 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
#endif
+struct ccsr_rcpm_v1 {
+ u8 res0000[4];
+ __be32 cdozsr; /* 0x0004 Core Doze Status Register */
+ u8 res0008[4];
+ __be32 cdozcr; /* 0x000c Core Doze Control Register */
+ u8 res0010[4];
+ __be32 cnapsr; /* 0x0014 Core Nap Status Register */
+ u8 res0018[4];
+ __be32 cnapcr; /* 0x001c Core Nap Control Register */
+ u8 res0020[4];
+ __be32 cdozpsr; /* 0x0024 Core Doze Previous Status Register */
+ u8 res0028[4];
+ __be32 cnappsr; /* 0x002c Core Nap Previous Status Register */
+ u8 res0030[4];
+ __be32 cwaitsr; /* 0x0034 Core Wait Status Register */
+ u8 res0038[4];
+ __be32 cwdtdsr; /* 0x003c Core Watchdog Detect Status Register */
+ __be32 powmgtcsr; /* 0x0040 PM Control&Status Register */
+#define RCPM_POWMGTCSR_SLP 0x00020000
+ u8 res0044[12];
+ __be32 ippdexpcr; /* 0x0050 IP Powerdown Exception Control Register */
+ u8 res0054[16];
+ __be32 cpmimr; /* 0x0064 Core PM IRQ Mask Register */
+ u8 res0068[4];
+ __be32 cpmcimr; /* 0x006c Core PM Critical IRQ Mask Register */
+ u8 res0070[4];
+ __be32 cpmmcmr; /* 0x0074 Core PM Machine Check Mask Register */
+ u8 res0078[4];
+ __be32 cpmnmimr; /* 0x007c Core PM NMI Mask Register */
+ u8 res0080[4];
+ __be32 ctbenr; /* 0x0084 Core Time Base Enable Register */
+ u8 res0088[4];
+ __be32 ctbckselr; /* 0x008c Core Time Base Clock Select Register */
+ u8 res0090[4];
+ __be32 ctbhltcr; /* 0x0094 Core Time Base Halt Control Register */
+ u8 res0098[4];
+ __be32 cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */
+};
+
+struct ccsr_rcpm_v2 {
+ u8 res_00[12];
+ __be32 tph10sr0; /* Thread PH10 Status Register */
+ u8 res_10[12];
+ __be32 tph10setr0; /* Thread PH10 Set Control Register */
+ u8 res_20[12];
+ __be32 tph10clrr0; /* Thread PH10 Clear Control Register */
+ u8 res_30[12];
+ __be32 tph10psr0; /* Thread PH10 Previous Status Register */
+ u8 res_40[12];
+ __be32 twaitsr0; /* Thread Wait Status Register */
+ u8 res_50[96];
+ __be32 pcph15sr; /* Physical Core PH15 Status Register */
+ __be32 pcph15setr; /* Physical Core PH15 Set Control Register */
+ __be32 pcph15clrr; /* Physical Core PH15 Clear Control Register */
+ __be32 pcph15psr; /* Physical Core PH15 Prev Status Register */
+ u8 res_c0[16];
+ __be32 pcph20sr; /* Physical Core PH20 Status Register */
+ __be32 pcph20setr; /* Physical Core PH20 Set Control Register */
+ __be32 pcph20clrr; /* Physical Core PH20 Clear Control Register */
+ __be32 pcph20psr; /* Physical Core PH20 Prev Status Register */
+ __be32 pcpw20sr; /* Physical Core PW20 Status Register */
+ u8 res_e0[12];
+ __be32 pcph30sr; /* Physical Core PH30 Status Register */
+ __be32 pcph30setr; /* Physical Core PH30 Set Control Register */
+ __be32 pcph30clrr; /* Physical Core PH30 Clear Control Register */
+ __be32 pcph30psr; /* Physical Core PH30 Prev Status Register */
+ u8 res_100[32];
+ __be32 ippwrgatecr; /* IP Power Gating Control Register */
+ u8 res_124[12];
+ __be32 powmgtcsr; /* Power Management Control & Status Reg */
+#define RCPM_POWMGTCSR_LPM20_RQ 0x00100000
+#define RCPM_POWMGTCSR_LPM20_ST 0x00000200
+#define RCPM_POWMGTCSR_P_LPM20_ST 0x00000100
+ u8 res_134[12];
+ __be32 ippdexpcr[4]; /* IP Powerdown Exception Control Reg */
+ u8 res_150[12];
+ __be32 tpmimr0; /* Thread PM Interrupt Mask Reg */
+ u8 res_160[12];
+ __be32 tpmcimr0; /* Thread PM Crit Interrupt Mask Reg */
+ u8 res_170[12];
+ __be32 tpmmcmr0; /* Thread PM Machine Check Interrupt Mask Reg */
+ u8 res_180[12];
+ __be32 tpmnmimr0; /* Thread PM NMI Mask Reg */
+ u8 res_190[12];
+ __be32 tmcpmaskcr0; /* Thread Machine Check Mask Control Reg */
+ __be32 pctbenr; /* Physical Core Time Base Enable Reg */
+ __be32 pctbclkselr; /* Physical Core Time Base Clock Select */
+ __be32 tbclkdivr; /* Time Base Clock Divider Register */
+ u8 res_1ac[4];
+ __be32 ttbhltcr[4]; /* Thread Time Base Halt Control Register */
+ __be32 clpcl10sr; /* Cluster PCL10 Status Register */
+ __be32 clpcl10setr; /* Cluster PCL30 Set Control Register */
+ __be32 clpcl10clrr; /* Cluster PCL30 Clear Control Register */
+ __be32 clpcl10psr; /* Cluster PCL30 Prev Status Register */
+ __be32 cddslpsetr; /* Core Domain Deep Sleep Set Register */
+ __be32 cddslpclrr; /* Core Domain Deep Sleep Clear Register */
+ __be32 cdpwroksetr; /* Core Domain Power OK Set Register */
+ __be32 cdpwrokclrr; /* Core Domain Power OK Clear Register */
+ __be32 cdpwrensr; /* Core Domain Power Enable Status Register */
+ __be32 cddslsr; /* Core Domain Deep Sleep Status Register */
+ u8 res_1e8[8];
+ __be32 dslpcntcr[8]; /* Deep Sleep Counter Cfg Register */
+ u8 res_300[3568];
+};
+
#endif
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 7ee1774edee5..0141f257d67b 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -16,15 +16,6 @@
#include <linux/slab.h>
#include <linux/bug.h>
-/*
- * fsnotify_d_instantiate - instantiate a dentry for inode
- */
-static inline void fsnotify_d_instantiate(struct dentry *dentry,
- struct inode *inode)
-{
- __fsnotify_d_instantiate(dentry, inode);
-}
-
/* Notify this dentry's parent about a child's events. */
static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 533c4408529a..1259e53d9296 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -290,14 +290,9 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
*/
-static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+static inline void __fsnotify_d_instantiate(struct dentry *dentry)
{
- if (!inode)
- return;
-
- spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
- spin_unlock(&dentry->d_lock);
}
/* called from fsnotify listeners, such as fanotify or dnotify */
@@ -396,7 +391,7 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
-static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+static inline void __fsnotify_d_instantiate(struct dentry *dentry)
{}
static inline u32 fsnotify_get_cookie(void)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bb16dfeb917e..570383a41853 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -105,8 +105,6 @@ struct vm_area_struct;
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
- *
- * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
@@ -259,7 +257,7 @@ struct vm_area_struct;
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
- ~__GFP_KSWAPD_RECLAIM)
+ ~__GFP_RECLAIM)
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -333,22 +331,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
- * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
-#if 16 * ZONES_SHIFT > BITS_PER_LONG
-#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
+#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
+/* ZONE_DEVICE is not a valid GFP zone specifier */
+#define GFP_ZONES_SHIFT 2
+#else
+#define GFP_ZONES_SHIFT ZONES_SHIFT
+#endif
+
+#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
+#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif
#define GFP_ZONE_TABLE ( \
- (ZONE_NORMAL << 0 * ZONES_SHIFT) \
- | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
- | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
- | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
- | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
- | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
- | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
- | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
+ (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
+ | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
+ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
+ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)
/*
@@ -373,8 +378,8 @@ static inline enum zone_type gfp_zone(gfp_t flags)
enum zone_type z;
int bit = (__force int) (flags & GFP_ZONEMASK);
- z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
- ((1 << ZONES_SHIFT) - 1);
+ z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
+ ((1 << GFP_ZONES_SHIFT) - 1);
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
return z;
}
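With GFP_ZONES_SHIFT bits per entry, gfp_zone() is a plain shift-and-mask table lookup. A worked example, assuming GFP_ZONES_SHIFT == 2 and the usual modifier encoding ___GFP_HIGHMEM == 0x02:

        bit = flags & GFP_ZONEMASK                  /* = 0x02 for __GFP_HIGHMEM    */
        z   = (GFP_ZONE_TABLE >> (0x02 * 2)) & 0x3  /* bits 4..5 of the table      */
            = OPT_ZONE_HIGHMEM                      /* from the table initializer  */

i.e. ZONE_HIGHMEM when CONFIG_HIGHMEM is enabled, ZONE_NORMAL otherwise.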
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2ead22dd74a0..c98c6539e2c2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time
timer->node.expires = ktime_add_safe(time, delta);
}
-static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
timer->_softexpires = time;
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- unsigned long range_ns, const enum hrtimer_mode mode);
+ u64 range_ns, const enum hrtimer_mode mode);
/**
* hrtimer_start - (re)start an hrtimer on the current CPU
@@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
static inline void hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
{
- unsigned long delta;
+ u64 delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
@@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk);
-extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
- unsigned long delta, const enum hrtimer_mode mode, int clock);
+ u64 delta,
+ const enum hrtimer_mode mode,
+ int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 459fd25b378e..79b0ef6aaa14 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -41,7 +41,8 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
- TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
@@ -71,12 +72,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
((__vma)->vm_flags & VM_HUGEPAGE))) && \
!((__vma)->vm_flags & VM_NOHUGEPAGE) && \
!is_vma_temporary_stack(__vma))
-#define transparent_hugepage_defrag(__vma) \
- ((transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
- (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -101,19 +96,21 @@ static inline int split_huge_page(struct page *page)
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address);
+ unsigned long address, bool freeze);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
if (pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
- __split_huge_pmd(__vma, __pmd, __address); \
+ __split_huge_pmd(__vma, __pmd, __address, \
+ false); \
} while (0)
-#if HPAGE_PMD_ORDER >= MAX_ORDER
-#error "hugepages can't be allocated by the buddy allocator"
-#endif
+
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
+ bool freeze, struct page *page);
+
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -178,6 +175,10 @@ static inline int split_huge_page(struct page *page)
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
+
+static inline void split_huge_pmd_address(struct vm_area_struct *vma,
+ unsigned long address, bool freeze, struct page *page) {}
+
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 452c0b0d2f32..3b1f6cef9513 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
@@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits {
};
#define WLAN_SA_QUERY_TR_ID_LEN 2
+#define WLAN_MEMBERSHIP_LEN 8
+#define WLAN_USER_POSITION_LEN 16
/**
* struct ieee80211_tpc_report_ie
@@ -991,6 +1001,11 @@ struct ieee80211_mgmt {
} __packed vht_opmode_notif;
struct {
u8 action_code;
+ u8 membership[WLAN_MEMBERSHIP_LEN];
+ u8 position[WLAN_USER_POSITION_LEN];
+ } __packed vht_group_notif;
+ struct {
+ u8 action_code;
u8 dialog_token;
u8 tpc_elem_id;
u8 tpc_elem_length;
@@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
@@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
+/* Defines the maximal number of MSDUs in an A-MSDU. */
+#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7)
+#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0)
+
+/*
+ * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY
+ * information element
+ */
+#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
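The new IEEE80211_VHT_CAP_MAX_MPDU_MASK makes it straightforward to map the two capability bits onto the IEEE80211_MAX_MPDU_LEN_VHT_* limits added above; a sketch of such a helper (not part of this patch):

static u16 vht_cap_to_max_mpdu_len(u32 vht_cap_info)
{
        switch (vht_cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
        case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
                return IEEE80211_MAX_MPDU_LEN_VHT_11454;
        case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
                return IEEE80211_MAX_MPDU_LEN_VHT_7991;
        case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
        default:
                return IEEE80211_MAX_MPDU_LEN_VHT_3895;
        }
}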
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index a338a688ee4a..dcb89e3515db 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,10 +46,6 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
-/* values as per ieee8021QBridgeFdbAgingTime */
-#define BR_MIN_AGEING_TIME (10 * HZ)
-#define BR_MAX_AGEING_TIME (1000000 * HZ)
-
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index b84e49c3a738..174f43f43aff 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -24,6 +24,7 @@ struct team_pcpu_stats {
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
+ u32 rx_nohandler;
};
struct team;
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 9c9de11549a7..12f6fba6d21a 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,11 +37,6 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
-extern int sysctl_igmp_llm_reports;
-extern int sysctl_igmp_max_memberships;
-extern int sysctl_igmp_max_msf;
-extern int sysctl_igmp_qrv;
-
struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
deleted file mode 100644
index 9a715cfa1fe3..000000000000
--- a/include/linux/inet_lro.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * linux/include/linux/inet_lro.h
- *
- * Large Receive Offload (ipv4 / tcp)
- *
- * (C) Copyright IBM Corp. 2007
- *
- * Authors:
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __INET_LRO_H_
-#define __INET_LRO_H_
-
-#include <net/ip.h>
-#include <net/tcp.h>
-
-/*
- * LRO statistics
- */
-
-struct net_lro_stats {
- unsigned long aggregated;
- unsigned long flushed;
- unsigned long no_desc;
-};
-
-/*
- * LRO descriptor for a tcp session
- */
-struct net_lro_desc {
- struct sk_buff *parent;
- struct sk_buff *last_skb;
- struct skb_frag_struct *next_frag;
- struct iphdr *iph;
- struct tcphdr *tcph;
- __wsum data_csum;
- __be32 tcp_rcv_tsecr;
- __be32 tcp_rcv_tsval;
- __be32 tcp_ack;
- u32 tcp_next_seq;
- u32 skb_tot_frags_len;
- u16 ip_tot_len;
- u16 tcp_saw_tstamp; /* timestamps enabled */
- __be16 tcp_window;
- int pkt_aggr_cnt; /* counts aggregated packets */
- int vlan_packet;
- int mss;
- int active;
-};
-
-/*
- * Large Receive Offload (LRO) Manager
- *
- * Fields must be set by driver
- */
-
-struct net_lro_mgr {
- struct net_device *dev;
- struct net_lro_stats stats;
-
- /* LRO features */
- unsigned long features;
-#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
-#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
- from received packets and eth protocol
- is still ETH_P_8021Q */
-
- /*
- * Set for generated SKBs that are not added to
- * the frag list in fragmented mode
- */
- u32 ip_summed;
- u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
- * or CHECKSUM_NONE */
-
- int max_desc; /* Max number of LRO descriptors */
- int max_aggr; /* Max number of LRO packets to be aggregated */
-
- int frag_align_pad; /* Padding required to properly align layer 3
- * headers in generated skb when using frags */
-
- struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
-
- /*
- * Optimized driver functions
- *
- * get_skb_header: returns tcp and ip header for packet in SKB
- */
- int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
- void **tcpudp_hdr, u64 *hdr_flags, void *priv);
-
- /* hdr_flags: */
-#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
-#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
-
- /*
- * get_frag_header: returns mac, tcp and ip header for packet in SKB
- *
- * @hdr_flags: Indicate what kind of LRO has to be done
- * (IPv4/IPv6/TCP/UDP)
- */
- int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
- void *priv);
-};
-
-/*
- * Processes a SKB
- *
- * @lro_mgr: LRO manager to use
- * @skb: SKB to aggregate
- * @priv: Private data that may be used by driver functions
- * (for example get_tcp_ip_hdr)
- */
-
-void lro_receive_skb(struct net_lro_mgr *lro_mgr,
- struct sk_buff *skb,
- void *priv);
-/*
- * Forward all aggregated SKBs held by lro_mgr to network stack
- */
-
-void lro_flush_all(struct net_lro_mgr *lro_mgr);
-
-#endif
diff --git a/include/linux/io.h b/include/linux/io.h
index 32403b5716e5..e2c8419278c1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -135,6 +135,7 @@ enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
MEMREMAP_WT = 1 << 1,
+ MEMREMAP_WC = 1 << 2,
};
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 402753bccafa..7edc14fb66b6 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -50,16 +50,19 @@ struct ipv6_devconf {
__s32 mc_forwarding;
#endif
__s32 disable_ipv6;
+ __s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
+ __s32 drop_unsolicited_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
} stable_secret;
__s32 use_oif_addrs_only;
+ __s32 keep_addr_on_down;
void *sysctl;
};
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
new file mode 100644
index 000000000000..2883ac98c280
--- /dev/null
+++ b/include/linux/kcov.h
@@ -0,0 +1,29 @@
+#ifndef _LINUX_KCOV_H
+#define _LINUX_KCOV_H
+
+#include <uapi/linux/kcov.h>
+
+struct task_struct;
+
+#ifdef CONFIG_KCOV
+
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
+enum kcov_mode {
+ /* Coverage collection is not enabled yet. */
+ KCOV_MODE_DISABLED = 0,
+ /*
+ * Tracing coverage collection mode.
+ * Covered PCs are collected in a per-task buffer.
+ */
+ KCOV_MODE_TRACE = 1,
+};
+
+#else
+
+static inline void kcov_task_init(struct task_struct *t) {}
+static inline void kcov_task_exit(struct task_struct *t) {}
+
+#endif /* CONFIG_KCOV */
+#endif /* _LINUX_KCOV_H */
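KCOV_MODE_TRACE corresponds to the debugfs interface exposed through the matching uapi header. The usual user-space flow looks roughly like the sketch below; the ioctl names and COVER_SIZE value are taken from the kcov documentation, not from this hunk:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>

#define COVER_SIZE (64 << 10)

void trace_one_run(void)
{
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);
        unsigned long *cover;

        ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
        cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        ioctl(fd, KCOV_ENABLE, 0);
        /* run the syscalls of interest; cover[0] holds the number of PCs */
        ioctl(fd, KCOV_DISABLE, 0);
        munmap(cover, COVER_SIZE * sizeof(unsigned long));
        close(fd);
}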
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f31638c6e873..2f7775e229b0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -64,7 +64,7 @@
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP_ULL(ll,d) \
({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
@@ -255,7 +255,7 @@ extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...)
__noreturn __cold;
-void nmi_panic_self_stop(struct pt_regs *);
+void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
@@ -357,6 +357,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
@@ -368,6 +369,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne
int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
@@ -455,25 +457,6 @@ extern atomic_t panic_cpu;
#define PANIC_CPU_INVALID -1
/*
- * A variant of panic() called from NMI context. We return if we've already
- * panicked on this CPU. If another CPU already panicked, loop in
- * nmi_panic_self_stop() which can provide architecture dependent code such
- * as saving register state for crash dump.
- */
-#define nmi_panic(regs, fmt, ...) \
-do { \
- int old_cpu, cpu; \
- \
- cpu = raw_smp_processor_id(); \
- old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
- \
- if (old_cpu == PANIC_CPU_INVALID) \
- panic(fmt, ##__VA_ARGS__); \
- else if (old_cpu != cpu) \
- nmi_panic_self_stop(regs); \
-} while (0)
-
-/*
* Only to be used by arch init code. If the user over-wrote the default
* CONFIG_PANIC_TIMEOUT, honor it.
*/
@@ -635,7 +618,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
@@ -679,7 +662,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
*/
#define trace_puts(str) ({ \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
@@ -701,7 +684,7 @@ extern void trace_dump_stack(int skip);
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
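The new kstrtobool()/kstrtobool_from_user() helpers parse the usual boolean strings ("y"/"n", "1"/"0", ...) into a bool. A minimal sketch of a sysfs store handler built on it (the attribute and flag names are made up):

static bool my_feature_enabled;

static ssize_t my_feature_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t count)
{
        int ret = kstrtobool(buf, &my_feature_enabled);

        return ret ? ret : count;
}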
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index af51df35d749..c06c44242f39 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -267,8 +267,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
size_t kernfs_path_len(struct kernfs_node *kn);
-char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
- size_t buflen);
+int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
+ char *buf, size_t buflen);
+char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -283,6 +284,8 @@ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
+struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
+ struct super_block *sb);
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);
@@ -338,8 +341,8 @@ static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
static inline size_t kernfs_path_len(struct kernfs_node *kn)
{ return 0; }
-static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
- size_t buflen)
+static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
+ size_t buflen)
{ return NULL; }
static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 473b43678ad1..41eb6fdf87a8 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -401,7 +401,7 @@ __kfifo_int_must_check_helper( \
((typeof(__tmp->type))__kfifo->data) : \
(__tmp->buf) \
)[__kfifo->in & __tmp->kfifo.mask] = \
- (typeof(*__tmp->type))__val; \
+ *(typeof(__tmp->type))&__val; \
smp_wmb(); \
__kfifo->in++; \
} \
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ee7229a6c06a..cb483305e1f5 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -48,7 +48,7 @@ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
-static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
+static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h)
{
return !h->pprev;
}
@@ -68,7 +68,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}
-static inline int hlist_bl_empty(const struct hlist_bl_head *h)
+static inline bool hlist_bl_empty(const struct hlist_bl_head *h)
{
return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
}
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 246a3529ecf6..ac02c54520e9 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -596,7 +596,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
}
extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
-extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *);
extern unsigned short mISDN_clock_get(void);
extern const char *mISDNDevName4ch(struct mISDNchannel *);
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 1f7bc630d225..ea34a867caa0 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -69,6 +69,9 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo
int mvebu_mbus_save_cpu_target(u32 *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr);
int mvebu_mbus_add_window_remap_by_id(unsigned int target,
unsigned int attribute,
phys_addr_t base, size_t size,
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f0c4bec6565b..1191d79aa495 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,7 +52,10 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
- MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_SLAB_RECLAIMABLE,
+ MEMCG_SLAB_UNRECLAIMABLE,
+ MEMCG_SOCK,
MEMCG_NR_STAT,
};
@@ -400,6 +403,9 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int nr_pages);
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+ int nid, unsigned int lru_mask);
+
static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
@@ -658,6 +664,13 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
{
}
+static inline unsigned long
+mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+ int nid, unsigned int lru_mask)
+{
+ return 0;
+}
+
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
@@ -792,11 +805,6 @@ static inline bool memcg_kmem_enabled(void)
return static_branch_unlikely(&memcg_kmem_enabled_key);
}
-static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
-{
- return memcg->kmem_state == KMEM_ONLINE;
-}
-
/*
* In general, we'll do everything in our power to not incur in any overhead
* for non-memcg users for the kmem functions. Not even a function call, if we
@@ -883,6 +891,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep);
}
+
+/**
+ * memcg_kmem_update_page_stat - update kmem page state statistics
+ * @page: the page
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ */
+static inline void memcg_kmem_update_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx, int val)
+{
+ if (memcg_kmem_enabled() && page->mem_cgroup)
+ this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+}
+
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -892,11 +914,6 @@ static inline bool memcg_kmem_enabled(void)
return false;
}
-static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
-{
- return false;
-}
-
static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
return 0;
@@ -928,6 +945,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
+
+static inline void memcg_kmem_update_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx, int val)
+{
+}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#endif /* _LINUX_MEMCONTROL_H */
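memcg_kmem_update_page_stat() is meant to be called at the point where a kernel allocation backed by a page is charged or released, with a positive or negative page count. A sketch of accounting a task's kernel stack (the surrounding helper is illustrative only):

static void my_account_kernel_stack(struct page *first_stack_page, int nr_pages)
{
        /* nr_pages > 0 when the stack is allocated, < 0 when it is freed */
        memcg_kmem_update_page_stat(first_stack_page, MEMCG_KERNEL_STACK,
                                    nr_pages);
}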
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 24b86d538e88..05d58ee5e6a7 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -65,6 +65,10 @@
* Some controllers can support SDIO IRQ signalling.
*/
#define TMIO_MMC_SDIO_IRQ (1 << 2)
+
+/* Some controllers don't need to wait 10ms for clock changes */
+#define TMIO_MMC_FAST_CLK_CHG (1 << 3)
+
/*
* Some controllers require waiting for the SD bus to become
* idle before writing to some registers.
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 2e8af001c5da..bd0e7075ea6d 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DRIVER_H
#define MLX4_DRIVER_H
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
struct mlx4_dev;
@@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
+
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
u64 mac = 0;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 9566b3b3b2c5..8156e3c9239c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -374,6 +374,12 @@ enum {
};
enum {
+ MLX5_BW_NO_LIMIT = 0,
+ MLX5_100_MBPS_UNIT = 3,
+ MLX5_GBPS_UNIT = 4,
+};
+
+enum {
MLX5_MAX_PAGE_SHIFT = 31
};
@@ -1200,6 +1206,17 @@ enum {
MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
};
+enum mlx5_wol_mode {
+ MLX5_WOL_DISABLE = 0,
+ MLX5_WOL_SECURED_MAGIC = 1 << 1,
+ MLX5_WOL_MAGIC = 1 << 2,
+ MLX5_WOL_ARP = 1 << 3,
+ MLX5_WOL_BROADCAST = 1 << 4,
+ MLX5_WOL_MULTICAST = 1 << 5,
+ MLX5_WOL_UNICAST = 1 << 6,
+ MLX5_WOL_PHY_ACTIVITY = 1 << 7,
+};
+
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@@ -1219,6 +1236,8 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
+ MLX5_CAP_RESERVED,
+ MLX5_CAP_VECTOR_CALC,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1281,6 +1300,10 @@ enum mlx5_cap_type {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
+ MLX5_GET(vector_calc_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9108904a6a56..dcd5ac8d3b14 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -54,7 +54,7 @@ enum {
/* one minute for the sake of bringup. Generally, commands must always
* complete and we may need to increase this timeout value
*/
- MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
+ MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -99,6 +99,8 @@ enum {
};
enum {
+ MLX5_REG_QETCR = 0x4005,
+ MLX5_REG_QTCT = 0x400a,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -458,8 +460,6 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
- struct io_mapping *bf_mapping;
-
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -613,7 +613,10 @@ struct mlx5_pas {
};
enum port_state_policy {
- MLX5_AAA_000
+ MLX5_POLICY_DOWN = 0,
+ MLX5_POLICY_UP = 1,
+ MLX5_POLICY_FOLLOW = 2,
+ MLX5_POLICY_INVALID = 0xffffffff
};
enum phy_port_state {
@@ -706,8 +709,7 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
- enum mlx5_cap_mode cap_mode);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -717,7 +719,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+ bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@ -796,37 +799,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
-int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
- u32 *proto_cap, int proto_mask);
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
- u32 *proto_admin, int proto_mask);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port);
-int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
- int proto_mask);
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status *status);
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
- u8 port);
-
-int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
- u8 *vl_hw_cap, u8 local_port);
-
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
- u32 *rx_pause, u32 *tx_pause);
-
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
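/*
 * Sketch of the updated call sites implied by the prototype changes above
 * (hypothetical "foo_" caller): the capability query no longer takes a
 * cap_mode, and UAR mapping callers now state explicitly whether they want
 * a write-combining mapping.
 */
static int foo_init_caps_and_uar(struct mlx5_core_dev *dev,
				 struct mlx5_uar *uar)
{
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
	if (err)
		return err;

	/* false: plain, non write-combining mapping */
	return mlx5_alloc_map_uar(dev, uar, false);
}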
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 9b8a02b7880f..c15b8a864937 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -166,6 +166,8 @@ enum {
MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
+ MLX5_CMD_OP_SET_WOL_ROL = 0x830,
+ MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -616,6 +618,33 @@ struct mlx5_ifc_odp_cap_bits {
u8 reserved_at_e0[0x720];
};
+struct mlx5_ifc_calc_op {
+ u8 reserved_at_0[0x10];
+ u8 reserved_at_10[0x9];
+ u8 op_swap_endianness[0x1];
+ u8 op_min[0x1];
+ u8 op_xor[0x1];
+ u8 op_or[0x1];
+ u8 op_and[0x1];
+ u8 op_max[0x1];
+ u8 op_add[0x1];
+};
+
+struct mlx5_ifc_vector_calc_cap_bits {
+ u8 calc_matrix[0x1];
+ u8 reserved_at_1[0x1f];
+ u8 reserved_at_20[0x8];
+ u8 max_vec_count[0x8];
+ u8 reserved_at_30[0xd];
+ u8 max_chunk_size[0x3];
+ struct mlx5_ifc_calc_op calc0;
+ struct mlx5_ifc_calc_op calc1;
+ struct mlx5_ifc_calc_op calc2;
+ struct mlx5_ifc_calc_op calc3;
+
+ u8 reserved_at_e0[0x720];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -730,7 +759,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_1bf[0x3];
u8 log_max_msg[0x5];
- u8 reserved_at_1c7[0x18];
+ u8 reserved_at_1c7[0x4];
+ u8 max_tc[0x4];
+ u8 reserved_at_1cf[0x6];
+ u8 rol_s[0x1];
+ u8 rol_g[0x1];
+ u8 reserved_at_1d7[0x1];
+ u8 wol_s[0x1];
+ u8 wol_g[0x1];
+ u8 wol_a[0x1];
+ u8 wol_b[0x1];
+ u8 wol_m[0x1];
+ u8 wol_u[0x1];
+ u8 wol_p[0x1];
u8 stat_rate_support[0x10];
u8 reserved_at_1ef[0xc];
@@ -770,7 +811,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cd[0x1];
u8 reserved_at_22c[0x1];
u8 apm[0x1];
- u8 reserved_at_22e[0x2];
+ u8 vector_calc[0x1];
+ u8 reserved_at_22f[0x1];
u8 imaicl[0x1];
u8 reserved_at_231[0x4];
u8 qkv[0x1];
@@ -1940,6 +1982,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
u8 reserved_at_0[0x8000];
};
@@ -3667,6 +3710,12 @@ struct mlx5_ifc_query_hca_vport_pkey_in_bits {
u8 pkey_index[0x10];
};
+enum {
+ MLX5_HCA_VPORT_SEL_PORT_GUID = 1 << 0,
+ MLX5_HCA_VPORT_SEL_NODE_GUID = 1 << 1,
+ MLX5_HCA_VPORT_SEL_STATE_POLICY = 1 << 2,
+};
+
struct mlx5_ifc_query_hca_vport_gid_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6911,6 +6960,54 @@ struct mlx5_ifc_mtt_bits {
u8 rd_en[0x1];
};
+struct mlx5_ifc_query_wol_rol_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_wol_rol_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 rol_mode_valid[0x1];
+ u8 wol_mode_valid[0x1];
+ u8 reserved_at_42[0xe];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_at_60[0x20];
+};
+
enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
@@ -7102,4 +7199,49 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_100[0x100];
};
+struct mlx5_ifc_ets_tcn_config_reg_bits {
+ u8 g[0x1];
+ u8 b[0x1];
+ u8 r[0x1];
+ u8 reserved_at_3[0x9];
+ u8 group[0x4];
+ u8 reserved_at_10[0x9];
+ u8 bw_allocation[0x7];
+
+ u8 reserved_at_20[0xc];
+ u8 max_bw_units[0x4];
+ u8 reserved_at_30[0x8];
+ u8 max_bw_value[0x8];
+};
+
+struct mlx5_ifc_ets_global_config_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 r[0x1];
+ u8 reserved_at_3[0x1d];
+
+ u8 reserved_at_20[0xc];
+ u8 max_bw_units[0x4];
+ u8 reserved_at_30[0x8];
+ u8 max_bw_value[0x8];
+};
+
+struct mlx5_ifc_qetc_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_at_10[0x30];
+
+ struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8];
+ struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
+};
+
+struct mlx5_ifc_qtct_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_at_10[0xd];
+ u8 prio[0x3];
+
+ u8 reserved_at_20[0x1d];
+ u8 tclass[0x3];
+};
+
#endif /* MLX5_IFC_H */
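/*
 * Sketch: issuing the new SET_WOL_ROL command with the existing mlx5_ifc
 * accessors (MLX5_ST_SZ_DW/MLX5_SET); roughly what an mlx5_set_port_wol()
 * style helper could look like. Illustrative only; "foo_" is hypothetical.
 */
static int foo_set_wol_rol(struct mlx5_core_dev *dev, u8 wol_mode)
{
	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};

	MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
	MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}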
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
new file mode 100644
index 000000000000..a1d145abd4eb
--- /dev/null
+++ b/include/linux/mlx5/port.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_PORT_H__
+#define __MLX5_PORT_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+#endif /* __MLX5_PORT_H__ */
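/*
 * Sketch: mapping ethtool WAKE_* flags (from linux/ethtool.h) onto the
 * relocated port API and the MLX5_WOL_* flags in mlx5/device.h.
 * Hypothetical helper, not part of the header above.
 */
static int foo_set_wol(struct mlx5_core_dev *mdev, u32 wolopts)
{
	u8 mode = MLX5_WOL_DISABLE;

	if (wolopts & WAKE_MAGIC)
		mode |= MLX5_WOL_MAGIC;
	if (wolopts & WAKE_ARP)
		mode |= MLX5_WOL_ARP;
	if (wolopts & WAKE_BCAST)
		mode |= MLX5_WOL_BROADCAST;

	return mlx5_set_port_wol(mdev, mode);
}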
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index a9f2bcc98cab..bd93e6323603 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -93,6 +93,11 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- u8 port_num, void *out, size_t out_sz);
+ int vf, u8 port_num, void *out,
+ size_t out_sz);
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ int vf,
+ struct mlx5_hca_vport_context *req);
#endif /* __MLX5_VPORT_H__ */
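/*
 * Sketch: setting a VF's port-state policy through the new vport call.
 * The mlx5_hca_vport_context field names (field_select, policy) are
 * assumed here; MLX5_HCA_VPORT_SEL_* and MLX5_POLICY_* come from the
 * mlx5_ifc.h and driver.h additions earlier in this series.
 */
static int foo_set_vf_link_follow(struct mlx5_core_dev *dev, int vf)
{
	struct mlx5_hca_vport_context ctx = {};

	ctx.field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
	ctx.policy = MLX5_POLICY_FOLLOW;

	/* other_vport = 1, port_num = 1 */
	return mlx5_core_modify_hca_vport_context(dev, 1, 1, vf, &ctx);
}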
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dbf1eddab964..450fc977ed02 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -22,6 +22,7 @@
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
+#include <linux/page_ref.h>
struct mempolicy;
struct anon_vma;
@@ -82,6 +83,27 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define mm_forbids_zeropage(X) (0)
#endif
+/*
+ * Default maximum number of active map areas, this limits the number of vmas
+ * per mm struct. Users can overwrite this number by sysctl but there is a
+ * problem.
+ *
+ * When a program's coredump is generated in ELF format, a section is created
+ * per vma. In ELF, the number of sections is represented as an unsigned short,
+ * so the number of sections must stay below 65535 at coredump time. Because
+ * the kernel adds some informative sections to the program image when
+ * generating a coredump, we need some margin. The number of extra sections is
+ * currently 1-3 and depends on the arch, so we use "5" as a safe margin here.
+ *
+ * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
+ * is no longer a hard limit, although some userspace tools can still be
+ * surprised by that many sections.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
@@ -122,6 +144,7 @@ extern unsigned int kobjsize(const void *objp);
/*
* vm_flags in vm_area_struct, see mm_types.h.
+ * When changing, also update include/trace/events/mmflags.h
*/
#define VM_NONE 0x00000000
@@ -170,8 +193,26 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
+#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
+#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
+#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
+#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
+#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
+#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
+# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
+# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
+# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
+#endif
#elif defined(CONFIG_PPC)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
@@ -233,6 +274,8 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED 0x20 /* Second try */
#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
+#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
+#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
/*
* vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -364,8 +407,8 @@ static inline int pmd_devmap(pmd_t pmd)
*/
static inline int put_page_testzero(struct page *page)
{
- VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
- return atomic_dec_and_test(&page->_count);
+ VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+ return page_ref_dec_and_test(page);
}
/*
@@ -376,7 +419,7 @@ static inline int put_page_testzero(struct page *page)
*/
static inline int get_page_unless_zero(struct page *page)
{
- return atomic_inc_not_zero(&page->_count);
+ return page_ref_add_unless(page, 1, 0);
}
extern int page_is_ram(unsigned long pfn);
@@ -464,11 +507,6 @@ static inline int total_mapcount(struct page *page)
}
#endif
-static inline int page_count(struct page *page)
-{
- return atomic_read(&compound_head(page)->_count);
-}
-
static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
@@ -476,15 +514,6 @@ static inline struct page *virt_to_head_page(const void *x)
return compound_head(page);
}
-/*
- * Setup the page count before being freed into the page allocator for
- * the first time (boot or memory hotplug)
- */
-static inline void init_page_count(struct page *page)
-{
- atomic_set(&page->_count, 1);
-}
-
void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -694,8 +723,8 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_count.
*/
- VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
- atomic_inc(&page->_count);
+ VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+ page_ref_inc(page);
if (unlikely(is_zone_device_page(page)))
get_zone_device_page(page);
@@ -1043,8 +1072,6 @@ static inline void clear_page_pfmemalloc(struct page *page)
* just gets major/minor fault counters bumped up.
*/
-#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
-
#define VM_FAULT_OOM 0x0001
#define VM_FAULT_SIGBUS 0x0002
#define VM_FAULT_MAJOR 0x0004
@@ -1217,24 +1244,82 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int foll_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking);
-long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- struct vm_area_struct **vmas);
-long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- int *locked);
+long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+long get_user_pages6(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
unsigned int gup_flags);
-long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+/* suppress warnings from use in EXPORT_SYMBOL() */
+#ifndef __DISABLE_GUP_DEPRECATED
+#define __gup_deprecated __deprecated
+#else
+#define __gup_deprecated
+#endif
+/*
+ * These macros provide backward-compatibility with the old
+ * get_user_pages() variants which took tsk/mm. These
+ * functions/macros are marked compile-time __deprecated so we
+ * can catch old-style use without breaking the build. The actual
+ * functions also have WARN_ON()s to let us know at runtime if
+ * get_user_pages() should have been the "remote" variant.
+ *
+ * These are hideous, but temporary.
+ *
+ * If you run into one of these __deprecated warnings, look
+ * at how you are calling get_user_pages(). If you are calling
+ * it with current/current->mm as the first two arguments,
+ * simply remove those arguments. The behavior will be the same
+ * as it is now. If you are calling it on another task, use
+ * get_user_pages_remote() instead.
+ *
+ * Any questions? Ask Dave Hansen <dave@sr71.net>
+ */
+long
+__gup_deprecated
+get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
+ get_user_pages
+#define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \
+ get_user_pages8, x, \
+ get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
+
+__gup_deprecated
+long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked);
+#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
+ get_user_pages_locked
+#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \
+ get_user_pages_locked8, x, \
+ get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
+
+__gup_deprecated
+long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
+#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) \
+ get_user_pages_unlocked
+#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \
+ get_user_pages_unlocked7, x, \
+ get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
+
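/*
 * Minimal migration sketch for the comment above (hypothetical "foo_"
 * callers; the caller still holds mmap_sem for read, as before): a same-mm
 * caller just drops the tsk/mm arguments and hits the 6-argument form,
 * while code pinning pages in another process's mm switches to
 * get_user_pages_remote().
 */
static long foo_pin_current(unsigned long addr, struct page **page)
{
	/* was: get_user_pages(current, current->mm, addr, 1, 1, 0, page, NULL); */
	return get_user_pages(addr, 1, 1, 0, page, NULL);
}

static long foo_pin_remote(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long addr, struct page **page)
{
	return get_user_pages_remote(tsk, mm, addr, 1, 1, 0, page, NULL);
}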
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
@@ -1523,8 +1608,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm)
}
#endif
-int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
/*
@@ -1650,15 +1734,15 @@ static inline void pgtable_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
-#define pte_alloc_map(mm, vma, pmd, address) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
- pmd, address))? \
- NULL: pte_offset_map(pmd, address))
+#define pte_alloc(mm, pmd, address) \
+ (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+
+#define pte_alloc_map(mm, pmd, address) \
+ (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
- pmd, address))? \
- NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+ (pte_alloc(mm, pmd, address) ? \
+ NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
@@ -1853,6 +1937,7 @@ extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
+extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -1867,6 +1952,7 @@ extern void zone_pcp_reset(struct zone *zone);
/* page_alloc.c */
extern int min_free_kbytes;
+extern int watermark_scale_factor;
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
@@ -2161,6 +2247,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
+#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
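/*
 * Sketch: with the vma argument dropped from __pte_alloc(), a typical
 * fault-path user of the reworked pte_alloc_map() helper looks like this
 * (hypothetical handler, illustrative only).
 */
static int foo_install_pte(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr)
{
	pte_t *pte = pte_alloc_map(mm, pmd, addr);

	if (!pte)
		return VM_FAULT_OOM;
	/* ... write the entry ... */
	pte_unmap(pte);
	return 0;
}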
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 16373c8f5f57..33e17f6a327a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -35,7 +35,7 @@ static inline void vm_unacct_memory(long pages)
*/
#ifndef arch_calc_vm_prot_bits
-#define arch_calc_vm_prot_bits(prot) 0
+#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif
#ifndef arch_vm_get_page_prot
@@ -70,12 +70,12 @@ static inline int arch_validate_prot(unsigned long prot)
* Combine the mmap "prot" argument into "vm_flags" used internally.
*/
static inline unsigned long
-calc_vm_prot_bits(unsigned long prot)
+calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
_calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
_calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
- arch_calc_vm_prot_bits(prot);
+ arch_calc_vm_prot_bits(prot, pkey);
}
/*
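/*
 * Sketch: one way an architecture might implement the new two-argument
 * hook, folding the protection key into the VM_PKEY_BIT* flags added in
 * mm.h (x86-flavoured, purely illustrative).
 */
#define arch_calc_vm_prot_bits(prot, key) \
	(((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \
	 ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \
	 ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \
	 ((key) & 0x8 ? VM_PKEY_BIT3 : 0))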
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 37967b6da03c..b01e77de1a74 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -113,7 +113,6 @@ struct mmc_data {
#define MMC_DATA_WRITE (1 << 8)
#define MMC_DATA_READ (1 << 9)
-#define MMC_DATA_STREAM (1 << 10)
unsigned int bytes_xfered;
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 89df7abedd67..7b41c6db1bb6 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -235,21 +235,11 @@ struct dw_mci_dma_ops {
};
/* IP Quirks/flags. */
-/* Unreliable card detection */
-#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(0)
/* Timer for broken data transfer over scheme */
-#define DW_MCI_QUIRK_BROKEN_DTO BIT(1)
+#define DW_MCI_QUIRK_BROKEN_DTO BIT(0)
struct dma_pdata;
-struct block_settings {
- unsigned short max_segs; /* see blk_queue_max_segments */
- unsigned int max_blk_size; /* maximum size of one mmc block */
- unsigned int max_blk_count; /* maximum number of blocks in one req*/
- unsigned int max_req_size; /* maximum number of bytes in one req*/
- unsigned int max_seg_size; /* see blk_queue_max_segment_size */
-};
-
/* Board platform data */
struct dw_mci_board {
u32 num_slots;
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
index 84d9053b5dca..5f5cd80e9765 100644
--- a/include/linux/mmc/tmio.h
+++ b/include/linux/mmc/tmio.h
@@ -1,6 +1,8 @@
/*
* include/linux/mmc/tmio.h
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
@@ -61,6 +63,9 @@
#define TMIO_STAT_CMD_BUSY 0x40000000
#define TMIO_STAT_ILL_ACCESS 0x80000000
+#define CLK_CTL_DIV_MASK 0xff
+#define CLK_CTL_SCLKEN BIT(8)
+
#define TMIO_BBS 512 /* Boot block size */
#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6de02ac378a0..c60df9257cc7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -668,6 +668,12 @@ typedef struct pglist_data {
mem_hotplug_begin/end() */
int kswapd_max_order;
enum zone_type classzone_idx;
+#ifdef CONFIG_COMPACTION
+ int kcompactd_max_order;
+ enum zone_type kcompactd_classzone_idx;
+ wait_queue_head_t kcompactd_wait;
+ struct task_struct *kcompactd;
+#endif
#ifdef CONFIG_NUMA_BALANCING
/* Lock serializing the migrate rate limiting window */
spinlock_t numabalancing_migrate_lock;
@@ -835,6 +841,8 @@ static inline int is_highmem(struct zone *zone)
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 36bb6a503f19..3bf8f954b642 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -166,7 +166,6 @@ struct bbm_info {
};
/* OneNAND BBT interface */
-extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int onenand_default_bbt(struct mtd_info *mtd);
#endif /* __LINUX_MTD_BBM_H */
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 02cd5f9b79b8..8255118be0f0 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -44,7 +44,6 @@ struct INFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};
int INFTL_mount(struct INFTLrecord *s);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 58f3ba709ade..5e0eb7ccabd4 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -240,8 +240,11 @@ struct map_info {
If there is no cache to care about this can be set to NULL. */
void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
- /* set_vpp() must handle being reentered -- enable, enable, disable
- must leave it enabled. */
+ /* This will be called with 1 as parameter when the first map user
+ * needs VPP, and called with 0 when the last user exits. The map
+ * core maintains a reference counter, and assumes that VPP is a
+ * global resource applying to all mapped flash chips on the system.
+ */
void (*set_vpp)(struct map_info *, int);
unsigned long pfow_base;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cc84923011c0..771272187316 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -105,7 +105,6 @@ struct mtd_oob_ops {
struct nand_ecclayout {
__u32 eccbytes;
__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
- __u32 oobavail;
struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};
@@ -265,6 +264,11 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return mtd->dev.of_node;
}
+static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+{
+ return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
+}
+
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index bdd68e22b5a5..56574ba36555 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -168,6 +168,12 @@ typedef enum {
/* Device supports subpage reads */
#define NAND_SUBPAGE_READ 0x00001000
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING 0x00002000
+
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
@@ -666,7 +672,7 @@ struct nand_chip {
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
+ int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
int (*dev_ready)(struct mtd_info *mtd);
@@ -896,7 +902,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
* @chip_delay: R/B delay value in us
* @options: Option flags, e.g. 16bit buswidth
* @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @ecclayout: ECC layout info structure
* @part_probe_types: NULL-terminated array of probe types
*/
struct platform_nand_chip {
@@ -904,7 +909,6 @@ struct platform_nand_chip {
int chip_offset;
int nr_partitions;
struct mtd_partition *partitions;
- struct nand_ecclayout *ecclayout;
int chip_delay;
unsigned int options;
unsigned int bbt_options;
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index fb0bc3420a10..98f20ef05d60 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
/*
* Initialize BCH encoder/decoder
*/
-struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout);
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
/*
* Release BCH encoder/decoder resources
*/
@@ -58,9 +56,7 @@ nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
return -ENOTSUPP;
}
-static inline struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout)
+static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
return NULL;
}
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index b059629e22bc..044daa02b8ff 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -50,7 +50,6 @@ struct NFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};
int NFTL_mount(struct NFTLrecord *s);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 62356d50815b..3c36113a88e1 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -85,6 +85,7 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_TB BIT(5) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
@@ -116,6 +117,7 @@ enum spi_nor_ops {
enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
+ SNOR_F_HAS_SR_TB = BIT(1),
};
/**
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d0f25d81b46a..77d01700daf7 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -31,6 +31,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_PARENT 0x0010
#define LOOKUP_REVAL 0x0020
#define LOOKUP_RCU 0x0040
+#define LOOKUP_NO_REVAL 0x0080
/*
* Intent data
diff --git a/include/linux/net.h b/include/linux/net.h
index 0b4ac7da583a..49175e4ced11 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -215,6 +215,7 @@ int __sock_create(struct net *net, int family, int type, int proto,
int sock_create(int family, int type, int proto, struct socket **res);
int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
+struct socket *sock_alloc(void);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index d9654f0eecb3..a734bf43d190 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -67,6 +67,8 @@ enum {
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
+
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -124,6 +126,7 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5440b7b705eb..cb0d5d09c2e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
+#include <uapi/linux/pkt_cls.h>
struct netpoll_info;
struct device;
@@ -80,8 +81,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
- * higher layers. Virtual network devices transmit synchronously, in this case
- * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * higher layers. Virtual network devices transmit synchronously; in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), and all
* others are propagated to higher layers.
*/
@@ -128,7 +129,7 @@ static inline bool dev_xmit_complete(int rc)
}
/*
- * Compute the worst case header length according to the protocols
+ * Compute the worst-case header length according to the protocols
* used.
*/
@@ -245,7 +246,7 @@ struct hh_cache {
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
-/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
+/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
* (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
@@ -267,10 +268,11 @@ struct header_ops {
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
+ bool (*validate)(const char *ll_header, unsigned int len);
};
/* These flag bits are private to the generic network queueing
- * layer, they may not be explicitly referenced by any other
+ * layer; they may not be explicitly referenced by any other
* code.
*/
@@ -284,7 +286,7 @@ enum netdev_state_t {
/*
- * This structure holds at boot time configured netdevice settings. They
+ * This structure holds boot-time configured netdevice settings. They
* are then used in the device probing.
*/
struct netdev_boot_setup {
@@ -302,7 +304,7 @@ struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
- * to the per-cpu poll_list, and whoever clears that bit
+ * to the per-CPU poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list;
@@ -348,7 +350,7 @@ typedef enum gro_result gro_result_t;
* @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
* case skb->dev was changed by rx_handler.
* @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
- * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
*
* rx_handlers are functions called from inside __netif_receive_skb(), to do
* special processing of the skb, prior to delivery to protocol handlers.
@@ -363,19 +365,19 @@ typedef enum gro_result gro_result_t;
* Upon return, rx_handler is expected to tell __netif_receive_skb() what to
* do with the skb.
*
- * If the rx_handler consumed to skb in some way, it should return
+ * If the rx_handler consumed the skb in some way, it should return
* RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
- * the skb to be delivered in some other ways.
+ * the skb to be delivered in some other way.
*
* If the rx_handler changed skb->dev, to divert the skb to another
* net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
* new device will be called if it exists.
*
- * If the rx_handler consider the skb should be ignored, it should return
+ * If the rx_handler decides the skb should be ignored, it should return
* RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
* are registered on exact device (ptype->dev == skb->dev).
*
- * If the rx_handler didn't changed skb->dev, but want the skb to be normally
+ * If the rx_handler didn't change skb->dev, but wants the skb to be normally
* delivered, it should return RX_HANDLER_PASS.
*
* A device without a registered rx_handler will behave as if rx_handler
@@ -400,11 +402,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
}
/**
- * napi_schedule_prep - check if napi can be scheduled
- * @n: napi context
+ * napi_schedule_prep - check if NAPI can be scheduled
+ * @n: NAPI context
*
* Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable
+ * it as running. This is used as a condition variable to
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
@@ -416,7 +418,7 @@ static inline bool napi_schedule_prep(struct napi_struct *n)
/**
* napi_schedule - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Schedule NAPI poll routine to be called if it is not already
* running.
@@ -429,7 +431,7 @@ static inline void napi_schedule(struct napi_struct *n)
/**
* napi_schedule_irqoff - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Variant of napi_schedule(), assuming hard irqs are masked.
*/
@@ -453,7 +455,7 @@ void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
- * @n: napi context
+ * @n: NAPI context
*
* Mark NAPI processing as complete.
* Consider using napi_complete_done() instead.
@@ -465,32 +467,32 @@ static inline void napi_complete(struct napi_struct *n)
/**
* napi_hash_add - add a NAPI to global hashtable
- * @napi: napi context
+ * @napi: NAPI context
*
- * generate a new napi_id and store a @napi under it in napi_hash
- * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Generate a new napi_id and store a @napi under it in napi_hash.
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
* Note: This is normally automatically done from netif_napi_add(),
- * so might disappear in a future linux version.
+ * so might disappear in a future Linux version.
*/
void napi_hash_add(struct napi_struct *napi);
/**
* napi_hash_del - remove a NAPI from global table
- * @napi: napi context
+ * @napi: NAPI context
*
- * Warning: caller must observe rcu grace period
+ * Warning: caller must observe RCU grace period
* before freeing memory containing @napi, if
* this function returns true.
* Note: core networking stack automatically calls it
- * from netif_napi_del()
+ * from netif_napi_del().
* Drivers might want to call this helper to combine all
- * the needed rcu grace periods into a single one.
+ * the needed RCU grace periods into a single one.
*/
bool napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
@@ -499,7 +501,7 @@ void napi_disable(struct napi_struct *n);
/**
* napi_enable - enable NAPI scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Resume NAPI from being scheduled on this context.
* Must be paired with napi_disable.
@@ -514,7 +516,7 @@ static inline void napi_enable(struct napi_struct *n)
/**
* napi_synchronize - wait until NAPI is not running
- * @n: napi context
+ * @n: NAPI context
*
* Wait until NAPI is done being scheduled on this context.
* Waits till any outstanding processing completes but
@@ -557,7 +559,7 @@ enum netdev_queue_state_t {
struct netdev_queue {
/*
- * read mostly part
+ * read-mostly part
*/
struct net_device *dev;
struct Qdisc __rcu *qdisc;
@@ -569,7 +571,7 @@ struct netdev_queue {
int numa_node;
#endif
/*
- * write mostly part
+ * write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
@@ -646,11 +648,11 @@ struct rps_dev_flow_table {
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high order bits
- * of flow hash, lower part is cpu number.
+ * Each entry is a 32bit value. Upper part is the high-order bits
+ * of flow hash, lower part is CPU number.
* rps_cpu_mask is used to partition the space, depending on number of
- * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
+ * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
@@ -672,7 +674,7 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
unsigned int index = hash & table->mask;
u32 val = hash & ~rps_cpu_mask;
- /* We only give a hint, preemption can change cpu under us */
+ /* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
if (table->ents[index] != val)
@@ -778,27 +780,48 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
+/* These structures hold the attributes of qdisc and classifiers
+ * that are being passed to the netdevice through the setup_tc op.
+ */
+enum {
+ TC_SETUP_MQPRIO,
+ TC_SETUP_CLSU32,
+ TC_SETUP_CLSFLOWER,
+};
+
+struct tc_cls_u32_offload;
+
+struct tc_to_netdev {
+ unsigned int type;
+ union {
+ u8 tc;
+ struct tc_cls_u32_offload *cls_u32;
+ struct tc_cls_flower_offload *cls_flower;
+ };
+};
+
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
- * This function is called once when network device is registered.
- * The network device can use this to any late stage initializaton
- * or semantic validattion. It can fail with an error code which will
- * be propogated back to register_netdev
+ * This function is called once when a network device is registered.
+ * The network device can use this for any late stage initialization
+ * or semantic validation. It can fail with an error code which will
+ * be propagated back to register_netdev.
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
- * This function is called when network device transistions to the up
+ * This function is called when a network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
- * This function is called when network device transistions to the down
+ * This function is called when a network device transitions to the down
* state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
@@ -809,7 +832,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
- * Required can not be NULL.
+ * Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -819,34 +842,34 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
- * Called to decide which queue to when device supports multiple
+ * Called to decide which queue to use when device supports multiple
* transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow device receiver to make
- * changes to configuration when multicast or promiscious is enabled.
+ * changes to configuration when multicast or promiscuous is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
* This function is called device changes address list filtering.
* If driver handles unicast address filtering, it should set
- * IFF_UNICAST_FLT to its priv_flags.
+ * IFF_UNICAST_FLT in its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
- * mac address can not be changed.
+ * MAC address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user request an ioctl which can't be handled by
- * the generic interface code. If not defined ioctl's return
+ * Called when a user requests an ioctl which can't be handled by
+ * the generic interface code. If not defined ioctls return
* not supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
- * is retained for legacy reason, new devices should use the bus
+ * is retained for legacy reasons; new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
@@ -855,7 +878,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
- * Callback uses when the transmitter has not made any progress
+ * Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
@@ -873,11 +896,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* neither operation.
*
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If device supports VLAN filtering this function is called when a
* VLAN id is registered.
*
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If device supports VLAN filtering this function is called when a
* VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
@@ -897,7 +920,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* Enable or disable the VF ability to query its RSS Redirection Table and
* Hash Key. This is needed since on some devices VF share this information
- * with PF and querying it may adduce a theoretical security risk.
+ * with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
@@ -1007,20 +1030,20 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* void (*ndo_add_vxlan_port)(struct net_device *dev,
* sa_family_t sa_family, __be16 port);
- * Called by vxlan to notiy a driver about the UDP port and socket
- * address family that vxlan is listnening to. It is called only when
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
* void (*ndo_add_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify a driver about the UDP port and socket
* address family that geneve is listnening to. It is called only when
* a new port starts listening. The operation is protected by the
* geneve_net->sock_lock.
*
* void (*ndo_del_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify the driver about a UDP port and socket
* address family that geneve is not listening to anymore. The operation
* is protected by the geneve_net->sock_lock.
@@ -1049,9 +1072,9 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ * struct net_device *dev
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
@@ -1065,7 +1088,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
* void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
+ * bool proto_down);
* This function is used to pass protocol port error state information
* to the switch driver. The switch driver can react to the proto_down
* by doing a phys down on the associated switch port.
@@ -1073,6 +1096,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
* sampling packet.
+ * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
+ * This function is used to specify the headroom that the skb must
+ * account for when skbs are allocated during packet reception. Setting
+ * an appropriate rx headroom value allows avoiding an skb head copy on
+ * forward. Setting a negative value resets the rx headroom to the
+ * default value.
*
*/
struct net_device_ops {
@@ -1147,10 +1176,16 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_guid)(struct net_device *dev,
+ int vf, u64 guid,
+ int guid_type);
int (*ndo_set_vf_rss_query_en)(
struct net_device *dev,
int vf, bool setting);
- int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
+ int (*ndo_setup_tc)(struct net_device *dev,
+ u32 handle,
+ __be16 protocol,
+ struct tc_to_netdev *tc);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -1255,6 +1290,8 @@ struct net_device_ops {
bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
+ void (*ndo_set_rx_headroom)(struct net_device *dev,
+ int needed_headroom);
};
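/*
 * Sketch: a driver-side ndo_setup_tc under the new signature switches on
 * the offload type instead of taking a raw TC count. "foo_" names are
 * hypothetical; real drivers would also handle TC_SETUP_CLSU32 etc.
 */
static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return foo_configure_mqprio(dev, tc->tc);	/* hypothetical helper */
}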
/**
@@ -1262,7 +1299,7 @@ struct net_device_ops {
*
* These are the &struct net_device, they are only set internally
* by drivers and used in the kernel. These flags are invisible to
- * userspace, this means that the order of these flags can change
+ * userspace; this means that the order of these flags can change
* during any kernel release.
*
* You should have a pretty good reason to be extending these flags.
@@ -1286,11 +1323,19 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
+ * underlying stacked devices
+ * @IFF_IPVLAN_MASTER: IPvlan master device
+ * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
* @IFF_TEAM: device is a team device
+ * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
+ * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
+ * entity (i.e. the master device for bridged veth)
+ * @IFF_MACSEC: device is a MACsec device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1318,6 +1363,9 @@ enum netdev_priv_flags {
IFF_OPENVSWITCH = 1<<22,
IFF_L3MDEV_SLAVE = 1<<23,
IFF_TEAM = 1<<24,
+ IFF_RXFH_CONFIGURED = 1<<25,
+ IFF_PHONY_HEADROOM = 1<<26,
+ IFF_MACSEC = 1<<27,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1345,6 +1393,8 @@ enum netdev_priv_flags {
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
#define IFF_TEAM IFF_TEAM
+#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
+#define IFF_MACSEC IFF_MACSEC
/**
* struct net_device - The DEVICE structure.
@@ -1367,10 +1417,12 @@ enum netdev_priv_flags {
*
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
- * @napi_list: List entry, that is used for polling napi devices
- * @unreg_list: List entry, that is used, when we are unregistering the
- * device, see the function unregister_netdev
- * @close_list: List entry, that is used, when we are closing the device
+ * @napi_list: List entry used for polling NAPI devices
+ * @unreg_list: List entry when we are unregistering the
+ * device; see the function unregister_netdev
+ * @close_list: List entry used when we are closing the device
+ * @ptype_all: Device-specific packet handlers for all protocols
+ * @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
* @all_adj_list: All linked devices, *including* neighbours
@@ -1388,7 +1440,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @group: The group, that the device belongs to
+ * @group: The group the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1397,6 +1449,8 @@ enum netdev_priv_flags {
* do not use this in drivers
* @tx_dropped: Dropped packets by core network,
* do not use this in drivers
+ * @rx_nohandler: nohandler dropped packets by core network on
+ * inactive devices, do not use this in drivers
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
@@ -1420,8 +1474,7 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
- * @hard_header_len: Hardware header length, which means that this is the
- * minimum size of a packet.
+ * @hard_header_len: Maximum hardware header length.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1441,7 +1494,7 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc_promisc: Counter, that indicates, that promiscuous mode
+ * @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
@@ -1449,9 +1502,9 @@ enum netdev_priv_flags {
* @mc: multicast mac addresses
* @dev_addrs: list of device hw addresses
* @queues_kset: Group of all Kobjects in the Tx and RX queues
- * @promiscuity: Number of times, the NIC is told to work in
- * Promiscuous mode, if it becomes 0 the NIC will
- * exit from working in Promiscuous mode
+ * @promiscuity: Number of times the NIC is told to work in
+ * promiscuous mode; if it becomes 0 the NIC will
+ * exit promiscuous mode
* @allmulti: Counter, enables or disables allmulticast mode
*
* @vlan_info: VLAN info
@@ -1497,7 +1550,7 @@ enum netdev_priv_flags {
*
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
- * the watchdog ( see dev_watchdog() )
+ * the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
* @pcpu_refcnt: Number of references to this device
@@ -1611,10 +1664,11 @@ struct net_device {
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
+ atomic_long_t rx_nohandler;
#ifdef CONFIG_WIRELESS_EXT
- const struct iw_handler_def * wireless_handlers;
- struct iw_public_data * wireless_data;
+ const struct iw_handler_def *wireless_handlers;
+ struct iw_public_data *wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
@@ -1667,7 +1721,7 @@ struct net_device {
unsigned int allmulti;
- /* Protocol specific pointers */
+ /* Protocol-specific pointers */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
@@ -1697,13 +1751,11 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
-
#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-
#endif
unsigned long gro_flush_timeout;
@@ -1795,7 +1847,7 @@ struct net_device {
struct garp_port __rcu *garp_port;
struct mrp_port __rcu *mrp_port;
- struct device dev;
+ struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
@@ -1810,9 +1862,9 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
- u8 prio_tc_map[TC_BITMASK + 1];
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
unsigned int fcoe_ddp_xid;
@@ -1820,9 +1872,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
- struct phy_device *phydev;
- struct lock_class_key *qdisc_tx_busylock;
- bool proto_down;
+ struct phy_device *phydev;
+ struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1908,6 +1960,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
+/* returns the headroom that the master device needs to take into account
+ * when forwarding to this dev
+ */
+static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
+}
+
+static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ if (dev->netdev_ops->ndo_set_rx_headroom)
+ dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
+}
+
+/* set the device rx headroom to the dev's default */
+static inline void netdev_reset_rx_headroom(struct net_device *dev)
+{
+ netdev_set_rx_headroom(dev, -1);
+}
+
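A short usage sketch may help here; it is illustrative only. The forwarding and attach callbacks and the master/port naming are invented for the example — only netdev_get_fwd_headroom(), netdev_set_rx_headroom() and the IFF_PHONY_HEADROOM semantics above come from this patch, while skb_cow_head(), kfree_skb() and dev_queue_xmit() are the usual skbuff/netdevice helpers.

/* Illustrative sketch: a hypothetical bridge-like master making sure an
 * skb it forwards to a port carries the headroom that port asks for, and
 * pushing its own RX headroom requirement down when a port is attached.
 */
static int example_master_forward(struct net_device *port, struct sk_buff *skb)
{
	unsigned int headroom = netdev_get_fwd_headroom(port);

	/* ports flagged IFF_PHONY_HEADROOM report 0 here */
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	skb->dev = port;
	return dev_queue_xmit(skb);
}

static void example_master_attach_port(struct net_device *master,
				       struct net_device *port)
{
	/* ask the port to allocate RX skbs with the master's headroom;
	 * netdev_reset_rx_headroom(port) would restore its default
	 */
	netdev_set_rx_headroom(port, master->needed_headroom);
}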
/*
* Net namespace inlines
*/
@@ -1950,7 +2022,7 @@ static inline void *netdev_priv(const struct net_device *dev)
/* Set the sysfs device type for the network logical device to allow
* fine-grained identification of different network device types. For
- * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
+ * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
@@ -1960,22 +2032,22 @@ static inline void *netdev_priv(const struct net_device *dev)
#define NAPI_POLL_WEIGHT 64
/**
- * netif_napi_add - initialize a napi context
+ * netif_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
- * netif_napi_add() must be used to initialize a napi context prior to calling
- * *any* of the other napi related functions.
+ * netif_napi_add() must be used to initialize a NAPI context prior to calling
+ * *any* of the other NAPI-related functions.
*/
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
- * netif_tx_napi_add - initialize a napi context
+ * netif_tx_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
@@ -1993,22 +2065,22 @@ static inline void netif_tx_napi_add(struct net_device *dev,
}
/**
- * netif_napi_del - remove a napi context
- * @napi: napi context
+ * netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
*
- * netif_napi_del() removes a napi context from the network device napi list
+ * netif_napi_del() removes a NAPI context from the network device NAPI list
*/
void netif_napi_del(struct napi_struct *napi);
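Since the comments above insist that netif_napi_add() runs before any other NAPI call, a minimal driver-side sketch of the expected call order follows. The example_* names and the private structure are invented; only netif_napi_add(), netif_napi_del(), napi_complete() and NAPI_POLL_WEIGHT are assumed from the existing NAPI API.

struct example_priv {
	struct net_device *netdev;
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... process up to @budget received packets, counting them ... */
	if (done < budget)
		napi_complete(napi);
	return done;
}

static void example_setup(struct example_priv *priv)
{
	/* must come before napi_enable()/napi_schedule() and friends */
	netif_napi_add(priv->netdev, &priv->napi, example_poll,
		       NAPI_POLL_WEIGHT);
}

static void example_teardown(struct example_priv *priv)
{
	netif_napi_del(&priv->napi);
}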
struct napi_gro_cb {
/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
+ void *frag0;
/* Length of frag0. */
unsigned int frag0_len;
/* This indicates where we are processing relative to skb->data. */
- int data_offset;
+ int data_offset;
/* This is non-zero if the packet cannot be merged with the new skb. */
u16 flush;
@@ -2031,8 +2103,8 @@ struct napi_gro_cb {
/* This is non-zero if the packet may be of the same flow. */
u8 same_flow:1;
- /* Used in udp_gro_receive */
- u8 udp_mark:1;
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
/* GRO checksum is valid */
u8 csum_valid:1;
@@ -2104,7 +2176,7 @@ struct udp_offload {
struct udp_offload_callbacks callbacks;
};
-/* often modified stats are per cpu, other are shared (netdev->stats) */
+/* often modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
u64 rx_bytes;
@@ -2201,7 +2273,7 @@ struct netdev_notifier_changeupper_info {
struct netdev_notifier_info info; /* must be first */
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
- bool linking; /* is the nofication for link or unlink */
+ bool linking; /* is the notification for link or unlink */
void *upper_info; /* upper dev info */
};
@@ -2627,6 +2699,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+/* ll_header must have at least hard_header_len allocated */
+static inline bool dev_validate_header(const struct net_device *dev,
+ char *ll_header, int len)
+{
+ if (likely(len >= dev->hard_header_len))
+ return true;
+
+ if (capable(CAP_SYS_RAWIO)) {
+ memset(ll_header + len, 0, dev->hard_header_len - len);
+ return true;
+ }
+
+ if (dev->header_ops && dev->header_ops->validate)
+ return dev->header_ops->validate(ll_header, len);
+
+ return false;
+}
+
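A hedged sketch of how a caller might use the new helper. The transmit routine is hypothetical and loosely modelled on a packet-socket style sender, where the caller has already written its own link-layer header at skb->data and the skb was sized with dev->hard_header_len of slack so a short header can be zero-padded in place.

static int example_xmit_user_frame(struct net_device *dev, struct sk_buff *skb)
{
	/* reject malformed caller-built headers before they hit the driver */
	if (!dev_validate_header(dev, skb->data, skb->len)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	skb->dev = dev;
	return dev_queue_xmit(skb);
}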
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2648,7 +2738,7 @@ extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
/*
- * Incoming packets are placed on per-cpu queues
+ * Incoming packets are placed on per-CPU queues
*/
struct softnet_data {
struct list_head poll_list;
@@ -2818,7 +2908,7 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their ndo_start_xmit(),
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
@@ -2832,7 +2922,7 @@ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_que
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their TX completion path,
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
@@ -2971,7 +3061,7 @@ static inline bool netif_running(const struct net_device *dev)
}
/*
- * Routines to manage the subqueues on a device. We only need start
+ * Routines to manage the subqueues on a device. We only need start,
* stop, and a check if it's stopped. All other device management is
* done at the overall netdevice level.
* Also test the device if we're multiqueue.
@@ -3255,7 +3345,6 @@ void netif_carrier_off(struct net_device *dev);
* in a "pending" state, waiting for some external event. For "on-
* demand" interfaces, this new state identifies the situation where the
* interface is waiting for events to place it in the up state.
- *
*/
static inline void netif_dormant_on(struct net_device *dev)
{
@@ -3590,7 +3679,7 @@ void dev_uc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_uc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3606,7 +3695,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_uc_sync().
- **/
+ */
static inline void __dev_uc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
@@ -3634,7 +3723,7 @@ void dev_mc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_mc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3650,7 +3739,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_mc_sync().
- **/
+ */
static inline void __dev_mc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
@@ -3741,7 +3830,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
-extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev,
@@ -3965,6 +4054,11 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
skb->mac_len = mac_len;
}
+static inline bool netif_is_macsec(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACSEC;
+}
+
static inline bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
@@ -4045,6 +4139,11 @@ static inline bool netif_is_lag_port(const struct net_device *dev)
return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
+static inline bool netif_is_rxfh_configured(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_RXFH_CONFIGURED;
+}
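The new flag mostly matters to drivers that rebuild their RSS indirection table when the queue count changes. A rough sketch follows; the reconfiguration callback is invented, and it assumes (as the flag's description suggests) that IFF_RXFH_CONFIGURED is set once user space has programmed the table, e.g. through ethtool.

static void example_set_queue_count(struct net_device *dev,
				    unsigned int new_count)
{
	/* leave a user-programmed indirection table alone */
	if (!netif_is_rxfh_configured(dev)) {
		/* ... spread flows evenly over new_count RX queues ... */
	}
	/* ... resize rings, update real_num_rx_queues, etc. ... */
}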
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0ad556726181..9230f9aee896 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -141,22 +141,6 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
-static inline bool nf_hook_list_active(struct list_head *hook_list,
- u_int8_t pf, unsigned int hook)
-{
- if (__builtin_constant_p(pf) &&
- __builtin_constant_p(hook))
- return static_key_false(&nf_hooks_needed[pf][hook]);
-
- return !list_empty(hook_list);
-}
-#else
-static inline bool nf_hook_list_active(struct list_head *hook_list,
- u_int8_t pf, unsigned int hook)
-{
- return !list_empty(hook_list);
-}
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
@@ -177,9 +161,18 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- struct list_head *hook_list = &net->nf.hooks[pf][hook];
+ struct list_head *hook_list;
+
+#ifdef HAVE_JUMP_LABEL
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook) &&
+ !static_key_false(&nf_hooks_needed[pf][hook]))
+ return 1;
+#endif
+
+ hook_list = &net->nf.hooks[pf][hook];
- if (nf_hook_list_active(hook_list, pf, hook)) {
+ if (!list_empty(hook_list)) {
struct nf_hook_state state;
nf_hook_state_init(&state, hook_list, hook, thresh,
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index ba0d9789eb6e..1d82dd5e9a08 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -34,8 +34,6 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
int nfnetlink_has_listeners(struct net *net, unsigned int group);
-struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c5577410c25d..80a305b85323 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -200,6 +200,9 @@ struct xt_table {
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
+ /* called when table is needed in the given netns */
+ int (*table_init)(struct net *net);
+
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -408,8 +411,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
return cnt;
}
-struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 6f074db2f23d..029b95e8924e 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -48,10 +48,11 @@ struct arpt_error {
}
extern void *arpt_alloc_initial_table(const struct xt_table *);
-extern struct xt_table *arpt_register_table(struct net *net,
- const struct xt_table *table,
- const struct arpt_replace *repl);
-extern void arpt_unregister_table(struct xt_table *table);
+int arpt_register_table(struct net *net, const struct xt_table *table,
+ const struct arpt_replace *repl,
+ const struct nf_hook_ops *ops, struct xt_table **res);
+void arpt_unregister_table(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
extern unsigned int arpt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index aa598f942c01..7bfc5893ec31 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -24,10 +24,11 @@
extern void ipt_init(void) __init;
-extern struct xt_table *ipt_register_table(struct net *net,
- const struct xt_table *table,
- const struct ipt_replace *repl);
-extern void ipt_unregister_table(struct net *net, struct xt_table *table);
+int ipt_register_table(struct net *net, const struct xt_table *table,
+ const struct ipt_replace *repl,
+ const struct nf_hook_ops *ops, struct xt_table **res);
+void ipt_unregister_table(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
/* Standard entry. */
struct ipt_standard {
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 0f76e5c674f9..b21c392d6012 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -25,10 +25,11 @@
extern void ip6t_init(void) __init;
extern void *ip6t_alloc_initial_table(const struct xt_table *);
-extern struct xt_table *ip6t_register_table(struct net *net,
- const struct xt_table *table,
- const struct ip6t_replace *repl);
-extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
+int ip6t_register_table(struct net *net, const struct xt_table *table,
+ const struct ip6t_replace *repl,
+ const struct nf_hook_ops *ops, struct xt_table **res);
+void ip6t_unregister_table(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
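The arp/ip/ip6 table registration calls above all move to the same shape; a rough sketch of adopting it for an IPv6 table follows. Everything named example_* is hypothetical (including the struct xt_table, assumed to be defined elsewhere with .table_init pointing at example_table_init, and the netns member used to store the result); only ip6t_register_table(), ip6t_unregister_table(), ip6t_alloc_initial_table() and xt_hook_ops_alloc() come from the declarations in this patch, and the usual slab/netns headers are assumed.

static struct nf_hook_ops *example_ops __read_mostly;
	/* obtained once via xt_hook_ops_alloc(&example_table, example_hook) */

static int __net_init example_table_init(struct net *net)
{
	struct ip6t_replace *repl;
	int ret;

	repl = ip6t_alloc_initial_table(&example_table);
	if (!repl)
		return -ENOMEM;
	/* the registered table now comes back through the last argument */
	ret = ip6t_register_table(net, &example_table, repl, example_ops,
				  &net->ipv6.example_table);
	kfree(repl);
	return ret;
}

static void __net_exit example_table_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.example_table, example_ops);
}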
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 0b41959aab9f..da14ab61f363 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -69,16 +69,6 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
- unsigned int ldiff, u32 dst_portid,
- gfp_t gfp_mask);
-static inline struct sk_buff *
-netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
- gfp_t gfp_mask)
-{
- return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
-}
-
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index d6f9b4e6006d..011433478a14 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -529,6 +529,7 @@ enum pnfs_layouttype {
LAYOUT_OSD2_OBJECTS = 2,
LAYOUT_BLOCK_VOLUME = 3,
LAYOUT_FLEX_FILES = 4,
+ LAYOUT_SCSI = 5,
LAYOUT_TYPE_MAX
};
@@ -555,6 +556,7 @@ enum pnfs_block_volume_type {
PNFS_BLOCK_VOLUME_SLICE = 1,
PNFS_BLOCK_VOLUME_CONCAT = 2,
PNFS_BLOCK_VOLUME_STRIPE = 3,
+ PNFS_BLOCK_VOLUME_SCSI = 4,
};
enum pnfs_block_extent_state {
@@ -568,6 +570,23 @@ enum pnfs_block_extent_state {
#define PNFS_BLOCK_EXTENT_SIZE \
(7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
+/* on-the-wire size of a SCSI commit range */
+#define PNFS_SCSI_RANGE_SIZE \
+ (4 * sizeof(__be32))
+
+enum scsi_code_set {
+ PS_CODE_SET_BINARY = 1,
+ PS_CODE_SET_ASCII = 2,
+ PS_CODE_SET_UTF8 = 3
+};
+
+enum scsi_designator_type {
+ PS_DESIGNATOR_T10 = 1,
+ PS_DESIGNATOR_EUI64 = 2,
+ PS_DESIGNATOR_NAA = 3,
+ PS_DESIGNATOR_NAME = 8
+};
+
#define NFL4_UFLG_MASK 0x0000003F
#define NFL4_UFLG_DENSE 0x00000001
#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 7ec5b86735f3..4630eeae18e0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -65,7 +65,6 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
-int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 35fa08fd7739..ac0d65bef5d0 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -8,6 +8,7 @@ struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
+struct cgroup_namespace;
struct fs_struct;
/*
@@ -33,6 +34,7 @@ struct nsproxy {
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
+ struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/of.h b/include/linux/of.h
index dc6e39696b64..7fcb681baadf 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -296,13 +296,13 @@ extern int of_property_read_u64_array(const struct device_node *np,
u64 *out_values,
size_t sz);
-extern int of_property_read_string(struct device_node *np,
+extern int of_property_read_string(const struct device_node *np,
const char *propname,
const char **out_string);
-extern int of_property_match_string(struct device_node *np,
+extern int of_property_match_string(const struct device_node *np,
const char *propname,
const char *string);
-extern int of_property_read_string_helper(struct device_node *np,
+extern int of_property_read_string_helper(const struct device_node *np,
const char *propname,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
@@ -538,14 +538,14 @@ static inline int of_property_read_u64_array(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_string(struct device_node *np,
+static inline int of_property_read_string(const struct device_node *np,
const char *propname,
const char **out_string)
{
return -ENOSYS;
}
-static inline int of_property_read_string_helper(struct device_node *np,
+static inline int of_property_read_string_helper(const struct device_node *np,
const char *propname,
const char **out_strs, size_t sz, int index)
{
@@ -571,7 +571,7 @@ static inline int of_property_read_u64(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_match_string(struct device_node *np,
+static inline int of_property_match_string(const struct device_node *np,
const char *propname,
const char *string)
{
@@ -773,7 +773,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
*
* If @out_strs is NULL, the number of strings in the property is returned.
*/
-static inline int of_property_read_string_array(struct device_node *np,
+static inline int of_property_read_string_array(const struct device_node *np,
const char *propname, const char **out_strs,
size_t sz)
{
@@ -792,7 +792,7 @@ static inline int of_property_read_string_array(struct device_node *np,
* does not have a value, and -EILSEQ if the string is not null-terminated
* within the length of the property data.
*/
-static inline int of_property_count_strings(struct device_node *np,
+static inline int of_property_count_strings(const struct device_node *np,
const char *propname)
{
return of_property_read_string_helper(np, propname, NULL, 0, 0);
@@ -816,7 +816,7 @@ static inline int of_property_count_strings(struct device_node *np,
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
-static inline int of_property_read_string_index(struct device_node *np,
+static inline int of_property_read_string_index(const struct device_node *np,
const char *propname,
int index, const char **output)
{
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 7dee00143afd..d833eb4dd446 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -51,6 +51,9 @@ struct gpmc_timings {
u32 adv_on; /* Assertion time */
u32 adv_rd_off; /* Read deassertion time */
u32 adv_wr_off; /* Write deassertion time */
+ u32 adv_aad_mux_on; /* ADV assertion time for AAD */
+ u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */
+ u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */
/* WE signals timings corresponding to GPMC_CONFIG4 */
u32 we_on; /* WE assertion time */
@@ -59,6 +62,8 @@ struct gpmc_timings {
/* OE signals timings corresponding to GPMC_CONFIG4 */
u32 oe_on; /* OE assertion time */
u32 oe_off; /* OE deassertion time */
+ u32 oe_aad_mux_on; /* OE assertion time for AAD */
+ u32 oe_aad_mux_off; /* OE deassertion time for AAD */
/* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
u32 page_burst_access; /* Multiple access word delay */
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index da523661500a..77b078c103b2 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -17,6 +17,8 @@
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
+#elif MAX_NR_ZONES <= 8
+#define ZONES_SHIFT 3
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 19724e6ebd26..f4ed4f1b0c77 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -144,12 +144,12 @@ static inline struct page *compound_head(struct page *page)
return page;
}
-static inline int PageTail(struct page *page)
+static __always_inline int PageTail(struct page *page)
{
return READ_ONCE(page->compound_head) & 1;
}
-static inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(struct page *page)
{
return test_bit(PG_head, &page->flags) || PageTail(page);
}
@@ -184,31 +184,31 @@ static inline int PageCompound(struct page *page)
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
-static inline int Page##uname(struct page *page) \
+static __always_inline int Page##uname(struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
-static inline void SetPage##uname(struct page *page) \
+static __always_inline void SetPage##uname(struct page *page) \
{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
-static inline void ClearPage##uname(struct page *page) \
+static __always_inline void ClearPage##uname(struct page *page) \
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
-static inline void __SetPage##uname(struct page *page) \
+static __always_inline void __SetPage##uname(struct page *page) \
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
-static inline void __ClearPage##uname(struct page *page) \
+static __always_inline void __ClearPage##uname(struct page *page) \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
-static inline int TestSetPage##uname(struct page *page) \
+static __always_inline int TestSetPage##uname(struct page *page) \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
-static inline int TestClearPage##uname(struct page *page) \
+static __always_inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define PAGEFLAG(uname, lname, policy) \
@@ -371,7 +371,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
#define PAGE_MAPPING_KSM 2
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-static inline int PageAnon(struct page *page)
+static __always_inline int PageAnon(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -384,7 +384,7 @@ static inline int PageAnon(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static inline int PageKsm(struct page *page)
+static __always_inline int PageKsm(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -415,14 +415,14 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static inline void __SetPageUptodate(struct page *page)
+static __always_inline void __SetPageUptodate(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
__set_bit(PG_uptodate, &page->flags);
}
-static inline void SetPageUptodate(struct page *page)
+static __always_inline void SetPageUptodate(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
/*
@@ -456,12 +456,12 @@ static inline void set_page_writeback_keepwrite(struct page *page)
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
-static inline void set_compound_head(struct page *page, struct page *head)
+static __always_inline void set_compound_head(struct page *page, struct page *head)
{
WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}
-static inline void clear_compound_head(struct page *page)
+static __always_inline void clear_compound_head(struct page *page)
{
WRITE_ONCE(page->compound_head, 0);
}
@@ -593,6 +593,8 @@ static inline void __ClearPageBuddy(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+extern bool is_free_buddy_page(struct page *page);
+
#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
static inline int PageBalloon(struct page *page)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
new file mode 100644
index 000000000000..e596d5d9540e
--- /dev/null
+++ b/include/linux/page_ref.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_PAGE_REF_H
+#define _LINUX_PAGE_REF_H
+
+#include <linux/atomic.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
+#include <linux/tracepoint-defs.h>
+
+extern struct tracepoint __tracepoint_page_ref_set;
+extern struct tracepoint __tracepoint_page_ref_mod;
+extern struct tracepoint __tracepoint_page_ref_mod_and_test;
+extern struct tracepoint __tracepoint_page_ref_mod_and_return;
+extern struct tracepoint __tracepoint_page_ref_mod_unless;
+extern struct tracepoint __tracepoint_page_ref_freeze;
+extern struct tracepoint __tracepoint_page_ref_unfreeze;
+
+#ifdef CONFIG_DEBUG_PAGE_REF
+
+/*
+ * Ideally we would want to use the trace_<tracepoint>_enabled() helper
+ * functions. But due to include header file issues, that is not
+ * feasible. Instead we have to open code the static key functions.
+ *
+ * See trace_##name##_enabled(void) in include/linux/tracepoint.h
+ */
+#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
+
+extern void __page_ref_set(struct page *page, int v);
+extern void __page_ref_mod(struct page *page, int v);
+extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
+extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
+extern void __page_ref_mod_unless(struct page *page, int v, int u);
+extern void __page_ref_freeze(struct page *page, int v, int ret);
+extern void __page_ref_unfreeze(struct page *page, int v);
+
+#else
+
+#define page_ref_tracepoint_active(t) false
+
+static inline void __page_ref_set(struct page *page, int v)
+{
+}
+static inline void __page_ref_mod(struct page *page, int v)
+{
+}
+static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
+{
+}
+static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
+{
+}
+static inline void __page_ref_mod_unless(struct page *page, int v, int u)
+{
+}
+static inline void __page_ref_freeze(struct page *page, int v, int ret)
+{
+}
+static inline void __page_ref_unfreeze(struct page *page, int v)
+{
+}
+
+#endif
+
+static inline int page_ref_count(struct page *page)
+{
+ return atomic_read(&page->_count);
+}
+
+static inline int page_count(struct page *page)
+{
+ return atomic_read(&compound_head(page)->_count);
+}
+
+static inline void set_page_count(struct page *page, int v)
+{
+ atomic_set(&page->_count, v);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
+ __page_ref_set(page, v);
+}
+
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+ set_page_count(page, 1);
+}
+
+static inline void page_ref_add(struct page *page, int nr)
+{
+ atomic_add(nr, &page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, nr);
+}
+
+static inline void page_ref_sub(struct page *page, int nr)
+{
+ atomic_sub(nr, &page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, -nr);
+}
+
+static inline void page_ref_inc(struct page *page)
+{
+ atomic_inc(&page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, 1);
+}
+
+static inline void page_ref_dec(struct page *page)
+{
+ atomic_dec(&page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, -1);
+}
+
+static inline int page_ref_sub_and_test(struct page *page, int nr)
+{
+ int ret = atomic_sub_and_test(nr, &page->_count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ __page_ref_mod_and_test(page, -nr, ret);
+ return ret;
+}
+
+static inline int page_ref_dec_and_test(struct page *page)
+{
+ int ret = atomic_dec_and_test(&page->_count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ __page_ref_mod_and_test(page, -1, ret);
+ return ret;
+}
+
+static inline int page_ref_dec_return(struct page *page)
+{
+ int ret = atomic_dec_return(&page->_count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -1, ret);
+ return ret;
+}
+
+static inline int page_ref_add_unless(struct page *page, int nr, int u)
+{
+ int ret = atomic_add_unless(&page->_count, nr, u);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
+ __page_ref_mod_unless(page, nr, ret);
+ return ret;
+}
+
+static inline int page_ref_freeze(struct page *page, int count)
+{
+ int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
+ __page_ref_freeze(page, count, ret);
+ return ret;
+}
+
+static inline void page_ref_unfreeze(struct page *page, int count)
+{
+ VM_BUG_ON_PAGE(page_count(page) != 0, page);
+ VM_BUG_ON(count == 0);
+
+ atomic_set(&page->_count, count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
+ __page_ref_unfreeze(page, count);
+}
+
+#endif
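The pagemap.h hunk below shows the intended conversion pattern; as a standalone illustration with hypothetical helpers, open-coded atomics on page->_count turn into page_ref_* calls so the new tracepoints see every modification.

static inline void example_grab_page(struct page *page)
{
	/* was: atomic_inc(&page->_count); */
	page_ref_inc(page);
}

static inline int example_release_page(struct page *page)
{
	/* was: atomic_dec_and_test(&page->_count); */
	return page_ref_dec_and_test(page);
}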
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 183b15ea052b..1ebd65c91422 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -165,7 +165,7 @@ static inline int page_cache_get_speculative(struct page *page)
* SMP requires.
*/
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- atomic_inc(&page->_count);
+ page_ref_inc(page);
#else
if (unlikely(!get_page_unless_zero(page))) {
@@ -194,10 +194,10 @@ static inline int page_cache_add_speculative(struct page *page, int count)
VM_BUG_ON(!in_atomic());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- atomic_add(count, &page->_count);
+ page_ref_add(page, count);
#else
- if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+ if (unlikely(!page_ref_add_unless(page, count, 0)))
return 0;
#endif
VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
@@ -205,19 +205,6 @@ static inline int page_cache_add_speculative(struct page *page, int count)
return 1;
}
-static inline int page_freeze_refs(struct page *page, int count)
-{
- return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
-}
-
-static inline void page_unfreeze_refs(struct page *page, int count)
-{
- VM_BUG_ON_PAGE(page_count(page) != 0, page);
- VM_BUG_ON(count == 0);
-
- atomic_set(&page->_count, count);
-}
-
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 17e17c2ee1e6..004b8133417d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -784,6 +784,7 @@ extern struct list_head pci_root_buses; /* list of all known PCI buses */
int no_pci_devices(void);
void pcibios_resource_survey_bus(struct pci_bus *bus);
+void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
@@ -1733,6 +1734,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
+int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
@@ -1749,6 +1752,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
+static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+{
+ return -ENOSYS;
+}
+static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
+ int id, int reset) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 83b5e34c6580..4196c90a3c88 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -104,9 +104,11 @@ struct arm_pmu {
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
+ bool secure_access; /* 32-bit ARM only */
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct notifier_block hotplug_nb;
+ struct notifier_block cpu_pm_nb;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 79ec7bbf0155..f291275ffd71 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -121,6 +121,7 @@ struct hw_perf_event {
struct { /* intel_cqm */
int cqm_state;
u32 cqm_rmid;
+ int is_group_event;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
@@ -128,6 +129,10 @@ struct hw_perf_event {
struct { /* itrace */
int itrace_started;
};
+ struct { /* amd_power */
+ u64 pwr_acc;
+ u64 ptsc;
+ };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
@@ -966,11 +971,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+ bool crosstask, bool add_mark);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
-static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
+ if (entry->nr < PERF_MAX_STACK_DEPTH) {
entry->ip[entry->nr++] = ip;
+ return 0;
+ } else {
+ return -1; /* no more room, stop walking the stack */
+ }
}
extern int sysctl_perf_event_paranoid;
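A sketch of why perf_callchain_store() now returns a value: a stack walker can stop as soon as the entry buffer is full instead of walking the rest of the stack for nothing. The frame type and the unwind helper are invented for the example.

static void example_walk_kernel_stack(struct perf_callchain_entry *entry,
				      struct example_frame *frame)
{
	while (frame) {
		if (perf_callchain_store(entry, frame->return_address))
			break;	/* PERF_MAX_STACK_DEPTH reached */
		frame = example_unwind_next(frame);	/* hypothetical */
	}
}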
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d6f3641e7933..2abd7918f64f 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,8 +327,6 @@ struct phy_c45_device_ids {
/* phy_device: An instance of a PHY
*
* drv: Pointer to the driver for this PHY instance
- * bus: Pointer to the bus this PHY is on
- * dev: driver model device structure for this PHY
* phy_id: UID for this device found during discovery
 * c45_ids: 802.3-c45 Device Identifiers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
@@ -338,7 +336,6 @@ struct phy_c45_device_ids {
* suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
- * addr: Bus address of PHY
* link_timeout: The number of timer firings to wait before the
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 2400d2ea4f34..1d41ec44e39d 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -19,7 +19,7 @@ extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
struct device_node *np);
-extern void fixed_phy_del(int phy_addr);
+extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
@@ -40,9 +40,8 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq,
{
return ERR_PTR(-ENODEV);
}
-static inline int fixed_phy_del(int phy_addr)
+static inline void fixed_phy_unregister(struct phy_device *phydev)
{
- return -ENODEV;
}
static inline int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
new file mode 100644
index 000000000000..1d405a2b7272
--- /dev/null
+++ b/include/linux/pkeys.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_PKEYS_H
+#define _LINUX_PKEYS_H
+
+#include <linux/mm_types.h>
+#include <asm/mmu_context.h>
+
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
+#ifdef CONFIG_ARCH_HAS_PKEYS
+#include <asm/pkeys.h>
+#else /* ! CONFIG_ARCH_HAS_PKEYS */
+#define arch_max_pkey() (1)
+#define execute_only_pkey(mm) (0)
+#define arch_override_mprotect_pkey(vma, prot, pkey) (0)
+#define PKEY_DEDICATED_EXECUTE_ONLY 0
+#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+
+/*
+ * This is called from mprotect_pkey().
+ *
+ * Returns true if the protection key is valid.
+ */
+static inline bool validate_pkey(int pkey)
+{
+ if (pkey < 0)
+ return false;
+ return (pkey < arch_max_pkey());
+}
+
+#endif /* _LINUX_PKEYS_H */
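validate_pkey() is a pure range check; a minimal sketch of the kind of caller the comment has in mind follows. The entry point, its arguments and its error handling are hypothetical.

static long example_pkey_mprotect(unsigned long start, size_t len,
				  unsigned long prot, int pkey)
{
	if (!validate_pkey(pkey))	/* pkey must be in [0, arch_max_pkey()) */
		return -EINVAL;

	/* ... continue with the normal mprotect path, tagging the
	 * affected VMAs with the (possibly arch-overridden) key ...
	 */
	return 0;
}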
diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h
deleted file mode 100644
index e75dcbf2b230..000000000000
--- a/include/linux/platform_data/brcmfmac-sdio.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _LINUX_BRCMFMAC_PLATFORM_H
-#define _LINUX_BRCMFMAC_PLATFORM_H
-
-/*
- * Platform specific driver functions and data. Through the platform specific
- * device data functions can be provided to help the brcmfmac driver to
- * operate with the device in combination with the used platform.
- *
- * Use the platform data in the following (similar) way:
- *
- *
-#include <brcmfmac_platform.h>
-
-
-static void brcmfmac_power_on(void)
-{
-}
-
-static void brcmfmac_power_off(void)
-{
-}
-
-static void brcmfmac_reset(void)
-{
-}
-
-static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = {
- .power_on = brcmfmac_power_on,
- .power_off = brcmfmac_power_off,
- .reset = brcmfmac_reset
-};
-
-static struct platform_device brcmfmac_device = {
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .id = PLATFORM_DEVID_NONE,
- .dev.platform_data = &brcmfmac_sdio_pdata
-};
-
-void __init brcmfmac_init_pdata(void)
-{
- brcmfmac_sdio_pdata.oob_irq_supported = true;
- brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB);
- brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ |
- IORESOURCE_IRQ_HIGHLEVEL;
- platform_device_register(&brcmfmac_device);
-}
- *
- *
- * Note: the brcmfmac can be loaded as module or be statically built-in into
- * the kernel. If built-in then do note that it uses module_init (and
- * module_exit) routines which equal device_initcall. So if you intend to
- * create a module with the platform specific data for the brcmfmac and have
- * it built-in to the kernel then use a higher initcall then device_initcall
- * (see init.h). If this is not done then brcmfmac will load without problems
- * but will not pickup the platform data.
- *
- * When the driver does not "detect" platform driver data then it will continue
- * without reporting anything and just assume there is no data needed. Which is
- * probably true for most platforms.
- *
- * Explanation of the platform_data fields:
- *
- * drive_strength: is the preferred drive_strength to be used for the SDIO
- * pins. If 0 then a default value will be used. This is the target drive
- * strength, the exact drive strength which will be used depends on the
- * capabilities of the device.
- *
- * oob_irq_supported: does the board have support for OOB interrupts. SDIO
- * in-band interrupts are relatively slow and for having less overhead on
- * interrupt processing an out of band interrupt can be used. If the HW
- * supports this then enable this by setting this field to true and configure
- * the oob related fields.
- *
- * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are
- * used for registering the irq using request_irq function.
- *
- * broken_sg_support: flag for broken sg list support of SDIO host controller.
- * Set this to true if the SDIO host controller has higher align requirement
- * than 32 bytes for each scatterlist item.
- *
- * sd_head_align: alignment requirement for start of data buffer
- *
- * sd_sgentry_align: length alignment requirement for each sg entry
- *
- * power_on: This function is called by the brcmfmac when the module gets
- * loaded. This can be particularly useful for low power devices. The platform
- * spcific routine may for example decide to power up the complete device.
- * If there is no use-case for this function then provide NULL.
- *
- * power_off: This function is called by the brcmfmac when the module gets
- * unloaded. At this point the device can be powered down or otherwise be reset.
- * So if an actual power_off is not supported but reset is then reset the device
- * when this function gets called. This can be particularly useful for low power
- * devices. If there is no use-case for this function (either power-down or
- * reset) then provide NULL.
- *
- * reset: This function can get called if the device communication broke down.
- * This functionality is particularly useful in case of SDIO type devices. It is
- * possible to reset a dongle via sdio data interface, but it requires that
- * this is fully functional. This function is chip/module specific and this
- * function should return only after the complete reset has completed.
- */
-
-#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio"
-
-struct brcmfmac_sdio_platform_data {
- unsigned int drive_strength;
- bool oob_irq_supported;
- unsigned int oob_irq_nr;
- unsigned long oob_irq_flags;
- bool broken_sg_support;
- unsigned short sd_head_align;
- unsigned short sd_sgentry_align;
- void (*power_on)(void);
- void (*power_off)(void);
- void (*reset)(void);
-};
-
-#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
new file mode 100644
index 000000000000..1d30bf278231
--- /dev/null
+++ b/include/linux/platform_data/brcmfmac.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 201 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_BRCMFMAC_PLATFORM_H
+#define _LINUX_BRCMFMAC_PLATFORM_H
+
+
+#define BRCMFMAC_PDATA_NAME "brcmfmac"
+
+#define BRCMFMAC_COUNTRY_BUF_SZ 4
+
+
+/*
+ * Platform specific driver functions and data. Through the platform specific
+ * device data, functions and data can be provided to help the brcmfmac driver to
+ * operate with the device in combination with the used platform.
+ */
+
+
+/**
+ * Note: the brcmfmac can be loaded as a module or be statically built into
+ * the kernel. If built-in then do note that it uses module_init (and
+ * module_exit) routines which equal device_initcall. So if you intend to
+ * create a module with the platform specific data for the brcmfmac and have
+ * it built into the kernel, then use a higher initcall than device_initcall
+ * (see init.h). If this is not done then brcmfmac will load without problems
+ * but will not pick up the platform data.
+ *
+ * When the driver does not "detect" platform driver data then it will continue
+ * without reporting anything and just assume there is no data needed, which is
+ * probably true for most platforms.
+ */
+
+/**
+ * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are
+ * supported.
+ */
+enum brcmf_bus_type {
+ BRCMF_BUSTYPE_SDIO,
+ BRCMF_BUSTYPE_USB,
+ BRCMF_BUSTYPE_PCIE
+};
+
+
+/**
+ * struct brcmfmac_sdio_pd - SDIO Device specific platform data.
+ *
+ * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be
+ * used.
+ * @drive_strength: is the preferred drive_strength to be used for the SDIO
+ * pins. If 0 then a default value will be used. This is
+ * the target drive strength, the exact drive strength
+ * which will be used depends on the capabilities of the
+ * device.
+ * @oob_irq_supported: does the board have support for OOB interrupts. SDIO
+ * in-band interrupts are relatively slow and for having
+ * less overhead on interrupt processing an out of band
+ * interrupt can be used. If the HW supports this then
+ * enable this by setting this field to true and configure
+ * the oob related fields.
+ * @oob_irq_nr,
+ * @oob_irq_flags: the OOB interrupt information. The values are used for
+ * registering the irq using request_irq function.
+ * @broken_sg_support: flag for broken sg list support of SDIO host controller.
+ * Set this to true if the SDIO host controller has higher
+ * align requirement than 32 bytes for each scatterlist
+ * item.
+ * @sd_head_align: alignment requirement for start of data buffer.
+ * @sd_sgentry_align: length alignment requirement for each sg entry.
+ * @reset: This function can get called if the device communication
+ * broke down. This functionality is particularly useful in
+ * case of SDIO type devices. It is possible to reset a
+ * dongle via sdio data interface, but it requires that
+ * this is fully functional. This function is chip/module
+ * specific and this function should return only after the
+ * complete reset has completed.
+ */
+struct brcmfmac_sdio_pd {
+ int txglomsz;
+ unsigned int drive_strength;
+ bool oob_irq_supported;
+ unsigned int oob_irq_nr;
+ unsigned long oob_irq_flags;
+ bool broken_sg_support;
+ unsigned short sd_head_align;
+ unsigned short sd_sgentry_align;
+ void (*reset)(void);
+};
+
+/**
+ * struct brcmfmac_pd_cc_entry - Struct for translating user space country code
+ * (iso3166) to firmware country code and
+ * revision.
+ *
+ * @iso3166: iso3166 alpha 2 country code string.
+ * @cc: firmware country code string.
+ * @rev: firmware country code revision.
+ */
+struct brcmfmac_pd_cc_entry {
+ char iso3166[BRCMFMAC_COUNTRY_BUF_SZ];
+ char cc[BRCMFMAC_COUNTRY_BUF_SZ];
+ s32 rev;
+};
+
+/**
+ * struct brcmfmac_pd_cc - Struct for translating country codes as set by user
+ * space to a country code and rev which can be used by
+ * firmware.
+ *
+ * @table_size: number of entries in table (> 0)
+ * @table: array of 1 or more elements with translation information.
+ */
+struct brcmfmac_pd_cc {
+ int table_size;
+ struct brcmfmac_pd_cc_entry table[0];
+};
+
+/**
+ * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type)
+ * is the unique identifier of the device.
+ *
+ * @id: ID of the device for which this data is. In case of SDIO
+ * or PCIE this is the chipid as identified by chip.c. In
+ * case of USB this is the chipid as identified by the
+ * device query.
+ * @rev: chip revision, see id.
+ * @bus_type: The type of bus. Some chipid/rev exist for different bus
+ * types. Each bus type has its own set of settings.
+ * @feature_disable: Bitmask of features to disable (override); see feature.c
+ * in brcmfmac for details.
+ * @country_codes: If available, pointer to struct for translating country
+ * codes.
+ * @bus: Bus specific (union) device settings. Currently only
+ * SDIO.
+ */
+struct brcmfmac_pd_device {
+ unsigned int id;
+ unsigned int rev;
+ enum brcmf_bus_type bus_type;
+ unsigned int feature_disable;
+ struct brcmfmac_pd_cc *country_codes;
+ union {
+ struct brcmfmac_sdio_pd sdio;
+ } bus;
+};
+
+/**
+ * struct brcmfmac_platform_data - BRCMFMAC specific platform data.
+ *
+ * @power_on: This function is called by the brcmfmac driver when the module
+ * gets loaded. This can be particularly useful for low power
+ * devices. The platform specific routine may for example decide to
+ * power up the complete device. If there is no use-case for this
+ * function then provide NULL.
+ * @power_off: This function is called by the brcmfmac when the module gets
+ * unloaded. At this point the devices can be powered down or
+ * otherwise be reset. So if an actual power_off is not supported
+ * but reset is supported by the devices then reset the devices
+ * when this function gets called. This can be particularly useful
+ * for low power devices. If there is no use-case for this
+ * function then provide NULL.
+ */
+struct brcmfmac_platform_data {
+ void (*power_on)(void);
+ void (*power_off)(void);
+ char *fw_alternative_path;
+ int device_count;
+ struct brcmfmac_pd_device devices[0];
+};
+
+
+#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
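The deleted header above carried a board-file example; an updated sketch against the new structures follows. The example_* names, the callback bodies and the initcall choice are placeholders — only BRCMFMAC_PDATA_NAME, PLATFORM_DEVID_NONE, platform_device_register() and the structures declared here are taken from the patch, and <linux/platform_device.h> plus this header are assumed to be included.

static void example_wifi_power_on(void)
{
	/* e.g. enable the WLAN regulator / reference clock */
}

static void example_wifi_power_off(void)
{
	/* power the chip back down */
}

static struct brcmfmac_platform_data example_brcmfmac_pdata = {
	.power_on	= example_wifi_power_on,
	.power_off	= example_wifi_power_off,
	.device_count	= 0,	/* no per-device overrides in this sketch */
};

static struct platform_device example_brcmfmac_device = {
	.name			= BRCMFMAC_PDATA_NAME,
	.id			= PLATFORM_DEVID_NONE,
	.dev.platform_data	= &example_brcmfmac_pdata,
};

static int __init example_brcmfmac_pdata_init(void)
{
	/* run from an initcall earlier than device_initcall, per the note above */
	return platform_device_register(&example_brcmfmac_device);
}
subsys_initcall(example_brcmfmac_pdata_init);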
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
deleted file mode 100644
index ca13992089b8..000000000000
--- a/include/linux/platform_data/microread.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Driver include for the Inside Secure microread NFC Chip.
- *
- * Copyright (C) 2011 Tieto Poland
- * Copyright (C) 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _MICROREAD_H
-#define _MICROREAD_H
-
-#include <linux/i2c.h>
-
-#define MICROREAD_DRIVER_NAME "microread"
-
-/* board config platform data for microread */
-struct microread_nfc_platform_data {
- unsigned int rst_gpio;
- unsigned int irq_gpio;
- unsigned int ioh_gpio;
-};
-
-#endif /* _MICROREAD_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index 2a330ec9e2af..d1397c8ed94e 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -14,6 +14,7 @@
struct mmp_dma_platdata {
int dma_channels;
+ int nb_requestors;
};
#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index 36bb92172f47..c55e42ee57fa 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -40,7 +40,6 @@ struct s3c2410_nand_set {
char *name;
int *nr_map;
struct mtd_partition *partitions;
- struct nand_ecclayout *ecc_layout;
};
struct s3c2410_platform_nand {
diff --git a/include/linux/poll.h b/include/linux/poll.h
index c08386fb3e08..9fb4f40d9a26 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
-extern long select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec *tv);
static inline int poll_schedule(struct poll_wqueues *pwq, int state)
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 42dfc615dbf8..de0e7719d4c5 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -9,6 +9,8 @@
struct pid_namespace;
struct nsproxy;
struct path;
+struct task_struct;
+struct inode;
struct proc_ns_operations {
const char *name;
@@ -24,6 +26,7 @@ extern const struct proc_ns_operations ipcns_operations;
extern const struct proc_ns_operations pidns_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
+extern const struct proc_ns_operations cgroupns_operations;
/*
* We always define these enumerators
@@ -34,6 +37,7 @@ enum {
PROC_UTS_INIT_INO = 0xEFFFFFFEU,
PROC_USER_INIT_INO = 0xEFFFFFFDU,
PROC_PID_INIT_INO = 0xEFFFFFFCU,
+ PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
};
#ifdef CONFIG_PROC_FS
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 12c4865457ad..393efe2edf9a 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
bool psci_power_state_loses_context(u32 state);
bool psci_power_state_is_valid(u32 state);
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
struct psci_operations {
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 1d1ba2c5ee7a..53ecb37ae563 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -11,9 +11,11 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+#define X_FINAL_CLEANUP_AGG_INT 1
+
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 4
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 7
+#define FW_REVISION_VERSION 3
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -152,6 +154,9 @@
/* number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
+
/* base number of Tx PQs in the CM PQ representation.
* should be used when storing PQ IDs in CM PQ registers and context
*/
@@ -285,6 +290,16 @@
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+#define SDM_COMP_TYPE_PXP 5
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+#define SDM_COMP_TYPE_RAM 8
+
/******************/
/* PBF CONSTANTS */
/******************/
@@ -335,7 +350,7 @@ struct event_ring_entry {
/* Multi function mode */
enum mf_mode {
- SF,
+ ERROR_MODE /* Unsupported mode */,
MF_OVLAN,
MF_NPAR,
MAX_MF_MODE
@@ -606,4 +621,19 @@ struct status_block {
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
+struct tunnel_parsing_flags {
+ u8 flags;
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
#endif /* __COMMON_HSI__ */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 320b3373ac1d..092cb0c1afcb 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -17,10 +17,8 @@
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
-#define ETH_RX_NUM_NEXT_PAGE_SGES 2
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
@@ -34,7 +32,8 @@
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define ETH_REG_CQE_PBL_SIZE 3
+/* Maximum number of buffers, used for RX packet placement */
+#define ETH_RX_MAX_BUFF_PER_PKT 5
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
@@ -54,9 +53,9 @@
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_SGL_SIZE 3
-#define ETH_TPA_CQE_CONT_SGL_SIZE 6
-#define ETH_TPA_CQE_END_SGL_SIZE 4
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Queue Zone sizes */
#define TSTORM_QZONE_SIZE 0
@@ -74,18 +73,18 @@ struct coalescing_timeset {
struct eth_tx_1st_bd_flags {
u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
@@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
- __le16 fw_use_only;
+ __le16 bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
};
/* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
- __le16 bitfields;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
- __le16 bitfields2;
+ __le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14
-#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+ __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
/* Regular ETH Rx FP CQE. */
@@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe {
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
__le32 rss_hash;
- __le16 len_on_bd;
+ __le16 len_on_first_bd;
u8 placement_offset;
- u8 reserved;
- __le16 pbl[ETH_REG_CQE_PBL_SIZE];
- u8 reserved1[10];
+ struct tunnel_parsing_flags tunnel_pars_flags;
+ u8 bd_num;
+ u8 reserved[7];
+ u32 fw_debug;
+ u8 reserved1[3];
+ u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+};
+
+/* TPA-continue ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_cont_cqe {
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved[5];
+ u8 reserved1;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+};
+
+/* TPA-end ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_end_cqe {
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 total_packet_len;
+ u8 num_of_bds;
+ u8 end_reason;
+ __le16 num_of_coalesced_segs;
+ __le32 ts_delta;
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ u8 reserved1[3];
+ u8 reserved2;
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+};
+
+/* TPA-start ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_start_cqe {
+ u8 type;
+ u8 bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+ __le16 seg_len;
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
+ struct tunnel_parsing_flags tunnel_pars_flags;
+ u8 tpa_agg_index;
+ u8 header_len;
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ u32 fw_debug;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe {
u8 type;
u8 ramrod_cmd_id;
u8 error_flag;
- u8 reserved[27];
+ u8 reserved[25];
__le16 echo;
+ u8 reserved1;
+ u8 flags;
+/* for PMD mode - valid indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
+/* for PMD mode - valid toggle indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
};
/* union for all ETH Rx CQE types */
union eth_rx_cqe {
struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
struct eth_slow_path_rx_cqe slow_path;
};
@@ -183,15 +258,18 @@ enum eth_rx_cqe_type {
ETH_RX_CQE_TYPE_UNUSED,
ETH_RX_CQE_TYPE_REGULAR,
ETH_RX_CQE_TYPE_SLOW_PATH,
+ ETH_RX_CQE_TYPE_TPA_START,
+ ETH_RX_CQE_TYPE_TPA_CONT,
+ ETH_RX_CQE_TYPE_TPA_END,
MAX_ETH_RX_CQE_TYPE
};
/* ETH Rx producers data */
struct eth_rx_prod_data {
__le16 bd_prod;
- __le16 sge_prod;
__le16 cqe_prod;
__le16 reserved;
+ __le16 reserved1;
};
/* The first tx bd of a given packet */
@@ -211,12 +289,17 @@ struct eth_tx_2nd_bd {
/* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd {
__le16 lso_mss;
- u8 bitfields;
+ __le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
- u8 resereved0[3];
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+ u8 tunn_l4_hdr_start_offset_w;
+ u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
@@ -226,12 +309,24 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
+/* Complementary information for the regular tx bd of a given packet. */
+struct eth_tx_data_bd {
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
/* The common non-special TX BD ring element */
struct eth_tx_bd {
struct regpair addr;
__le16 nbytes;
- __le16 reserved0;
- __le32 reserved1;
+ struct eth_tx_data_bd data;
};
union eth_tx_bd_types {
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 41b9049b57e2..5f8fcaaa6504 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -19,6 +19,10 @@
/* dma_addr_t manip */
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val) do { \
+ (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val)); \
+ } while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
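A brief sketch of how the new DMA_REGPAIR_LE() helper is intended to be used, splitting a dma_addr_t into the little-endian hi/lo halves of a struct regpair; the variable names and the source of the DMA address are assumptions:

	struct regpair pbl_base;
	dma_addr_t pbl_phys;	/* e.g. returned by dma_alloc_coherent() */

	/* equivalent to filling .hi/.lo with DMA_HI_LE()/DMA_LO_LE() by hand */
	DMA_REGPAIR_LE(pbl_base, pbl_phys);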
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 81ab178e31c1..e1d69834a11f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -33,10 +33,20 @@ struct qed_update_vport_params {
u8 vport_id;
u8 update_vport_active_flg;
u8 vport_active_flg;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
u8 update_rss_flg;
struct qed_update_vport_rss_params rss_params;
};
+struct qed_start_vport_params {
+ bool remove_inner_vlan;
+ bool gro_enable;
+ bool drop_ttl0;
+ u8 vport_id;
+ u16 mtu;
+};
+
struct qed_stop_rxq_params {
u8 rss_id;
u8 rx_queue_id;
@@ -116,9 +126,7 @@ struct qed_eth_ops {
void *cookie);
int (*vport_start)(struct qed_dev *cdev,
- u8 vport_id, u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg);
+ struct qed_start_vport_params *params);
int (*vport_stop)(struct qed_dev *cdev,
u8 vport_id);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d4a32e878180..1f7599c77cd4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -80,7 +80,7 @@ struct qed_dev_info {
u8 num_hwfns;
u8 hw_mac[ETH_ALEN];
- bool is_mf;
+ bool is_mf_default;
/* FW version */
u16 fw_major;
@@ -360,6 +360,12 @@ enum DP_MODULE {
/* to be added...up to 0x8000000 */
};
+enum qed_mf_mode {
+ QED_MF_DEFAULT,
+ QED_MF_OVLAN,
+ QED_MF_NPAR,
+};
+
struct qed_eth_stats {
u64 no_buff_discards;
u64 packet_too_big_discard;
@@ -440,6 +446,12 @@ struct qed_eth_stats {
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
+struct qed_sb_cnt_info {
+ int sb_cnt;
+ int sb_iov_cnt;
+ int sb_free_blk;
+};
+
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
u32 prod = 0;
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
index bd466439c588..3bdfa70bc642 100644
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -5,7 +5,7 @@
* as needed after allocation when they are freed. Per cpu lists of pages
* are kept that only contain node local pages.
*
- * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
+ * (C) 2007, SGI. Christoph Lameter <cl@linux.com>
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/include/linux/quota.h b/include/linux/quota.h
index b2505acfd3c0..9dfb6bce8c9e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -306,6 +306,7 @@ struct quota_format_ops {
int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */
int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */
int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */
+ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */
};
/* Operations working with dquots */
@@ -321,6 +322,8 @@ struct dquot_operations {
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
+ /* Get next ID with active quota structure */
+ int (*get_next_id) (struct super_block *sb, struct kqid *qid);
};
struct path;
@@ -425,6 +428,8 @@ struct quotactl_ops {
int (*quota_sync)(struct super_block *, int);
int (*set_info)(struct super_block *, int, struct qc_info *);
int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*get_nextdqblk)(struct super_block *, struct kqid *,
+ struct qc_dqblk *);
int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*get_state)(struct super_block *, struct qc_state *);
int (*rm_xquota)(struct super_block *, unsigned int);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 7a57c28eb5e7..f00fa86ac966 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -82,6 +82,7 @@ int dquot_commit(struct dquot *dquot);
int dquot_acquire(struct dquot *dquot);
int dquot_release(struct dquot *dquot);
int dquot_commit_info(struct super_block *sb, int type);
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid);
int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
@@ -99,6 +100,8 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state);
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *id,
+ struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f54be7082207..51a97ac8bfbf 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -21,6 +21,7 @@
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H
+#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
@@ -270,8 +271,15 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
}
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
- struct radix_tree_node **nodep, void ***slotp);
-int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+ unsigned order, struct radix_tree_node **nodep,
+ void ***slotp);
+int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
+ unsigned order, void *);
+static inline int radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *entry)
+{
+ return __radix_tree_insert(root, index, 0, entry);
+}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
@@ -395,6 +403,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
}
/**
+ * radix_tree_iter_next - resume iterating when the chunk may be invalid
+ * @iter: iterator state
+ *
+ * If the iterator needs to release then reacquire a lock, the chunk may
+ * have been invalidated by an insertion or deletion. Call this function
+ * to continue the iteration from the next index.
+ */
+static inline __must_check
+void **radix_tree_iter_next(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index + 1;
+ iter->tags = 0;
+ return NULL;
+}
+
+/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
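To illustrate the intended calling pattern for radix_tree_iter_next(), a hedged sketch of an iteration that periodically drops its lock; the tree, the lock, and the per-slot work are placeholders:

	void **slot;
	struct radix_tree_iter iter;

	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		if (need_resched()) {
			/* the chunk may become stale once the lock is dropped */
			slot = radix_tree_iter_next(&iter);
			spin_unlock_irq(&tree_lock);
			cond_resched();
			spin_lock_irq(&tree_lock);
			continue;
		}
		/* process *slot while the lock is held */
	}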
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 14ec1652daf4..17d4f849c65e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -319,6 +319,27 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
})
/**
+ * list_next_or_null_rcu - get the next element from a list
+ * @head: the head for the list.
+ * @ptr: the list head to take the next element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the ptr is at the end of the list, NULL is returned.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_next_or_null_rcu(head, ptr, type, member) \
+({ \
+ struct list_head *__head = (head); \
+ struct list_head *__ptr = (ptr); \
+ struct list_head *__next = READ_ONCE(__ptr->next); \
+ likely(__next != __head) ? list_entry_rcu(__next, type, \
+ member) : NULL; \
+})
+
+/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
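A short, hedged example of list_next_or_null_rcu(): peek at the element following 'pos' inside an RCU read-side section. struct foo, its fields, and the function name are placeholders:

struct foo {
	struct list_head node;
	int val;
};

static void peek_after(struct list_head *foo_list, struct foo *pos)
{
	struct foo *next;

	rcu_read_lock();
	next = list_next_or_null_rcu(foo_list, &pos->node, struct foo, node);
	if (next)	/* NULL when 'pos' was the last element */
		pr_debug("next val=%d\n", next->val);
	rcu_read_unlock();
}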
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ce6b962ffed4..a3a5bcdb1d02 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -38,7 +38,7 @@ struct of_phandle_args;
* @nr_resets: number of reset controls in this reset controller device
*/
struct reset_controller_dev {
- struct reset_control_ops *ops;
+ const struct reset_control_ops *ops;
struct module *owner;
struct list_head list;
struct device_node *of_node;
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
deleted file mode 100644
index 20bcb55498cd..000000000000
--- a/include/linux/rfkill-gpio.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2011, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-
-#ifndef __RFKILL_GPIO_H
-#define __RFKILL_GPIO_H
-
-#include <linux/types.h>
-#include <linux/rfkill.h>
-
-/**
- * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
- * for unused gpio's, the expected value is -1.
- * @name: name for the gpio rf kill instance
- */
-
-struct rfkill_gpio_platform_data {
- char *name;
- enum rfkill_type type;
-};
-
-#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index d9010789b4e8..e6a0031d1b1f 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill);
*
* Pause polling -- say transmitter is off for other reasons.
* NOTE: not necessary for suspend/resume -- in that case the
- * core stops polling anyway
+ * core stops polling anyway (but will also correctly handle
+ * the case of polling having been paused before suspend).
*/
void rfkill_pause_polling(struct rfkill *rfkill);
@@ -212,6 +213,15 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
+
+/**
+ * rfkill_find_type - Helper for finding rfkill type by name
+ * @name: the name of the type
+ *
+ * Returns the enum rfkill_type that corresponds to the name.
+ */
+enum rfkill_type rfkill_find_type(const char *name);
+
#else /* !RFKILL */
static inline struct rfkill * __must_check
rfkill_alloc(const char *name,
@@ -268,6 +278,12 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
{
return false;
}
+
+static inline enum rfkill_type rfkill_find_type(const char *name)
+{
+ return RFKILL_TYPE_ALL;
+}
+
#endif /* RFKILL || RFKILL_MODULE */
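For illustration, a hedged sketch of using the new rfkill_find_type() helper to map a firmware/DT-supplied type string; the "radio-type" property name is made up, and RFKILL_TYPE_ALL is kept as the fallback (matching the !RFKILL stub above):

	const char *name = NULL;
	enum rfkill_type type = RFKILL_TYPE_ALL;

	if (!device_property_read_string(dev, "radio-type", &name))
		type = rfkill_find_type(name);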
diff --git a/include/linux/rio.h b/include/linux/rio.h
index cde976e86b48..aa2323893e8d 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -137,6 +137,13 @@ struct rio_switch_ops {
int (*em_handle) (struct rio_dev *dev, u8 swport);
};
+enum rio_device_state {
+ RIO_DEVICE_INITIALIZING,
+ RIO_DEVICE_RUNNING,
+ RIO_DEVICE_GONE,
+ RIO_DEVICE_SHUTDOWN,
+};
+
/**
* struct rio_dev - RIO device info
* @global_list: Node in list of all RIO devices
@@ -165,6 +172,7 @@ struct rio_switch_ops {
* @destid: Network destination ID (or associated destid for switch)
* @hopcount: Hopcount to this device
* @prev: Previous RIO device connected to the current one
+ * @state: device state
* @rswitch: struct rio_switch (if valid for this device)
*/
struct rio_dev {
@@ -194,6 +202,7 @@ struct rio_dev {
u16 destid;
u8 hopcount;
struct rio_dev *prev;
+ atomic_t state;
struct rio_switch rswitch[0]; /* RIO switch info */
};
@@ -202,6 +211,7 @@ struct rio_dev {
#define to_rio_dev(n) container_of(n, struct rio_dev, dev)
#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
#define to_rio_mport(n) container_of(n, struct rio_mport, dev)
+#define to_rio_net(n) container_of(n, struct rio_net, dev)
/**
* struct rio_msg - RIO message event
@@ -235,8 +245,11 @@ enum rio_phy_type {
/**
* struct rio_mport - RIO master port info
* @dbells: List of doorbell events
+ * @pwrites: List of portwrite events
* @node: Node in global list of master ports
* @nnode: Node in network list of master ports
+ * @net: RIO net this mport is attached to
+ * @lock: lock to synchronize list manipulations
* @iores: I/O mem resource that this master port interface owns
* @riores: RIO resources that this master port interfaces owns
* @inb_msg: RIO inbound message event descriptors
@@ -253,11 +266,16 @@ enum rio_phy_type {
* @priv: Master port private data
* @dma: DMA device associated with mport
* @nscan: RapidIO network enumeration/discovery operations
+ * @state: mport device state
+ * @pwe_refcnt: port-write enable ref counter to track enable/disable requests
*/
struct rio_mport {
struct list_head dbells; /* list of doorbell events */
+ struct list_head pwrites; /* list of portwrite events */
struct list_head node; /* node in global list of ports */
struct list_head nnode; /* node in net list of ports */
+ struct rio_net *net; /* RIO net this mport is attached to */
+ struct mutex lock;
struct resource iores;
struct resource riores[RIO_MAX_MPORT_RESOURCES];
struct rio_msg inb_msg[RIO_MAX_MBOX];
@@ -280,20 +298,20 @@ struct rio_mport {
struct dma_device dma;
#endif
struct rio_scan *nscan;
+ atomic_t state;
+ unsigned int pwe_refcnt;
};
+static inline int rio_mport_is_running(struct rio_mport *mport)
+{
+ return atomic_read(&mport->state) == RIO_DEVICE_RUNNING;
+}
+
/*
* Enumeration/discovery control flags
*/
#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */
-struct rio_id_table {
- u16 start; /* logical minimal id */
- u32 max; /* max number of IDs in table */
- spinlock_t lock;
- unsigned long *table;
-};
-
/**
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
@@ -302,7 +320,9 @@ struct rio_id_table {
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
- * @destid_table: destID allocation table
+ * @dev: Device object
+ * @enum_data: private data specific to a network enumerator
+ * @release: enumerator-specific release callback
*/
struct rio_net {
struct list_head node; /* node in list of networks */
@@ -311,7 +331,53 @@ struct rio_net {
struct list_head mports; /* list of ports accessing net */
struct rio_mport *hport; /* primary port for accessing net */
unsigned char id; /* RIO network ID */
- struct rio_id_table destid_table; /* destID allocation table */
+ struct device dev;
+ void *enum_data; /* private data for enumerator of the network */
+ void (*release)(struct rio_net *net);
+};
+
+enum rio_link_speed {
+ RIO_LINK_DOWN = 0, /* SRIO Link not initialized */
+ RIO_LINK_125 = 1, /* 1.25 GBaud */
+ RIO_LINK_250 = 2, /* 2.5 GBaud */
+ RIO_LINK_312 = 3, /* 3.125 GBaud */
+ RIO_LINK_500 = 4, /* 5.0 GBaud */
+ RIO_LINK_625 = 5 /* 6.25 GBaud */
+};
+
+enum rio_link_width {
+ RIO_LINK_1X = 0,
+ RIO_LINK_1XR = 1,
+ RIO_LINK_2X = 3,
+ RIO_LINK_4X = 2,
+ RIO_LINK_8X = 4,
+ RIO_LINK_16X = 5
+};
+
+enum rio_mport_flags {
+ RIO_MPORT_DMA = (1 << 0), /* supports DMA data transfers */
+ RIO_MPORT_DMA_SG = (1 << 1), /* DMA supports HW SG mode */
+ RIO_MPORT_IBSG = (1 << 2), /* inbound mapping supports SG */
+};
+
+/**
+ * struct rio_mport_attr - RIO mport device attributes
+ * @flags: mport device capability flags
+ * @link_speed: SRIO link speed value (as defined by RapidIO specification)
+ * @link_width: SRIO link width value (as defined by RapidIO specification)
+ * @dma_max_sge: number of SG list entries that can be handled by DMA channel(s)
+ * @dma_max_size: max number of bytes in single DMA transfer (SG entry)
+ * @dma_align: alignment shift for DMA operations (as for other DMA operations)
+ */
+struct rio_mport_attr {
+ int flags;
+ int link_speed;
+ int link_width;
+
+ /* DMA capability info: valid only if RIO_MPORT_DMA flag is set */
+ int dma_max_sge;
+ int dma_max_size;
+ int dma_align;
};
/* Low-level architecture-dependent routines */
@@ -333,6 +399,9 @@ struct rio_net {
* @get_inb_message: Callback to get a message from an inbound mailbox queue.
* @map_inb: Callback to map RapidIO address region into local memory space.
* @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb().
+ * @query_mport: Callback to query mport device attributes.
+ * @map_outb: Callback to map outbound address region into local memory space.
+ * @unmap_outb: Callback to unmap outbound RapidIO address region.
*/
struct rio_ops {
int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
@@ -358,6 +427,11 @@ struct rio_ops {
int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
u64 rstart, u32 size, u32 flags);
void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
+ int (*query_mport)(struct rio_mport *mport,
+ struct rio_mport_attr *attr);
+ int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart,
+ u32 size, u32 flags, dma_addr_t *laddr);
+ void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart);
};
#define RIO_RESOURCE_MEM 0x00000100
@@ -376,6 +450,7 @@ struct rio_ops {
* @id_table: RIO device ids to be associated with this driver
* @probe: RIO device inserted
* @remove: RIO device removed
+ * @shutdown: shutdown notification callback
* @suspend: RIO device suspended
* @resume: RIO device awakened
* @enable_wake: RIO device enable wake event
@@ -390,6 +465,7 @@ struct rio_driver {
const struct rio_device_id *id_table;
int (*probe) (struct rio_dev * dev, const struct rio_device_id * id);
void (*remove) (struct rio_dev * dev);
+ void (*shutdown)(struct rio_dev *dev);
int (*suspend) (struct rio_dev * dev, u32 state);
int (*resume) (struct rio_dev * dev);
int (*enable_wake) (struct rio_dev * dev, u32 state, int enable);
@@ -476,10 +552,14 @@ struct rio_scan_node {
};
/* Architecture and hardware-specific functions */
+extern int rio_mport_initialize(struct rio_mport *);
extern int rio_register_mport(struct rio_mport *);
+extern int rio_unregister_mport(struct rio_mport *);
extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
extern void rio_close_inb_mbox(struct rio_mport *, int);
extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
extern void rio_close_outb_mbox(struct rio_mport *, int);
+extern int rio_query_mport(struct rio_mport *port,
+ struct rio_mport_attr *mport_attr);
#endif /* LINUX_RIO_H */
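A minimal sketch of the new attribute query path, assuming the caller already holds a valid mport pointer: call rio_query_mport() and check the DMA capability flag before trusting the DMA limits.

static int check_mport_dma(struct rio_mport *mport)
{
	struct rio_mport_attr attr;
	int ret;

	ret = rio_query_mport(mport, &attr);
	if (ret)
		return ret;	/* e.g. no query_mport callback in the controller driver */

	if (!(attr.flags & RIO_MPORT_DMA))
		return -EOPNOTSUPP;

	pr_info("mport DMA: max_sge=%d max_size=%d align=%d\n",
		attr.dma_max_sge, attr.dma_max_size, attr.dma_align);
	return 0;
}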
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 9fc2f213e74f..0834264fb7f2 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -369,12 +369,24 @@ void rio_release_region(struct rio_dev *, int);
extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
u64 rbase, u32 size, u32 rflags);
extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart);
+extern int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
+ u32 size, u32 rflags, dma_addr_t *local);
+extern void rio_unmap_outb_region(struct rio_mport *mport,
+ u16 destid, u64 rstart);
/* Port-Write management */
extern int rio_request_inb_pwrite(struct rio_dev *,
int (*)(struct rio_dev *, union rio_pw_msg*, int));
extern int rio_release_inb_pwrite(struct rio_dev *);
-extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg);
+extern int rio_add_mport_pw_handler(struct rio_mport *mport, void *dev_id,
+ int (*pwcback)(struct rio_mport *mport, void *dev_id,
+ union rio_pw_msg *msg, int step));
+extern int rio_del_mport_pw_handler(struct rio_mport *mport, void *dev_id,
+ int (*pwcback)(struct rio_mport *mport, void *dev_id,
+ union rio_pw_msg *msg, int step));
+extern int rio_inb_pwrite_handler(struct rio_mport *mport,
+ union rio_pw_msg *pw_msg);
+extern void rio_pw_enable(struct rio_mport *mport, int enable);
/* LDM support */
int rio_register_driver(struct rio_driver *);
@@ -435,6 +447,7 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
/* Misc driver helpers */
extern u16 rio_local_get_device_id(struct rio_mport *port);
+extern void rio_local_set_device_id(struct rio_mport *port, u16 did);
extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
struct rio_dev *from);
diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h
new file mode 100644
index 000000000000..b65d19df76d2
--- /dev/null
+++ b/include/linux/rio_mport_cdev.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2015-2016, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, Texas Instruments Incorporated
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License(GPL) Version 2, or the BSD-3 Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_MPORT_CDEV_H_
+#define _RIO_MPORT_CDEV_H_
+
+#ifndef __user
+#define __user
+#endif
+
+struct rio_mport_maint_io {
+ uint32_t rioid; /* destID of remote device */
+ uint32_t hopcount; /* hopcount to remote device */
+ uint32_t offset; /* offset in register space */
+ size_t length; /* length in bytes */
+ void __user *buffer; /* data buffer */
+};
+
+/*
+ * Definitions for RapidIO data transfers:
+ * - memory mapped (MAPPED)
+ * - packet generation from memory (TRANSFER)
+ */
+#define RIO_TRANSFER_MODE_MAPPED (1 << 0)
+#define RIO_TRANSFER_MODE_TRANSFER (1 << 1)
+#define RIO_CAP_DBL_SEND (1 << 2)
+#define RIO_CAP_DBL_RECV (1 << 3)
+#define RIO_CAP_PW_SEND (1 << 4)
+#define RIO_CAP_PW_RECV (1 << 5)
+#define RIO_CAP_MAP_OUTB (1 << 6)
+#define RIO_CAP_MAP_INB (1 << 7)
+
+struct rio_mport_properties {
+ uint16_t hdid;
+ uint8_t id; /* Physical port ID */
+ uint8_t index;
+ uint32_t flags;
+ uint32_t sys_size; /* Default addressing size */
+ uint8_t port_ok;
+ uint8_t link_speed;
+ uint8_t link_width;
+ uint32_t dma_max_sge;
+ uint32_t dma_max_size;
+ uint32_t dma_align;
+ uint32_t transfer_mode; /* Default transfer mode */
+ uint32_t cap_sys_size; /* Capable system sizes */
+ uint32_t cap_addr_size; /* Capable addressing sizes */
+ uint32_t cap_transfer_mode; /* Capable transfer modes */
+ uint32_t cap_mport; /* Mport capabilities */
+};
+
+/*
+ * Definitions for RapidIO events;
+ * - incoming port-writes
+ * - incoming doorbells
+ */
+#define RIO_DOORBELL (1 << 0)
+#define RIO_PORTWRITE (1 << 1)
+
+struct rio_doorbell {
+ uint32_t rioid;
+ uint16_t payload;
+};
+
+struct rio_doorbell_filter {
+ uint32_t rioid; /* 0xffffffff to match all ids */
+ uint16_t low;
+ uint16_t high;
+};
+
+
+struct rio_portwrite {
+ uint32_t payload[16];
+};
+
+struct rio_pw_filter {
+ uint32_t mask;
+ uint32_t low;
+ uint32_t high;
+};
+
+/* A RapidIO base address for inbound requests set to the value defined below
+ * indicates that no specific RIO-to-local address translation is requested
+ * and the driver should use direct (one-to-one) address mapping.
+ */
+#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0))
+
+struct rio_mmap {
+ uint32_t rioid;
+ uint64_t rio_addr;
+ uint64_t length;
+ uint64_t handle;
+ void *address;
+};
+
+struct rio_dma_mem {
+ uint64_t length; /* length of DMA memory */
+ uint64_t dma_handle; /* handle associated with this memory */
+ void *buffer; /* pointer to this memory */
+};
+
+
+struct rio_event {
+ unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */
+ union {
+ struct rio_doorbell doorbell; /* header for RIO_DOORBELL */
+ struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
+ } u;
+};
+
+enum rio_transfer_sync {
+ RIO_TRANSFER_SYNC, /* synchronous transfer */
+ RIO_TRANSFER_ASYNC, /* asynchronous transfer */
+ RIO_TRANSFER_FAF, /* fire-and-forget transfer */
+};
+
+enum rio_transfer_dir {
+ RIO_TRANSFER_DIR_READ, /* Read operation */
+ RIO_TRANSFER_DIR_WRITE, /* Write operation */
+};
+
+/*
+ * RapidIO data exchange transactions are lists of individual transfers. Each
+ * transfer exchanges data between two RapidIO devices by remote direct memory
+ * access and has its own completion code.
+ *
+ * The RapidIO specification defines four types of data exchange requests:
+ * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
+ * the required type of write operation to be specified, or a combination in
+ * which only the last data packet requires a response.
+ *
+ * NREAD: read up to 256 bytes from remote device memory into local memory
+ * NWRITE: write up to 256 bytes from local memory to remote device memory
+ * without confirmation
+ * SWRITE: as NWRITE, but all addresses and payloads must be 64-bit aligned
+ * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
+ *
+ * The default exchange is chosen from NREAD and any of the WRITE modes as the
+ * driver sees fit. For write requests the user can explicitly choose between
+ * any of the write modes for each transaction.
+ */
+enum rio_exchange {
+ RIO_EXCHANGE_DEFAULT, /* Default method */
+ RIO_EXCHANGE_NWRITE, /* All packets using NWRITE */
+ RIO_EXCHANGE_SWRITE, /* All packets using SWRITE */
+ RIO_EXCHANGE_NWRITE_R, /* Last packet NWRITE_R, others NWRITE */
+ RIO_EXCHANGE_SWRITE_R, /* Last packet NWRITE_R, others SWRITE */
+ RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
+};
+
+struct rio_transfer_io {
+ uint32_t rioid; /* Target destID */
+ uint64_t rio_addr; /* Address in target's RIO mem space */
+ enum rio_exchange method; /* Data exchange method */
+ void __user *loc_addr;
+ uint64_t handle;
+ uint64_t offset; /* Offset in buffer */
+ uint64_t length; /* Length in bytes */
+ uint32_t completion_code; /* Completion code for this transfer */
+};
+
+struct rio_transaction {
+ uint32_t transfer_mode; /* Data transfer mode */
+ enum rio_transfer_sync sync; /* Synchronization method */
+ enum rio_transfer_dir dir; /* Transfer direction */
+ size_t count; /* Number of transfers */
+ struct rio_transfer_io __user *block; /* Array of <count> transfers */
+};
+
+struct rio_async_tx_wait {
+ uint32_t token; /* DMA transaction ID token */
+ uint32_t timeout; /* Wait timeout in msec; if 0, use the default timeout */
+};
+
+#define RIO_MAX_DEVNAME_SZ 20
+
+struct rio_rdev_info {
+ uint32_t destid;
+ uint8_t hopcount;
+ uint32_t comptag;
+ char name[RIO_MAX_DEVNAME_SZ + 1];
+};
+
+/* Driver IOCTL codes */
+#define RIO_MPORT_DRV_MAGIC 'm'
+
+#define RIO_MPORT_MAINT_HDID_SET \
+ _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
+#define RIO_MPORT_MAINT_COMPTAG_SET \
+ _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
+#define RIO_MPORT_MAINT_PORT_IDX_GET \
+ _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
+#define RIO_MPORT_GET_PROPERTIES \
+ _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
+#define RIO_MPORT_MAINT_READ_LOCAL \
+ _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_LOCAL \
+ _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_READ_REMOTE \
+ _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_REMOTE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
+#define RIO_ENABLE_DOORBELL_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
+#define RIO_DISABLE_DOORBELL_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
+#define RIO_ENABLE_PORTWRITE_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
+#define RIO_DISABLE_PORTWRITE_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
+#define RIO_SET_EVENT_MASK \
+ _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
+#define RIO_GET_EVENT_MASK \
+ _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
+#define RIO_MAP_OUTBOUND \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
+#define RIO_UNMAP_OUTBOUND \
+ _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
+#define RIO_MAP_INBOUND \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
+#define RIO_UNMAP_INBOUND \
+ _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
+#define RIO_ALLOC_DMA \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
+#define RIO_FREE_DMA \
+ _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
+#define RIO_TRANSFER \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
+#define RIO_WAIT_FOR_ASYNC \
+ _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
+#define RIO_DEV_ADD \
+ _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
+#define RIO_DEV_DEL \
+ _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
+
+#endif /* _RIO_MPORT_CDEV_H_ */
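A hedged user-space sketch of the new character-device interface: open the mport device node and query its properties with RIO_MPORT_GET_PROPERTIES. The /dev node name is an assumption.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>

int main(void)
{
	struct rio_mport_properties prop;
	int fd = open("/dev/rio_mport0", O_RDWR);	/* device node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &prop) == 0)
		printf("hdid=%u link_speed=%u link_width=%u\n",
		       prop.hdid, prop.link_speed, prop.link_width);
	close(fd);
	return 0;
}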
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 218168a2b5e9..1063ae382bc2 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -238,6 +238,8 @@
#define RIO_PORT_N_ACK_INBOUND 0x3f000000
#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
+#define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20)
+#define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000
#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
@@ -249,6 +251,7 @@
#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
+#define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */
#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a07f42bedda3..49eb4f8ebac9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -86,6 +86,7 @@ enum ttu_flags {
TTU_MIGRATION = 2, /* migration mode */
TTU_MUNLOCK = 4, /* munlock mode */
TTU_LZFREE = 8, /* lazy free mode */
+ TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */
TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
@@ -93,6 +94,8 @@ enum ttu_flags {
TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
+ TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock:
+ * caller holds it */
};
#ifdef CONFIG_MMU
@@ -240,6 +243,8 @@ int page_mkclean(struct page *);
*/
int try_to_munlock(struct page *);
+void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+
/*
* Called by memory-failure.c to kill processes.
*/
@@ -266,6 +271,7 @@ struct rmap_walk_control {
};
int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bd242bed4abb..589c4780b077 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
+#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
@@ -1791,8 +1792,8 @@ struct task_struct {
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
- unsigned long timer_slack_ns;
- unsigned long default_timer_slack_ns;
+ u64 timer_slack_ns;
+ u64 default_timer_slack_ns;
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
@@ -1818,6 +1819,16 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
+#ifdef CONFIG_KCOV
+ /* Coverage collection mode enabled for this task (0 if disabled). */
+ enum kcov_mode kcov_mode;
+ /* Size of the kcov_area. */
+ unsigned kcov_size;
+ /* Buffer for coverage collection. */
+ void *kcov_area;
+ /* kcov descriptor associated with this task, or NULL. */
+ struct kcov *kcov;
+#endif
#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg_in_oom;
gfp_t memcg_oom_gfp_mask;
@@ -2859,10 +2870,18 @@ static inline unsigned long stack_not_used(struct task_struct *p)
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+ n--;
+# else
n++;
+# endif
} while (!*n);
+# ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 4f080ab4f2cd..22db1e63707e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -14,27 +14,6 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- *
- * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
- * not a hard limit any more. Although some userspace tools can be surprised by
- * that.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN (5)
-#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 72ce932c69b2..35de50a65665 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -33,6 +33,7 @@ enum scpi_sensor_class {
VOLTAGE,
CURRENT,
POWER,
+ ENERGY,
};
struct scpi_sensor_info {
@@ -68,7 +69,7 @@ struct scpi_ops {
struct scpi_dvfs_info *(*dvfs_get_info)(u8);
int (*sensor_get_capability)(u16 *sensors);
int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
- int (*sensor_get_value)(u16, u32 *);
+ int (*sensor_get_value)(u16, u64 *);
};
#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d3fcd4591ce4..15d0df943466 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1161,10 +1161,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to->l4_hash = from->l4_hash;
};
-static inline void skb_sender_cpu_clear(struct sk_buff *skb)
-{
-}
-
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -2186,6 +2182,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
return skb->csum_start - skb_headroom(skb);
}
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+ return skb->head + skb->csum_start;
+}
+
static inline int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
@@ -2424,6 +2425,10 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
{
return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
+void napi_consume_skb(struct sk_buff *skb, int budget);
+
+void __kfree_skb_flush(void);
+void __kfree_skb_defer(struct sk_buff *skb);
/**
* __dev_alloc_pages - allocate page for network Rx
@@ -2646,6 +2651,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
skb_headroom(skb) + len <= skb->hdr_len;
}
+static inline int skb_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+{
+ return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
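For context, a minimal sketch of the calling convention for skb_try_make_writable(): it evaluates to non-zero only when a shared clone could not be made private, so callers bail out before editing headers. The DSCP rewrite below is just a placeholder edit and assumes <linux/ip.h> and <net/dsfield.h>.

static int rewrite_tos(struct sk_buff *skb, u8 tos)
{
	if (skb_try_make_writable(skb, sizeof(struct iphdr)))
		return -ENOMEM;	/* could not get a private, writable copy */

	ipv4_change_dsfield(ip_hdr(skb), 0, tos);
	return 0;
}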
@@ -3574,6 +3586,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
struct skb_gso_cb {
int mac_offset;
int encap_level;
+ __wsum csum;
__u16 csum_start;
};
#define SKB_SGO_CB_OFFSET 32
@@ -3600,6 +3613,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
return 0;
}
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+ /* Do not update partial checksums if remote checksum is enabled. */
+ if (skb->remcsum_offload)
+ return;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
/* Compute the checksum for a gso segment. First compute the checksum value
* from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
* then add in skb->csum (checksum from csum_start to end of packet).
@@ -3610,15 +3633,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
*/
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
- int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __wsum partial;
+ unsigned char *csum_start = skb_transport_header(skb);
+ int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+ __wsum partial = SKB_GSO_CB(skb)->csum;
- partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
- skb->csum = res;
- SKB_GSO_CB(skb)->csum_start -= plen;
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
- return csum_fold(partial);
+ return csum_fold(csum_partial(csum_start, plen, partial));
}
static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3708,5 +3730,30 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
return hdr_len + skb_gso_transport_seglen(skb);
}
+/* Local Checksum Offload.
+ * Compute outer checksum based on the assumption that the
+ * inner checksum will be offloaded later.
+ * See Documentation/networking/checksum-offloads.txt for
+ * explanation of how this works.
+ * Fill in outer checksum adjustment (e.g. with sum of outer
+ * pseudo-header) before calling.
+ * Also ensure that inner checksum is in linear data area.
+ */
+static inline __wsum lco_csum(struct sk_buff *skb)
+{
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *l4_hdr = skb_transport_header(skb);
+ __wsum partial;
+
+ /* Start with complement of inner checksum adjustment */
+ partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
+ skb->csum_offset));
+
+ /* Add in checksum of our headers (incl. outer checksum
+ * adjustment filled in by caller) and return result.
+ */
+ return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
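To show how lco_csum() is meant to slot into an encapsulation transmit path, a hedged sketch of filling an outer IPv4 UDP checksum while the inner checksum stays offloaded; saddr/daddr are placeholders and <net/udp.h> is assumed for udp_v4_check():

	struct udphdr *uh = udp_hdr(skb);
	int len = skb->len - skb_transport_offset(skb);

	uh->check = 0;
	/* pseudo-header plus outer headers up to the inner csum_start */
	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;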
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
new file mode 100644
index 000000000000..e2e9de1acc5b
--- /dev/null
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header for EXYNOS PMU Driver support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SOC_EXYNOS_PMU_H
+#define __LINUX_SOC_EXYNOS_PMU_H
+
+enum sys_powerdown {
+ SYS_AFTR,
+ SYS_LPA,
+ SYS_SLEEP,
+ NUM_SYS_POWERDOWN,
+};
+
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
+
+#endif /* __LINUX_SOC_EXYNOS_PMU_H */
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
new file mode 100644
index 000000000000..d30186e2b609
--- /dev/null
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - Power management unit definition
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H
+#define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__
+
+#define S5P_CENTRAL_SEQ_CONFIGURATION 0x0200
+
+#define S5P_CENTRAL_LOWPWR_CFG (1 << 16)
+
+#define S5P_CENTRAL_SEQ_OPTION 0x0208
+
+#define S5P_USE_STANDBY_WFI0 (1 << 16)
+#define S5P_USE_STANDBY_WFI1 (1 << 17)
+#define S5P_USE_STANDBY_WFI2 (1 << 19)
+#define S5P_USE_STANDBY_WFI3 (1 << 20)
+#define S5P_USE_STANDBY_WFE0 (1 << 24)
+#define S5P_USE_STANDBY_WFE1 (1 << 25)
+#define S5P_USE_STANDBY_WFE2 (1 << 27)
+#define S5P_USE_STANDBY_WFE3 (1 << 28)
+
+#define S5P_USE_STANDBY_WFI_ALL \
+ (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFI1 | \
+ S5P_USE_STANDBY_WFI2 | S5P_USE_STANDBY_WFI3 | \
+ S5P_USE_STANDBY_WFE0 | S5P_USE_STANDBY_WFE1 | \
+ S5P_USE_STANDBY_WFE2 | S5P_USE_STANDBY_WFE3)
+
+#define S5P_USE_DELAYED_RESET_ASSERTION BIT(12)
+
+#define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n)
+#define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28)
+#define EXYNOS_SWRESET 0x0400
+#define EXYNOS5440_SWRESET 0x00C4
+
+#define S5P_WAKEUP_STAT 0x0600
+#define S5P_EINT_WAKEUP_MASK 0x0604
+#define S5P_WAKEUP_MASK 0x0608
+#define S5P_WAKEUP_MASK2 0x0614
+
+#define S5P_INFORM0 0x0800
+#define S5P_INFORM1 0x0804
+#define S5P_INFORM5 0x0814
+#define S5P_INFORM6 0x0818
+#define S5P_INFORM7 0x081C
+#define S5P_PMU_SPARE2 0x0908
+#define S5P_PMU_SPARE3 0x090C
+
+#define EXYNOS_IROM_DATA2 0x0988
+#define S5P_ARM_CORE0_LOWPWR 0x1000
+#define S5P_DIS_IRQ_CORE0 0x1004
+#define S5P_DIS_IRQ_CENTRAL0 0x1008
+#define S5P_ARM_CORE1_LOWPWR 0x1010
+#define S5P_DIS_IRQ_CORE1 0x1014
+#define S5P_DIS_IRQ_CENTRAL1 0x1018
+#define S5P_ARM_COMMON_LOWPWR 0x1080
+#define S5P_L2_0_LOWPWR 0x10C0
+#define S5P_L2_1_LOWPWR 0x10C4
+#define S5P_CMU_ACLKSTOP_LOWPWR 0x1100
+#define S5P_CMU_SCLKSTOP_LOWPWR 0x1104
+#define S5P_CMU_RESET_LOWPWR 0x110C
+#define S5P_APLL_SYSCLK_LOWPWR 0x1120
+#define S5P_MPLL_SYSCLK_LOWPWR 0x1124
+#define S5P_VPLL_SYSCLK_LOWPWR 0x1128
+#define S5P_EPLL_SYSCLK_LOWPWR 0x112C
+#define S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR 0x1138
+#define S5P_CMU_RESET_GPSALIVE_LOWPWR 0x113C
+#define S5P_CMU_CLKSTOP_CAM_LOWPWR 0x1140
+#define S5P_CMU_CLKSTOP_TV_LOWPWR 0x1144
+#define S5P_CMU_CLKSTOP_MFC_LOWPWR 0x1148
+#define S5P_CMU_CLKSTOP_G3D_LOWPWR 0x114C
+#define S5P_CMU_CLKSTOP_LCD0_LOWPWR 0x1150
+#define S5P_CMU_CLKSTOP_MAUDIO_LOWPWR 0x1158
+#define S5P_CMU_CLKSTOP_GPS_LOWPWR 0x115C
+#define S5P_CMU_RESET_CAM_LOWPWR 0x1160
+#define S5P_CMU_RESET_TV_LOWPWR 0x1164
+#define S5P_CMU_RESET_MFC_LOWPWR 0x1168
+#define S5P_CMU_RESET_G3D_LOWPWR 0x116C
+#define S5P_CMU_RESET_LCD0_LOWPWR 0x1170
+#define S5P_CMU_RESET_MAUDIO_LOWPWR 0x1178
+#define S5P_CMU_RESET_GPS_LOWPWR 0x117C
+#define S5P_TOP_BUS_LOWPWR 0x1180
+#define S5P_TOP_RETENTION_LOWPWR 0x1184
+#define S5P_TOP_PWR_LOWPWR 0x1188
+#define S5P_LOGIC_RESET_LOWPWR 0x11A0
+#define S5P_ONENAND_MEM_LOWPWR 0x11C0
+#define S5P_G2D_ACP_MEM_LOWPWR 0x11C8
+#define S5P_USBOTG_MEM_LOWPWR 0x11CC
+#define S5P_HSMMC_MEM_LOWPWR 0x11D0
+#define S5P_CSSYS_MEM_LOWPWR 0x11D4
+#define S5P_SECSS_MEM_LOWPWR 0x11D8
+#define S5P_PAD_RETENTION_DRAM_LOWPWR 0x1200
+#define S5P_PAD_RETENTION_MAUDIO_LOWPWR 0x1204
+#define S5P_PAD_RETENTION_GPIO_LOWPWR 0x1220
+#define S5P_PAD_RETENTION_UART_LOWPWR 0x1224
+#define S5P_PAD_RETENTION_MMCA_LOWPWR 0x1228
+#define S5P_PAD_RETENTION_MMCB_LOWPWR 0x122C
+#define S5P_PAD_RETENTION_EBIA_LOWPWR 0x1230
+#define S5P_PAD_RETENTION_EBIB_LOWPWR 0x1234
+#define S5P_PAD_RETENTION_ISOLATION_LOWPWR 0x1240
+#define S5P_PAD_RETENTION_ALV_SEL_LOWPWR 0x1260
+#define S5P_XUSBXTI_LOWPWR 0x1280
+#define S5P_XXTI_LOWPWR 0x1284
+#define S5P_EXT_REGULATOR_LOWPWR 0x12C0
+#define S5P_GPIO_MODE_LOWPWR 0x1300
+#define S5P_GPIO_MODE_MAUDIO_LOWPWR 0x1340
+#define S5P_CAM_LOWPWR 0x1380
+#define S5P_TV_LOWPWR 0x1384
+#define S5P_MFC_LOWPWR 0x1388
+#define S5P_G3D_LOWPWR 0x138C
+#define S5P_LCD0_LOWPWR 0x1390
+#define S5P_MAUDIO_LOWPWR 0x1398
+#define S5P_GPS_LOWPWR 0x139C
+#define S5P_GPS_ALIVE_LOWPWR 0x13A0
+
+#define EXYNOS_ARM_CORE0_CONFIGURATION 0x2000
+#define EXYNOS_ARM_CORE_CONFIGURATION(_nr) \
+ (EXYNOS_ARM_CORE0_CONFIGURATION + (0x80 * (_nr)))
+#define EXYNOS_ARM_CORE_STATUS(_nr) \
+ (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x4)
+#define EXYNOS_ARM_CORE_OPTION(_nr) \
+ (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x8)
+
+#define EXYNOS_ARM_COMMON_CONFIGURATION 0x2500
+#define EXYNOS_COMMON_CONFIGURATION(_nr) \
+ (EXYNOS_ARM_COMMON_CONFIGURATION + (0x80 * (_nr)))
+#define EXYNOS_COMMON_STATUS(_nr) \
+ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x4)
+#define EXYNOS_COMMON_OPTION(_nr) \
+ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8)
+
+#define EXYNOS_CORE_LOCAL_PWR_EN 0x3
+
+#define EXYNOS_ARM_COMMON_STATUS 0x2504
+#define EXYNOS_COMMON_OPTION(_nr) \
+ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8)
+
+#define EXYNOS_ARM_L2_CONFIGURATION 0x2600
+#define EXYNOS_L2_CONFIGURATION(_nr) \
+ (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80))
+#define EXYNOS_L2_STATUS(_nr) \
+ (EXYNOS_L2_CONFIGURATION(_nr) + 0x4)
+#define EXYNOS_L2_OPTION(_nr) \
+ (EXYNOS_L2_CONFIGURATION(_nr) + 0x8)
+#define EXYNOS_L2_COMMON_PWR_EN 0x3
+
+#define EXYNOS_ARM_CORE_X_STATUS_OFFSET 0x4
+
+#define EXYNOS5_APLL_SYSCLK_CONFIGURATION 0x2A00
+#define EXYNOS5_APLL_SYSCLK_STATUS 0x2A04
+
+#define EXYNOS5_ARM_L2_OPTION 0x2608
+#define EXYNOS5_USE_RETENTION BIT(4)
+
+#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3)
+
+#define S5P_PAD_RET_MAUDIO_OPTION 0x3028
+#define S5P_PAD_RET_MMC2_OPTION 0x30c8
+#define S5P_PAD_RET_GPIO_OPTION 0x3108
+#define S5P_PAD_RET_UART_OPTION 0x3128
+#define S5P_PAD_RET_MMCA_OPTION 0x3148
+#define S5P_PAD_RET_MMCB_OPTION 0x3168
+#define S5P_PAD_RET_EBIA_OPTION 0x3188
+#define S5P_PAD_RET_EBIB_OPTION 0x31A8
+#define S5P_PAD_RET_SPI_OPTION 0x31c8
+
+#define S5P_PS_HOLD_CONTROL 0x330C
+#define S5P_PS_HOLD_EN (1 << 31)
+#define S5P_PS_HOLD_OUTPUT_HIGH (3 << 8)
+
+#define S5P_CAM_OPTION 0x3C08
+#define S5P_MFC_OPTION 0x3C48
+#define S5P_G3D_OPTION 0x3C68
+#define S5P_LCD0_OPTION 0x3C88
+#define S5P_LCD1_OPTION 0x3CA8
+#define S5P_ISP_OPTION S5P_LCD1_OPTION
+
+#define S5P_CORE_LOCAL_PWR_EN 0x3
+#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
+
+/* Only for EXYNOS4210 */
+#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
+#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
+#define S5P_MODIMIF_MEM_LOWPWR 0x11C4
+#define S5P_PCIE_MEM_LOWPWR 0x11E0
+#define S5P_SATA_MEM_LOWPWR 0x11E4
+#define S5P_LCD1_LOWPWR 0x1394
+
+/* Only for EXYNOS4x12 */
+#define S5P_ISP_ARM_LOWPWR 0x1050
+#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054
+#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058
+#define S5P_CMU_ACLKSTOP_COREBLK_LOWPWR 0x1110
+#define S5P_CMU_SCLKSTOP_COREBLK_LOWPWR 0x1114
+#define S5P_CMU_RESET_COREBLK_LOWPWR 0x111C
+#define S5P_MPLLUSER_SYSCLK_LOWPWR 0x1130
+#define S5P_CMU_CLKSTOP_ISP_LOWPWR 0x1154
+#define S5P_CMU_RESET_ISP_LOWPWR 0x1174
+#define S5P_TOP_BUS_COREBLK_LOWPWR 0x1190
+#define S5P_TOP_RETENTION_COREBLK_LOWPWR 0x1194
+#define S5P_TOP_PWR_COREBLK_LOWPWR 0x1198
+#define S5P_OSCCLK_GATE_LOWPWR 0x11A4
+#define S5P_LOGIC_RESET_COREBLK_LOWPWR 0x11B0
+#define S5P_OSCCLK_GATE_COREBLK_LOWPWR 0x11B4
+#define S5P_HSI_MEM_LOWPWR 0x11C4
+#define S5P_ROTATOR_MEM_LOWPWR 0x11DC
+#define S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR 0x123C
+#define S5P_PAD_ISOLATION_COREBLK_LOWPWR 0x1250
+#define S5P_GPIO_MODE_COREBLK_LOWPWR 0x1320
+#define S5P_TOP_ASB_RESET_LOWPWR 0x1344
+#define S5P_TOP_ASB_ISOLATION_LOWPWR 0x1348
+#define S5P_ISP_LOWPWR 0x1394
+#define S5P_DRAM_FREQ_DOWN_LOWPWR 0x13B0
+#define S5P_DDRPHY_DLLOFF_LOWPWR 0x13B4
+#define S5P_CMU_SYSCLK_ISP_LOWPWR 0x13B8
+#define S5P_CMU_SYSCLK_GPS_LOWPWR 0x13BC
+#define S5P_LPDDR_PHY_DLL_LOCK_LOWPWR 0x13C0
+
+#define S5P_ARM_L2_0_OPTION 0x2608
+#define S5P_ARM_L2_1_OPTION 0x2628
+#define S5P_ONENAND_MEM_OPTION 0x2E08
+#define S5P_HSI_MEM_OPTION 0x2E28
+#define S5P_G2D_ACP_MEM_OPTION 0x2E48
+#define S5P_USBOTG_MEM_OPTION 0x2E68
+#define S5P_HSMMC_MEM_OPTION 0x2E88
+#define S5P_CSSYS_MEM_OPTION 0x2EA8
+#define S5P_SECSS_MEM_OPTION 0x2EC8
+#define S5P_ROTATOR_MEM_OPTION 0x2F48
+
+/* Only for EXYNOS4412 */
+#define S5P_ARM_CORE2_LOWPWR 0x1020
+#define S5P_DIS_IRQ_CORE2 0x1024
+#define S5P_DIS_IRQ_CENTRAL2 0x1028
+#define S5P_ARM_CORE3_LOWPWR 0x1030
+#define S5P_DIS_IRQ_CORE3 0x1034
+#define S5P_DIS_IRQ_CENTRAL3 0x1038
+
+/* Only for EXYNOS3XXX */
+#define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000
+#define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
+#define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
+#define EXYNOS3_ARM_CORE1_SYS_PWR_REG 0x1010
+#define EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014
+#define EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018
+#define EXYNOS3_ISP_ARM_SYS_PWR_REG 0x1050
+#define EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054
+#define EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058
+#define EXYNOS3_ARM_COMMON_SYS_PWR_REG 0x1080
+#define EXYNOS3_ARM_L2_SYS_PWR_REG 0x10C0
+#define EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG 0x1100
+#define EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG 0x1104
+#define EXYNOS3_CMU_RESET_SYS_PWR_REG 0x110C
+#define EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG 0x1110
+#define EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG 0x1114
+#define EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG 0x111C
+#define EXYNOS3_APLL_SYSCLK_SYS_PWR_REG 0x1120
+#define EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG 0x1124
+#define EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG 0x1128
+#define EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG 0x112C
+#define EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1130
+#define EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1134
+#define EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG 0x1138
+#define EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140
+#define EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148
+#define EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C
+#define EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150
+#define EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1154
+#define EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158
+#define EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG 0x1160
+#define EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG 0x1168
+#define EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG 0x116C
+#define EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG 0x1170
+#define EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG 0x1174
+#define EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178
+#define EXYNOS3_TOP_BUS_SYS_PWR_REG 0x1180
+#define EXYNOS3_TOP_RETENTION_SYS_PWR_REG 0x1184
+#define EXYNOS3_TOP_PWR_SYS_PWR_REG 0x1188
+#define EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG 0x1190
+#define EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG 0x1194
+#define EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG 0x1198
+#define EXYNOS3_LOGIC_RESET_SYS_PWR_REG 0x11A0
+#define EXYNOS3_OSCCLK_GATE_SYS_PWR_REG 0x11A4
+#define EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG 0x11B0
+#define EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG 0x11B4
+#define EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
+#define EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204
+#define EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG 0x1208
+#define EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1218
+#define EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
+#define EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG 0x1224
+#define EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1228
+#define EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG 0x122C
+#define EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
+#define EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
+#define EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1238
+#define EXYNOS3_PAD_ISOLATION_SYS_PWR_REG 0x1240
+#define EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG 0x1260
+#define EXYNOS3_XUSBXTI_SYS_PWR_REG 0x1280
+#define EXYNOS3_XXTI_SYS_PWR_REG 0x1284
+#define EXYNOS3_EXT_REGULATOR_SYS_PWR_REG 0x12C0
+#define EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG 0x12C4
+#define EXYNOS3_GPIO_MODE_SYS_PWR_REG 0x1300
+#define EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340
+#define EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG 0x1344
+#define EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348
+#define EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG 0x1350
+#define EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG 0x1354
+#define EXYNOS3_CAM_SYS_PWR_REG 0x1380
+#define EXYNOS3_MFC_SYS_PWR_REG 0x1388
+#define EXYNOS3_G3D_SYS_PWR_REG 0x138C
+#define EXYNOS3_LCD0_SYS_PWR_REG 0x1390
+#define EXYNOS3_ISP_SYS_PWR_REG 0x1394
+#define EXYNOS3_MAUDIO_SYS_PWR_REG 0x1398
+#define EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG 0x13B0
+#define EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG 0x13B4
+#define EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13B8
+#define EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG 0x13C0
+#define EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG 0x13C4
+#define EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG 0x13C8
+
+#define EXYNOS3_ARM_CORE0_OPTION 0x2008
+#define EXYNOS3_ARM_CORE_OPTION(_nr) \
+ (EXYNOS3_ARM_CORE0_OPTION + ((_nr) * 0x80))
+
+#define EXYNOS3_ARM_COMMON_OPTION 0x2408
+#define EXYNOS3_ARM_L2_OPTION 0x2608
+#define EXYNOS3_TOP_PWR_OPTION 0x2C48
+#define EXYNOS3_CORE_TOP_PWR_OPTION 0x2CA8
+#define EXYNOS3_XUSBXTI_DURATION 0x341C
+#define EXYNOS3_XXTI_DURATION 0x343C
+#define EXYNOS3_EXT_REGULATOR_DURATION 0x361C
+#define EXYNOS3_EXT_REGULATOR_COREBLK_DURATION 0x363C
+#define XUSBXTI_DURATION 0x00000BB8
+#define XXTI_DURATION XUSBXTI_DURATION
+#define EXT_REGULATOR_DURATION 0x00001D4C
+#define EXT_REGULATOR_COREBLK_DURATION EXT_REGULATOR_DURATION
+
+/* for XXX_OPTION */
+#define EXYNOS3_OPTION_USE_SC_COUNTER (1 << 0)
+#define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1)
+#define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
+
+/* For EXYNOS5 */
+
+#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
+#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
+
+#define EXYNOS5_USE_RETENTION BIT(4)
+#define EXYNOS5_SYS_WDTRESET (1 << 20)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG 0x1000
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG 0x1010
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG 0x1040
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG 0x1048
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG 0x1050
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG 0x1080
+#define EXYNOS5_ARM_L2_SYS_PWR_REG 0x10C0
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG 0x1100
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG 0x1104
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG 0x110C
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG 0x1120
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG 0x1124
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG 0x112C
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG 0x1130
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG 0x1134
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG 0x1138
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG 0x1140
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG 0x1144
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG 0x1148
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG 0x114C
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG 0x1150
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG 0x1154
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1164
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1170
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG 0x1180
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG 0x1184
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG 0x1188
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG 0x1190
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG 0x1194
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG 0x1198
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG 0x11A0
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG 0x11A4
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG 0x11B0
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG 0x11B4
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG 0x11C0
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG 0x11C8
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG 0x11CC
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG 0x11D0
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG 0x11D4
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG 0x11D8
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG 0x11DC
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG 0x11E0
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG 0x11E4
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG 0x11E8
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG 0x11EC
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG 0x11F4
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG 0x1208
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG 0x1238
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG 0x123C
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG 0x1240
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG 0x1250
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG 0x1260
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG 0x1280
+#define EXYNOS5_XXTI_SYS_PWR_REG 0x1284
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG 0x12C0
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG 0x1300
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG 0x1320
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG 0x1340
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG 0x1344
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348
+#define EXYNOS5_GSCL_SYS_PWR_REG 0x1400
+#define EXYNOS5_ISP_SYS_PWR_REG 0x1404
+#define EXYNOS5_MFC_SYS_PWR_REG 0x1408
+#define EXYNOS5_G3D_SYS_PWR_REG 0x140C
+#define EXYNOS5_DISP1_SYS_PWR_REG 0x1414
+#define EXYNOS5_MAU_SYS_PWR_REG 0x1418
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG 0x1480
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1484
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1488
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x148C
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1494
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1498
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG 0x14C0
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG 0x14C4
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG 0x14C8
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG 0x14CC
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D4
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D8
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG 0x1580
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG 0x1584
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG 0x1588
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG 0x158C
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG 0x1594
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG 0x1598
+
+#define EXYNOS5_ARM_CORE0_OPTION 0x2008
+#define EXYNOS5_ARM_CORE1_OPTION 0x2088
+#define EXYNOS5_FSYS_ARM_OPTION 0x2208
+#define EXYNOS5_ISP_ARM_OPTION 0x2288
+#define EXYNOS5_ARM_COMMON_OPTION 0x2408
+#define EXYNOS5_ARM_L2_OPTION 0x2608
+#define EXYNOS5_TOP_PWR_OPTION 0x2C48
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION 0x2CC8
+#define EXYNOS5_JPEG_MEM_OPTION 0x2F48
+#define EXYNOS5_GSCL_OPTION 0x4008
+#define EXYNOS5_ISP_OPTION 0x4028
+#define EXYNOS5_MFC_OPTION 0x4048
+#define EXYNOS5_G3D_OPTION 0x4068
+#define EXYNOS5_DISP1_OPTION 0x40A8
+#define EXYNOS5_MAU_OPTION 0x40C8
+
+#define EXYNOS5_USE_SC_FEEDBACK (1 << 1)
+#define EXYNOS5_USE_SC_COUNTER (1 << 0)
+
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
+
+#define EXYNOS5420_SWRESET_KFC_SEL 0x3
+
+/* Only for EXYNOS5420 */
+#define EXYNOS5420_ISP_ARM_OPTION 0x2488
+#define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3)
+
+#define EXYNOS5420_LPI_MASK 0x0004
+#define EXYNOS5420_LPI_MASK1 0x0008
+#define EXYNOS5420_UFS BIT(8)
+#define EXYNOS5420_ATB_KFC BIT(13)
+#define EXYNOS5420_ATB_ISP_ARM BIT(19)
+#define EXYNOS5420_EMULATION BIT(31)
+#define ATB_ISP_ARM BIT(12)
+#define ATB_KFC BIT(13)
+#define ATB_NOC BIT(14)
+
+#define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100
+#define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104
+#define EXYNOS5420_UP_SCHEDULER 0x0120
+#define SPREAD_ENABLE 0xF
+#define SPREAD_USE_STANDWFI 0xF
+
+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
+#define EXYNOS5420_BB_CON1 0x0784
+#define EXYNOS5420_BB_SEL_EN BIT(31)
+#define EXYNOS5420_BB_PMOS_EN BIT(7)
+#define EXYNOS5420_BB_1300X 0XF
+
+#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020
+#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024
+#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028
+#define EXYNOS5420_ARM_CORE3_SYS_PWR_REG 0x1030
+#define EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG 0x1034
+#define EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG 0x1038
+#define EXYNOS5420_KFC_CORE0_SYS_PWR_REG 0x1040
+#define EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG 0x1044
+#define EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG 0x1048
+#define EXYNOS5420_KFC_CORE1_SYS_PWR_REG 0x1050
+#define EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG 0x1054
+#define EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG 0x1058
+#define EXYNOS5420_KFC_CORE2_SYS_PWR_REG 0x1060
+#define EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG 0x1064
+#define EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG 0x1068
+#define EXYNOS5420_KFC_CORE3_SYS_PWR_REG 0x1070
+#define EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG 0x1074
+#define EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG 0x1078
+#define EXYNOS5420_ISP_ARM_SYS_PWR_REG 0x1090
+#define EXYNOS5420_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1094
+#define EXYNOS5420_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1098
+#define EXYNOS5420_ARM_COMMON_SYS_PWR_REG 0x10A0
+#define EXYNOS5420_KFC_COMMON_SYS_PWR_REG 0x10B0
+#define EXYNOS5420_KFC_L2_SYS_PWR_REG 0x10D0
+#define EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG 0x1158
+#define EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG 0x115C
+#define EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG 0x1160
+#define EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG 0x1174
+#define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178
+#define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8
+#define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC
+#define EXYNOS5420_ONENANDXL_MEM_SYS_PWR 0x11C0
+#define EXYNOS5420_USBDEV_MEM_SYS_PWR 0x11CC
+#define EXYNOS5420_USBDEV1_MEM_SYS_PWR 0x11D0
+#define EXYNOS5420_SDMMC_MEM_SYS_PWR 0x11D4
+#define EXYNOS5420_CSSYS_MEM_SYS_PWR 0x11D8
+#define EXYNOS5420_SECSS_MEM_SYS_PWR 0x11DC
+#define EXYNOS5420_ROTATOR_MEM_SYS_PWR 0x11E0
+#define EXYNOS5420_INTRAM_MEM_SYS_PWR 0x11E4
+#define EXYNOS5420_INTROM_MEM_SYS_PWR 0x11E8
+#define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208
+#define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210
+#define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214
+#define EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1218
+#define EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG 0x121C
+#define EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1220
+#define EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG 0x1224
+#define EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1228
+#define EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG 0x122C
+#define EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG 0x1230
+#define EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG 0x1234
+#define EXYNOS5420_DISP1_SYS_PWR_REG 0x1410
+#define EXYNOS5420_MAU_SYS_PWR_REG 0x1414
+#define EXYNOS5420_G2D_SYS_PWR_REG 0x1418
+#define EXYNOS5420_MSC_SYS_PWR_REG 0x141C
+#define EXYNOS5420_FSYS_SYS_PWR_REG 0x1420
+#define EXYNOS5420_FSYS2_SYS_PWR_REG 0x1424
+#define EXYNOS5420_PSGEN_SYS_PWR_REG 0x1428
+#define EXYNOS5420_PERIC_SYS_PWR_REG 0x142C
+#define EXYNOS5420_WCORE_SYS_PWR_REG 0x1430
+#define EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1490
+#define EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1494
+#define EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG 0x1498
+#define EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG 0x149C
+#define EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG 0x14A0
+#define EXYNOS5420_CMU_CLKSTOP_FSYS2_SYS_PWR_REG 0x14A4
+#define EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG 0x14A8
+#define EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG 0x14AC
+#define EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG 0x14B0
+#define EXYNOS5420_CMU_SYSCLK_TOPPWR_SYS_PWR_REG 0x14BC
+#define EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D0
+#define EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D4
+#define EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG 0x14D8
+#define EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG 0x14DC
+#define EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG 0x14E0
+#define EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG 0x14E4
+#define EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG 0x14E8
+#define EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG 0x14EC
+#define EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG 0x14F0
+#define EXYNOS5420_CMU_SYSCLK_SYSMEM_TOPPWR_SYS_PWR_REG 0x14F4
+#define EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG 0x1570
+#define EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG 0x1574
+#define EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG 0x1578
+#define EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG 0x157C
+#define EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG 0x1590
+#define EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG 0x1594
+#define EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG 0x1598
+#define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C
+#define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0
+#define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4
+#define EXYNOS_ARM_CORE2_CONFIGURATION 0x2100
+#define EXYNOS5420_ARM_CORE2_OPTION 0x2108
+#define EXYNOS_ARM_CORE3_CONFIGURATION 0x2180
+#define EXYNOS5420_ARM_CORE3_OPTION 0x2188
+#define EXYNOS5420_ARM_COMMON_STATUS 0x2504
+#define EXYNOS5420_ARM_COMMON_OPTION 0x2508
+#define EXYNOS5420_KFC_COMMON_STATUS 0x2584
+#define EXYNOS5420_KFC_COMMON_OPTION 0x2588
+#define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C
+
+#define EXYNOS5420_PAD_RET_GPIO_OPTION 0x30C8
+#define EXYNOS5420_PAD_RET_UART_OPTION 0x30E8
+#define EXYNOS5420_PAD_RET_MMCA_OPTION 0x3108
+#define EXYNOS5420_PAD_RET_MMCB_OPTION 0x3128
+#define EXYNOS5420_PAD_RET_MMCC_OPTION 0x3148
+#define EXYNOS5420_PAD_RET_HSI_OPTION 0x3168
+#define EXYNOS5420_PAD_RET_SPI_OPTION 0x31C8
+#define EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION 0x31E8
+#define EXYNOS_PAD_RET_DRAM_OPTION 0x3008
+#define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028
+#define EXYNOS_PAD_RET_JTAG_OPTION 0x3048
+#define EXYNOS_PAD_RET_GPIO_OPTION 0x3108
+#define EXYNOS_PAD_RET_UART_OPTION 0x3128
+#define EXYNOS_PAD_RET_MMCA_OPTION 0x3148
+#define EXYNOS_PAD_RET_MMCB_OPTION 0x3168
+#define EXYNOS_PAD_RET_EBIA_OPTION 0x3188
+#define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8
+
+#define EXYNOS_PS_HOLD_CONTROL 0x330C
+
+/* For SYS_PWR_REG */
+#define EXYNOS_SYS_PWR_CFG BIT(0)
+
+#define EXYNOS5420_MFC_CONFIGURATION 0x4060
+#define EXYNOS5420_MFC_STATUS 0x4064
+#define EXYNOS5420_MFC_OPTION 0x4068
+#define EXYNOS5420_G3D_CONFIGURATION 0x4080
+#define EXYNOS5420_G3D_STATUS 0x4084
+#define EXYNOS5420_G3D_OPTION 0x4088
+#define EXYNOS5420_DISP0_CONFIGURATION 0x40A0
+#define EXYNOS5420_DISP0_STATUS 0x40A4
+#define EXYNOS5420_DISP0_OPTION 0x40A8
+#define EXYNOS5420_DISP1_CONFIGURATION 0x40C0
+#define EXYNOS5420_DISP1_STATUS 0x40C4
+#define EXYNOS5420_DISP1_OPTION 0x40C8
+#define EXYNOS5420_MAU_CONFIGURATION 0x40E0
+#define EXYNOS5420_MAU_STATUS 0x40E4
+#define EXYNOS5420_MAU_OPTION 0x40E8
+#define EXYNOS5420_FSYS2_OPTION 0x4168
+#define EXYNOS5420_PSGEN_OPTION 0x4188
+
+/* For EXYNOS_CENTRAL_SEQ_OPTION */
+#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
+#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17)
+#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
+#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
+
+#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
+#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
+#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
+#define EXYNOS5420_ARM_USE_STANDBY_WFI3 BIT(7)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI0 BIT(8)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI1 BIT(9)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI2 BIT(10)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI3 BIT(11)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE0 BIT(16)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE1 BIT(17)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE2 BIT(18)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE3 BIT(19)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE0 BIT(20)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE1 BIT(21)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE2 BIT(22)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE3 BIT(23)
+
+#define DUR_WAIT_RESET 0xF
+
+#define EXYNOS5420_USE_STANDBY_WFI_ALL (EXYNOS5420_ARM_USE_STANDBY_WFI0 \
+ | EXYNOS5420_ARM_USE_STANDBY_WFI1 \
+ | EXYNOS5420_ARM_USE_STANDBY_WFI2 \
+ | EXYNOS5420_ARM_USE_STANDBY_WFI3 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI0 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI1 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI2 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI3)
+
+#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
new file mode 100644
index 000000000000..eac8e0c6fe11
--- /dev/null
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -0,0 +1,35 @@
+/*
+ * Texas Instruments' Message Manager
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TI_MSGMGR_H
+#define TI_MSGMGR_H
+
+/**
+ * struct ti_msgmgr_message - Message Manager structure
+ * @len: Length of data in the Buffer
+ * @buf: Buffer pointer
+ *
+ * This is the structure for data used in mbox_send_message(). The
+ * length of the data buffer depends on the SoC integration
+ * parameters - each message may be 64 or 128 bytes long, depending
+ * on the SoC. The client is expected to be aware of this.
+ */
+struct ti_msgmgr_message {
+ size_t len;
+ u8 *buf;
+};
+
+#endif /* TI_MSGMGR_H */
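
A minimal sketch, assuming a mailbox channel already acquired through the standard mailbox client API, of how a client might wrap a payload in ti_msgmgr_message; the function name is hypothetical, mbox_send_message() is the existing framework call.

/* Illustrative sketch: sending a payload through a message-manager
 * channel obtained via mbox_request_channel().
 */
static int example_msgmgr_send(struct mbox_chan *chan, void *data, size_t len)
{
	struct ti_msgmgr_message msg = {
		.len = len,	/* must fit the SoC's 64- or 128-byte slot */
		.buf = data,
	};

	return mbox_send_message(chan, &msg);
}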
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5bf59c8493b7..73bf6c6a833b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -200,7 +200,9 @@ struct ucred {
#define AF_ALG 38 /* Algorithm sockets */
#define AF_NFC 39 /* NFC sockets */
#define AF_VSOCK 40 /* vSockets */
-#define AF_MAX 41 /* For now.. */
+#define AF_KCM 41 /* Kernel Connection Multiplexor*/
+
+#define AF_MAX 42 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -246,6 +248,7 @@ struct ucred {
#define PF_ALG AF_ALG
#define PF_NFC AF_NFC
#define PF_VSOCK AF_VSOCK
+#define PF_KCM AF_KCM
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -274,6 +277,7 @@ struct ucred {
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
@@ -322,6 +326,7 @@ struct ucred {
#define SOL_CAIF 278
#define SOL_ALG 279
#define SOL_NFC 280
+#define SOL_KCM 281
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 881a79d52467..4bcf5a61aada 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -90,7 +90,21 @@ struct stmmac_dma_cfg {
int pbl;
int fixed_burst;
int mixed_burst;
- int burst_len;
+ bool aal;
+};
+
+#define AXI_BLEN 7
+struct stmmac_axi {
+ bool axi_lpi_en;
+ bool axi_xit_frm;
+ u32 axi_wr_osr_lmt;
+ u32 axi_rd_osr_lmt;
+ bool axi_kbbe;
+ bool axi_axi_all;
+ u32 axi_blen[AXI_BLEN];
+ bool axi_fb;
+ bool axi_mb;
+ bool axi_rb;
};
struct plat_stmmacenet_data {
@@ -123,5 +137,6 @@ struct plat_stmmacenet_data {
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
+ struct stmmac_axi *axi;
};
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 9eebc66d957a..d3993a79a325 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -128,7 +128,13 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
-extern int strtobool(const char *s, bool *res);
+extern int kstrtobool(const char *s, bool *res);
+static inline int strtobool(const char *s, bool *res)
+{
+ return kstrtobool(s, res);
+}
+
+int match_string(const char * const *array, size_t n, const char *string);
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
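
strtobool() is kept as an inline wrapper around the renamed kstrtobool(), so existing callers keep compiling. The new match_string() helper returns the index of the matching array entry or a negative errno; a hedged sketch with illustrative names:

/* Illustrative sketch: mapping a user-supplied string onto an index.
 * The comparison is exact, so any trailing newline must be stripped
 * by the caller beforehand.
 */
static const char * const example_modes[] = { "off", "on", "auto" };

static int example_parse_mode(const char *s)
{
	/* >= 0: index into example_modes; < 0: no entry matched */
	return match_string(example_modes, ARRAY_SIZE(example_modes), s);
}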
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 1ecf13e148b8..6a241a277249 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -21,10 +21,17 @@
#include <linux/utsname.h>
/*
+ * Maximum size of AUTH_NONE authentication information, in XDR words.
+ */
+#define NUL_CALLSLACK (4)
+#define NUL_REPLYSLACK (2)
+
+/*
* Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
* but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
*/
#define UNX_MAXNODENAME __NEW_UTS_LEN
+#define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME))
struct rpcsec_gss_info;
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 131032f15cc1..9a7ddbaf116e 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -25,6 +25,7 @@
#include <asm/signal.h>
#include <linux/path.h>
#include <net/ipv6.h>
+#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
@@ -67,6 +68,7 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
+ struct rpc_xprt_iter cl_xpi;
};
/*
@@ -139,7 +141,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
struct rpc_xprt *xprt);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
const struct rpc_program *, u32);
-void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
rpc_authflavor_t);
@@ -181,6 +182,21 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
+ int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
+ void *data);
+
+int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
+ struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt,
+ void *dummy);
+int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
+ int (*setup)(struct rpc_clnt *,
+ struct rpc_xprt_switch *,
+ struct rpc_xprt *,
+ void *),
+ void *data);
+
const char *rpc_proc_name(const struct rpc_task *task);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
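
rpc_clnt_iterate_for_each_xprt() walks every transport attached to a client and invokes the callback on each; a negative callback return is expected to stop the walk. A hedged sketch with hypothetical names:

/* Illustrative sketch: counting the transports attached to an rpc_clnt
 * with the new multipath iteration helper.
 */
static int example_count_one(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			     void *data)
{
	(*(unsigned int *)data)++;
	return 0;		/* non-negative: keep iterating */
}

static unsigned int example_count_xprts(struct rpc_clnt *clnt)
{
	unsigned int count = 0;

	rpc_clnt_iterate_for_each_xprt(clnt, example_count_one, &count);
	return count;
}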
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index f33c5a4d6fe4..3b1ff38f0c37 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -93,6 +93,12 @@ struct rpcrdma_msg {
__be32 rm_pempty[3]; /* 3 empty chunk lists */
} rm_padded;
+ struct {
+ __be32 rm_err;
+ __be32 rm_vers_low;
+ __be32 rm_vers_high;
+ } rm_error;
+
__be32 rm_chunks[0]; /* read, write and reply chunks */
} rm_body;
@@ -102,17 +108,13 @@ struct rpcrdma_msg {
* Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
*/
#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
+#define RPCRDMA_HDRLEN_ERR (sizeof(__be32) * 5)
enum rpcrdma_errcode {
ERR_VERS = 1,
ERR_CHUNK = 2
};
-struct rpcrdma_err_vers {
- uint32_t rdma_vers_low; /* Version range supported by peer */
- uint32_t rdma_vers_high;
-};
-
enum rpcrdma_proc {
RDMA_MSG = 0, /* An RPC call or reply msg */
RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d703f0ef37d8..05a1809c44d9 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -42,40 +42,43 @@ struct rpc_wait {
*/
struct rpc_task {
atomic_t tk_count; /* Reference count */
+ int tk_status; /* result of last operation */
struct list_head tk_task; /* global list of tasks */
- struct rpc_clnt * tk_client; /* RPC client */
- struct rpc_rqst * tk_rqstp; /* RPC request */
-
- /*
- * RPC call state
- */
- struct rpc_message tk_msg; /* RPC call info */
/*
* callback to be executed after waking up
* action next procedure for async tasks
- * tk_ops caller callbacks
*/
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
- const struct rpc_call_ops *tk_ops;
- void * tk_calldata;
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned long tk_runstate; /* Task run status */
- struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
- * be any workqueue
- */
+
struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
union {
struct work_struct tk_work; /* Async task work queue */
struct rpc_wait tk_wait; /* RPC wait */
} u;
+ /*
+ * RPC call state
+ */
+ struct rpc_message tk_msg; /* RPC call info */
+ void * tk_calldata; /* Caller private data */
+ const struct rpc_call_ops *tk_ops; /* Caller callbacks */
+
+ struct rpc_clnt * tk_client; /* RPC client */
+ struct rpc_xprt * tk_xprt; /* Transport */
+
+ struct rpc_rqst * tk_rqstp; /* RPC request */
+
+ struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
+ * be any workqueue
+ */
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
- int tk_status; /* result of last operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
@@ -100,6 +103,7 @@ struct rpc_call_ops {
struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
+ struct rpc_xprt *rpc_xprt;
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 5322fea6fe4c..3081339968c3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -75,8 +75,10 @@ struct svc_rdma_op_ctxt {
struct svc_rdma_fastreg_mr *frmr;
int hdr_count;
struct xdr_buf arg;
+ struct ib_cqe cqe;
+ struct ib_cqe reg_cqe;
+ struct ib_cqe inv_cqe;
struct list_head dto_q;
- enum ib_wr_opcode wr_op;
enum ib_wc_status wc_status;
u32 byte_len;
u32 position;
@@ -174,8 +176,6 @@ struct svcxprt_rdma {
struct work_struct sc_work;
};
/* sc_flags */
-#define RDMAXPRT_RQ_PENDING 1
-#define RDMAXPRT_SQ_PENDING 2
#define RDMAXPRT_CONN_PENDING 3
#define RPCRDMA_LISTEN_BACKLOG 10
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
struct xdr_buf *rcvbuf);
/* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
struct rpcrdma_msg *,
enum rpcrdma_errcode, __be32 *);
@@ -224,16 +224,22 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
/* svc_rdma_sendto.c */
extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
- struct svc_rdma_req_map *);
+ struct svc_rdma_req_map *, bool);
extern int svc_rdma_sendto(struct svc_rqst *);
extern struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *);
+extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
+ int);
/* svc_rdma_transport.c */
+extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
- enum rpcrdma_errcode);
extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
+extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 69ef5b3ab038..fb0d212e0d3a 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -13,6 +13,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ktime.h>
+#include <linux/kref.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
@@ -166,7 +167,7 @@ enum xprt_transports {
};
struct rpc_xprt {
- atomic_t count; /* Reference count */
+ struct kref kref; /* Reference count */
struct rpc_xprt_ops * ops; /* transport methods */
const struct rpc_timeout *timeout; /* timeout parms */
@@ -197,6 +198,11 @@ struct rpc_xprt {
unsigned int bind_index; /* bind function index */
/*
+ * Multipath
+ */
+ struct list_head xprt_switch;
+
+ /*
* Connection of transports
*/
unsigned long bind_timeout,
@@ -256,6 +262,7 @@ struct rpc_xprt {
struct dentry *debugfs; /* debugfs directory */
atomic_t inject_disconnect;
#endif
+ struct rcu_head rcu;
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -318,24 +325,13 @@ int xprt_adjust_timeout(struct rpc_rqst *req);
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release(struct rpc_task *task);
+struct rpc_xprt * xprt_get(struct rpc_xprt *xprt);
void xprt_put(struct rpc_xprt *xprt);
struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
-/**
- * xprt_get - return a reference to an RPC transport.
- * @xprt: pointer to the transport
- *
- */
-static inline struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
-{
- if (atomic_inc_not_zero(&xprt->count))
- return xprt;
- return NULL;
-}
-
static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
{
return p + xprt->tsh_size;
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
new file mode 100644
index 000000000000..5a9acffa41be
--- /dev/null
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -0,0 +1,69 @@
+/*
+ * RPC client multipathing definitions
+ *
+ * Copyright (c) 2015, 2016, Primary Data, Inc. All rights reserved.
+ *
+ * Trond Myklebust <trond.myklebust@primarydata.com>
+ */
+#ifndef _NET_SUNRPC_XPRTMULTIPATH_H
+#define _NET_SUNRPC_XPRTMULTIPATH_H
+
+struct rpc_xprt_iter_ops;
+struct rpc_xprt_switch {
+ spinlock_t xps_lock;
+ struct kref xps_kref;
+
+ unsigned int xps_nxprts;
+ struct list_head xps_xprt_list;
+
+ struct net * xps_net;
+
+ const struct rpc_xprt_iter_ops *xps_iter_ops;
+
+ struct rcu_head xps_rcu;
+};
+
+struct rpc_xprt_iter {
+ struct rpc_xprt_switch __rcu *xpi_xpswitch;
+ struct rpc_xprt * xpi_cursor;
+
+ const struct rpc_xprt_iter_ops *xpi_ops;
+};
+
+
+struct rpc_xprt_iter_ops {
+ void (*xpi_rewind)(struct rpc_xprt_iter *);
+ struct rpc_xprt *(*xpi_xprt)(struct rpc_xprt_iter *);
+ struct rpc_xprt *(*xpi_next)(struct rpc_xprt_iter *);
+};
+
+extern struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt,
+ gfp_t gfp_flags);
+
+extern struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps);
+extern void xprt_switch_put(struct rpc_xprt_switch *xps);
+
+extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
+
+extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt);
+extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt);
+
+extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
+extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
+extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
+
+extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
+ struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *newswitch);
+
+extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
+extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
+extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
+
+#endif
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b7b279b54504..767190b01363 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -54,8 +54,6 @@
#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
-#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
-
/* Memory registration strategies, by number.
* This is part of a kernel / user space API. Do not remove. */
enum rpcrdma_memreg {
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 185815c96433..d795472c54d8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -575,8 +575,14 @@ asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos);
asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
+ int flags);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
+ int flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b386361ba3e8..7be9b1242354 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
return (struct tcphdr *)skb_transport_header(skb);
}
+static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
+{
+ return th->doff * 4;
+}
+
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
- return tcp_hdr(skb)->doff * 4;
+ return __tcp_hdrlen(tcp_hdr(skb));
}
static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
@@ -153,6 +158,9 @@ struct tcp_sock {
u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
* total number of segments in.
*/
+ u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
@@ -160,6 +168,9 @@ struct tcp_sock {
u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
* The total number of segments sent.
*/
+ u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
@@ -256,6 +267,7 @@ struct tcp_sock {
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
+ u32 delivered; /* Total data packets delivered incl. rexmits */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e13a1ace50e9..a55d0523f75d 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -156,6 +156,7 @@ struct thermal_attr {
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
+ * @trips_disabled: bitmap for disabled trips
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
@@ -191,6 +192,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
+ unsigned long trips_disabled; /* bitmap for disabled trips */
int passive_delay;
int polling_delay;
int temperature;
@@ -362,6 +364,11 @@ thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
const struct thermal_zone_of_device_ops *ops);
void thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
+struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -376,6 +383,19 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
{
}
+static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
+{
+}
+
#endif
#if IS_ENABLED(CONFIG_THERMAL)
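
The devm_ variant ties the sensor registration to the device's lifetime. A hedged sketch of a driver using the managed form; the probe function and example_tz_ops (a thermal_zone_of_device_ops instance) are hypothetical.

/* Illustrative sketch: registering a DT-described sensor with the managed
 * helper so the zone is unregistered automatically on driver detach.
 */
static int example_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
						   platform_get_drvdata(pdev),
						   &example_tz_ops);
	return PTR_ERR_OR_ZERO(tzd);
}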
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 21f73649a4dc..62be0786d6d0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -111,7 +111,7 @@ enum tick_dep_bits {
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#ifdef CONFIG_NO_HZ_COMMON
-extern int tick_nohz_enabled;
+extern bool tick_nohz_enabled;
extern int tick_nohz_tick_stopped(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 705df7db4482..0810f81b6db2 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -420,7 +420,8 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
- enum event_trigger_type tt);
+ enum event_trigger_type tt,
+ void *rec);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
@@ -507,7 +508,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
if (tt)
- event_triggers_post_call(file, tt);
+ event_triggers_post_call(file, tt, entry);
}
/**
@@ -540,7 +541,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
irq_flags, pc, regs);
if (tt)
- event_triggers_post_call(file, tt);
+ event_triggers_post_call(file, tt, entry);
}
#ifdef CONFIG_BPF_EVENTS
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
index 99c1b4d20b0f..33383ca23837 100644
--- a/include/linux/unaligned/access_ok.h
+++ b/include/linux/unaligned/access_ok.h
@@ -4,62 +4,62 @@
#include <linux/kernel.h>
#include <asm/byteorder.h>
-static inline u16 get_unaligned_le16(const void *p)
+static __always_inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
-static inline u32 get_unaligned_le32(const void *p)
+static __always_inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
-static inline u64 get_unaligned_le64(const void *p)
+static __always_inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
-static inline u16 get_unaligned_be16(const void *p)
+static __always_inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
-static inline u32 get_unaligned_be32(const void *p)
+static __always_inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
-static inline u64 get_unaligned_be64(const void *p)
+static __always_inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
-static inline void put_unaligned_le16(u16 val, void *p)
+static __always_inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
-static inline void put_unaligned_le32(u32 val, void *p)
+static __always_inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
-static inline void put_unaligned_le64(u64 val, void *p)
+static __always_inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
-static inline void put_unaligned_be16(u16 val, void *p)
+static __always_inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
-static inline void put_unaligned_be32(u32 val, void *p)
+static __always_inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
-static inline void put_unaligned_be64(u64 val, void *p)
+static __always_inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 69e1d4a1f1b3..b39a5f3153bd 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -36,6 +36,26 @@
struct pci_dev;
/**
+ * enum vga_switcheroo_handler_flags_t - handler flags bitmask
+ * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
+ * DDC lines separately. This signals to clients that they should call
+ * drm_get_edid_switcheroo() to probe the EDID
+ * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
+ * the AUX channel separately. This signals to clients that the active
+ * GPU needs to train the link and communicate the link parameters to the
+ * inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
+ * skip the AUX handshake and set up its output with these pre-calibrated
+ * values (DisplayPort specification v1.1a, section 2.5.3.3)
+ *
+ * Handler flags bitmask. Used by handlers to declare their capabilities upon
+ * registering with vga_switcheroo.
+ */
+enum vga_switcheroo_handler_flags_t {
+ VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0),
+ VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1),
+};
+
+/**
* enum vga_switcheroo_state - client power state
* @VGA_SWITCHEROO_OFF: off
* @VGA_SWITCHEROO_ON: on
@@ -82,6 +102,9 @@ enum vga_switcheroo_client_id {
* Mandatory. For muxless machines this should be a no-op. Returning 0
* denotes success, anything else failure (in which case the switch is
* aborted)
+ * @switch_ddc: switch DDC lines to given client.
+ * Optional. Should return the previous DDC owner on success or a
+ * negative int on failure
* @power_state: cut or reinstate power of given client.
* Optional. The return value is ignored
* @get_client_id: determine if given pci device is integrated or discrete GPU.
@@ -93,6 +116,7 @@ enum vga_switcheroo_client_id {
struct vga_switcheroo_handler {
int (*init)(void);
int (*switchto)(enum vga_switcheroo_client_id id);
+ int (*switch_ddc)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
@@ -132,8 +156,12 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+ enum vga_switcheroo_handler_flags_t handler_flags);
void vga_switcheroo_unregister_handler(void);
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
int vga_switcheroo_process_delayed_switch(void);
@@ -150,11 +178,15 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
-static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+ enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
+static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
+static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
+static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
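
Handlers now advertise their capabilities when registering. A hedged sketch of a platform handler declaring that it can switch the DDC lines; the callback implementations are hypothetical and assumed to be defined elsewhere.

/* Illustrative sketch: registering a handler that can mux the DDC lines
 * independently of the main display switch.
 */
static int example_switchto(enum vga_switcheroo_client_id id);
static int example_switch_ddc(enum vga_switcheroo_client_id id);
static enum vga_switcheroo_client_id example_get_client_id(struct pci_dev *pdev);

static const struct vga_switcheroo_handler example_handler = {
	.switchto	= example_switchto,
	.switch_ddc	= example_switch_ddc,
	.get_client_id	= example_get_client_id,
};

static int __init example_register_handler(void)
{
	return vga_switcheroo_register_handler(&example_handler,
					       VGA_SWITCHEROO_CAN_SWITCH_DDC);
}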
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8f4d4bfa6d46..d5eb5479a425 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,8 +75,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
bool virtqueue_is_broken(struct virtqueue *vq);
-void *virtqueue_get_avail(struct virtqueue *vq);
-void *virtqueue_get_used(struct virtqueue *vq);
+const struct vring *virtqueue_get_vring(struct virtqueue *vq);
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
+
+/*
+ * Legacy accessors -- in almost all cases, these are the wrong functions
+ * to use.
+ */
+static inline void *virtqueue_get_desc(struct virtqueue *vq)
+{
+ return virtqueue_get_vring(vq)->desc;
+}
+static inline void *virtqueue_get_avail(struct virtqueue *vq)
+{
+ return virtqueue_get_vring(vq)->avail;
+}
+static inline void *virtqueue_get_used(struct virtqueue *vq)
+{
+ return virtqueue_get_vring(vq)->used;
+}
/**
* virtio_device - representation of a device using virtio
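
The legacy pointer accessors are kept only as wrappers around the vring. A short, illustrative sketch of how a transport might report the ring using the new DMA-address accessors instead:

#include <linux/virtio.h>

static void example_report_ring(struct virtqueue *vq)
{
	/* bus addresses of the three vring areas, as set up by the core */
	dma_addr_t desc  = virtqueue_get_desc_addr(vq);
	dma_addr_t avail = virtqueue_get_avail_addr(vq);
	dma_addr_t used  = virtqueue_get_used_addr(vq);

	pr_info("vq %s: %u entries, desc %pad avail %pad used %pad\n",
		vq->name, virtqueue_get_vring_size(vq),
		&desc, &avail, &used);
}
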
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index a156e2b6ccfe..e8d36938f09a 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -59,6 +59,35 @@ static inline void virtio_store_mb(bool weak_barriers,
struct virtio_device;
struct virtqueue;
+/*
+ * Creates a virtqueue and allocates the descriptor ring. If
+ * may_reduce_num is set, then this may allocate a smaller ring than
+ * expected. The caller should query virtqueue_get_vring_size to learn
+ * the actual size of the ring.
+ */
+struct virtqueue *vring_create_virtqueue(unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name);
+
+/* Creates a virtqueue with a custom layout. */
+struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring vring,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+
+/*
+ * Creates a virtqueue with a standard layout but a caller-allocated
+ * ring.
+ */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -68,7 +97,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
+
+/*
+ * Destroys a virtqueue. If created with vring_create_virtqueue, this
+ * also frees the ring.
+ */
void vring_del_virtqueue(struct virtqueue *vq);
+
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);
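
A sketch of a transport creating its ring through the new helper; the size, alignment and the notify/callback bodies are placeholder assumptions. A ring created this way is also freed by vring_del_virtqueue().

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static bool example_notify(struct virtqueue *vq)
{
	/* kick the device here; return false if it has disappeared */
	return true;
}

static void example_callback(struct virtqueue *vq)
{
	/* consume used buffers here */
}

static struct virtqueue *example_create_vq(struct virtio_device *vdev,
					   unsigned int index)
{
	struct virtqueue *vq;

	vq = vring_create_virtqueue(index, 128, SMP_CACHE_BYTES, vdev,
				    true /* weak_barriers */,
				    true /* may_reduce_num */,
				    example_notify, example_callback,
				    "example-vq");
	if (vq)
		pr_info("allocated ring with %u entries\n",
			virtqueue_get_vring_size(vq));
	return vq;
}
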
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 67c1dbd19c6d..ec084321fe09 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -53,6 +53,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+ KCOMPACTD_WAKE,
#endif
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -71,6 +72,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_COLLAPSE_ALLOC_FAILED,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
+ THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index b585fa2507ee..51732d6c9555 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -10,8 +10,9 @@
#include <linux/bitops.h>
-#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/notifier.h>
#include <uapi/linux/watchdog.h>
@@ -46,7 +47,7 @@ struct watchdog_ops {
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
- int (*restart)(struct watchdog_device *);
+ int (*restart)(struct watchdog_device *, unsigned long, void *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
};
@@ -61,14 +62,21 @@ struct watchdog_ops {
* @bootstatus: Status of the watchdog device at boot.
* @timeout: The watchdog devices timeout value (in seconds).
* @min_timeout:The watchdog devices minimum timeout value (in seconds).
- * @max_timeout:The watchdog devices maximum timeout value (in seconds).
+ * @max_timeout:The watchdog devices maximum timeout value (in seconds)
+ * as configurable from user space. Only relevant if
+ * max_hw_heartbeat_ms is not provided.
+ * @min_hw_heartbeat_ms:
+ * Minimum time between heartbeats, in milli-seconds.
+ * @max_hw_heartbeat_ms:
+ * Hardware limit for maximum timeout, in milli-seconds.
+ * Replaces max_timeout if specified.
* @reboot_nb: The notifier block to stop watchdog on reboot.
* @restart_nb: The notifier block to register a restart function.
* @driver_data:Pointer to the drivers private data.
* @wd_data: Pointer to watchdog core internal data.
* @status: Field that contains the devices internal status bits.
- * @deferred: entry in wtd_deferred_reg_list which is used to
- * register early initialized watchdogs.
+ * @deferred: Entry in wtd_deferred_reg_list which is used to
+ * register early initialized watchdogs.
*
* The watchdog_device structure contains all information about a
* watchdog timer device.
@@ -89,6 +97,8 @@ struct watchdog_device {
unsigned int timeout;
unsigned int min_timeout;
unsigned int max_timeout;
+ unsigned int min_hw_heartbeat_ms;
+ unsigned int max_hw_heartbeat_ms;
struct notifier_block reboot_nb;
struct notifier_block restart_nb;
void *driver_data;
@@ -98,6 +108,7 @@ struct watchdog_device {
#define WDOG_ACTIVE 0 /* Is the watchdog running/active */
#define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? */
#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */
+#define WDOG_HW_RUNNING 3 /* True if HW watchdog running */
struct list_head deferred;
};
@@ -110,6 +121,15 @@ static inline bool watchdog_active(struct watchdog_device *wdd)
return test_bit(WDOG_ACTIVE, &wdd->status);
}
+/*
+ * Use the following function to check whether or not the hardware watchdog
+ * is running
+ */
+static inline bool watchdog_hw_running(struct watchdog_device *wdd)
+{
+ return test_bit(WDOG_HW_RUNNING, &wdd->status);
+}
+
/* Use the following function to set the nowayout feature */
static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
{
@@ -128,13 +148,18 @@ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigne
{
/*
* The timeout is invalid if
+ * - the requested value is larger than UINT_MAX / 1000
+ * (since internal calculations are done in milli-seconds),
+ * or
* - the requested value is smaller than the configured minimum timeout,
* or
- * - a maximum timeout is configured, and the requested value is larger
- * than the maximum timeout.
+ * - a maximum hardware timeout is not configured, a maximum timeout
+ * is configured, and the requested value is larger than the
+ * configured maximum timeout.
*/
- return t < wdd->min_timeout ||
- (wdd->max_timeout && t > wdd->max_timeout);
+ return t > UINT_MAX / 1000 || t < wdd->min_timeout ||
+ (!wdd->max_hw_heartbeat_ms && wdd->max_timeout &&
+ t > wdd->max_timeout);
}
/* Use the following functions to manipulate watchdog driver specific data */
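
A sketch (hypothetical driver) of how the new fields and the extended restart() signature fit together: hardware that can only sustain heartbeats between 0.1 s and 16 s declares that window, while user space may still configure a larger timeout that the core services in software.

#include <linux/module.h>
#include <linux/watchdog.h>

static int example_start(struct watchdog_device *wdd)
{
	/* (re)arm the hardware timer */
	return 0;
}

static int example_stop(struct watchdog_device *wdd)
{
	/* disable the hardware timer */
	return 0;
}

static int example_restart(struct watchdog_device *wdd, unsigned long action,
			   void *data)
{
	/* trigger a board-level reset */
	return 0;
}

static const struct watchdog_ops example_ops = {
	.owner   = THIS_MODULE,
	.start   = example_start,
	.stop    = example_stop,
	.restart = example_restart,
};

static struct watchdog_device example_wdd = {
	.ops                 = &example_ops,
	.min_hw_heartbeat_ms = 100,	/* hardware floor */
	.max_hw_heartbeat_ms = 16000,	/* hardware ceiling */
	.timeout             = 60,	/* user-visible timeout, in seconds */
};
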
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
index c33dfa69d7ab..2087c9a68be3 100644
--- a/include/media/videobuf2-dma-contig.h
+++ b/include/media/videobuf2-dma-contig.h
@@ -16,6 +16,8 @@
#include <media/videobuf2-v4l2.h>
#include <linux/dma-mapping.h>
+struct dma_attrs;
+
static inline dma_addr_t
vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
{
@@ -24,7 +26,14 @@ vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
return *addr;
}
-void *vb2_dma_contig_init_ctx(struct device *dev);
+void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
+ struct dma_attrs *attrs);
+
+static inline void *vb2_dma_contig_init_ctx(struct device *dev)
+{
+ return vb2_dma_contig_init_ctx_attrs(dev, NULL);
+}
+
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx);
extern const struct vb2_mem_ops vb2_dma_contig_memops;
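
A sketch, assuming a V4L2 driver that wants its buffers allocated without a kernel virtual mapping; the attribute shown is just one plausible use of the new _attrs variant:

#include <linux/dma-attrs.h>
#include <media/videobuf2-dma-contig.h>

static void *example_init_ctx(struct device *dev)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	return vb2_dma_contig_init_ctx_attrs(dev, &attrs);
}
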
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index f2ffe5bd720d..7d5e2613c7b8 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -30,9 +30,6 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
/* Get the AFU conf record number associated with a pci_dev */
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
-/* Get the physical device (ie. the PCIe card) which the AFU is attached */
-struct device *cxl_get_phys_dev(struct pci_dev *dev);
-
/*
* Context lifetime overview:
@@ -210,4 +207,9 @@ ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
void cxl_perst_reloads_same_image(struct cxl_afu *afu,
bool perst_reloads_same_image);
+/*
+ * Read the VPD for the card where the AFU resides
+ */
+ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
+
#endif /* _MISC_CXL_H */
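
A short sketch of reading the adapter VPD for the card an AFU sits on (the buffer size is arbitrary):

#include <linux/pci.h>
#include <linux/printk.h>
#include <misc/cxl.h>

static void example_dump_vpd(struct pci_dev *dev)
{
	u8 buf[256];
	ssize_t len = cxl_read_adapter_vpd(dev, buf, sizeof(buf));

	if (len > 0)
		print_hex_dump_bytes("cxl vpd: ", DUMP_PREFIX_OFFSET,
				     buf, len);
}
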
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index 2f6a3f2233ed..da3a77d25fcb 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -75,6 +75,8 @@
#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \
LOWPAN_IPHC_MAX_HEADER_LEN + \
LOWPAN_NHC_MAX_HDR_LEN)
+/* SCI/DCI is 4 bits wide, so we have a maximum of 16 entries */
+#define LOWPAN_IPHC_CTX_TABLE_SIZE (1 << 4)
#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
@@ -98,9 +100,39 @@ enum lowpan_lltypes {
LOWPAN_LLTYPE_IEEE802154,
};
+enum lowpan_iphc_ctx_flags {
+ LOWPAN_IPHC_CTX_FLAG_ACTIVE,
+ LOWPAN_IPHC_CTX_FLAG_COMPRESSION,
+};
+
+struct lowpan_iphc_ctx {
+ u8 id;
+ struct in6_addr pfx;
+ u8 plen;
+ unsigned long flags;
+};
+
+struct lowpan_iphc_ctx_table {
+ spinlock_t lock;
+ const struct lowpan_iphc_ctx_ops *ops;
+ struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE];
+};
+
+static inline bool lowpan_iphc_ctx_is_active(const struct lowpan_iphc_ctx *ctx)
+{
+ return test_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
+}
+
+static inline bool
+lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
+{
+ return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
+}
+
struct lowpan_priv {
enum lowpan_lltypes lltype;
struct dentry *iface_debugfs;
+ struct lowpan_iphc_ctx_table ctx;
/* must be last */
u8 priv[0] __aligned(sizeof(void *));
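
A sketch of walking a device's IPHC context table under its lock, skipping entries that are inactive or not allowed for compression (example_walk_ctx is illustrative only):

#include <net/6lowpan.h>

static void example_walk_ctx(struct lowpan_priv *lpriv)
{
	struct lowpan_iphc_ctx *ctx;
	int i;

	spin_lock_bh(&lpriv->ctx.lock);
	for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
		ctx = &lpriv->ctx.table[i];
		if (!lowpan_iphc_ctx_is_active(ctx) ||
		    !lowpan_iphc_ctx_is_compression(ctx))
			continue;
		pr_debug("ctx %u: %pI6c/%u\n", ctx->id, &ctx->pfx, ctx->plen);
	}
	spin_unlock_bh(&lpriv->ctx.lock);
}
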
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9d446f136607..2a19fe111c78 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -7,6 +7,8 @@
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
struct tcf_common {
struct hlist_node tcfc_head;
@@ -65,11 +67,6 @@ static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask)
return 0;
}
-static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
-{
- kfree(hf->htab);
-}
-
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
@@ -81,42 +78,76 @@ static inline void tcf_lastuse_update(struct tcf_t *tm)
tm->lastuse = now;
}
-#ifdef CONFIG_NET_CLS_ACT
-
-#define ACT_P_CREATED 1
-#define ACT_P_DELETED 1
-
struct tc_action {
void *priv;
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
struct list_head list;
+ struct tcf_hashinfo *hinfo;
};
+#ifdef CONFIG_NET_CLS_ACT
+
+#define ACT_P_CREATED 1
+#define ACT_P_DELETED 1
+
struct tc_action_ops {
struct list_head head;
- struct tcf_hashinfo *hinfo;
char kind[IFNAMSIZ];
__u32 type; /* TBD to match kind */
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *, int bind);
- int (*lookup)(struct tc_action *, u32);
+ int (*lookup)(struct net *, struct tc_action *, u32);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *act, int ovr,
int bind);
- int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *);
+ int (*walk)(struct net *, struct sk_buff *,
+ struct netlink_callback *, int, struct tc_action *);
+};
+
+struct tc_action_net {
+ struct tcf_hashinfo *hinfo;
+ const struct tc_action_ops *ops;
};
-int tcf_hash_search(struct tc_action *a, u32 index);
-u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
-int tcf_hash_check(u32 index, struct tc_action *a, int bind);
-int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
- int size, int bind, bool cpustats);
+static inline
+int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops,
+ unsigned int mask)
+{
+ int err = 0;
+
+ tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL);
+ if (!tn->hinfo)
+ return -ENOMEM;
+ tn->ops = ops;
+ err = tcf_hashinfo_init(tn->hinfo, mask);
+ if (err)
+ kfree(tn->hinfo);
+ return err;
+}
+
+void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
+ struct tcf_hashinfo *hinfo);
+
+static inline void tc_action_net_exit(struct tc_action_net *tn)
+{
+ tcf_hashinfo_destroy(tn->ops, tn->hinfo);
+}
+
+int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ struct tc_action *a);
+int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index);
+u32 tcf_hash_new_index(struct tc_action_net *tn);
+int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
+ int bind);
+int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
+ struct tc_action *a, int size, int bind, bool cpustats);
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
-void tcf_hash_insert(struct tc_action *a);
+void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a);
int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
@@ -125,8 +156,8 @@ static inline int tcf_hash_release(struct tc_action *a, bool bind)
return __tcf_hash_release(a, bind, false);
}
-int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
-int tcf_unregister_action(struct tc_action_ops *a);
+int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
+int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res);
@@ -140,5 +171,16 @@ int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
+
+#define tc_no_actions(_exts) \
+ (list_empty(&(_exts)->actions))
+
+#define tc_for_each_action(_a, _exts) \
+ list_for_each_entry(a, &(_exts)->actions, list)
+#else /* CONFIG_NET_CLS_ACT */
+
+#define tc_no_actions(_exts) true
+#define tc_for_each_action(_a, _exts) while (0)
+
#endif /* CONFIG_NET_CLS_ACT */
#endif
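
A sketch of the per-netns pattern these helpers enable, modelled on how an action module would wire up its pernet_operations; act_example_ops stands in for a real struct tc_action_ops:

#include <net/act_api.h>
#include <net/netns/generic.h>

static int example_net_id;
static struct tc_action_ops act_example_ops;	/* fields omitted in this sketch */

static __net_init int example_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, example_net_id);

	/* 16-bucket hash table (mask 15), private to this namespace */
	return tc_action_net_init(tn, &act_example_ops, 15);
}

static __net_exit void example_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, example_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations example_net_ops = {
	.init = example_init_net,
	.exit = example_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init example_act_init(void)
{
	/* registration now takes the pernet ops instead of a hash mask */
	return tcf_register_action(&act_example_ops, &example_net_ops);
}
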
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 47f52d3cd8df..730d856683e5 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -87,6 +87,8 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
+int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
+ bool match_wildcard);
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 339ea57be423..5d38d980b89d 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -233,6 +233,7 @@ enum {
HCI_SC_ENABLED,
HCI_SC_ONLY,
HCI_PRIVACY,
+ HCI_LIMITED_PRIVACY,
HCI_RPA_EXPIRED,
HCI_RPA_RESOLVING,
HCI_HS_ENABLED,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index d4f82edb5cff..dc71473462ac 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,6 +25,7 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
+#include <linux/leds.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sock.h>
@@ -396,6 +397,8 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+ struct led_trigger *power_led;
+
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f1fbc3b11962..f358ad5e4214 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -306,5 +306,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
+void bond_3ad_update_ad_actor_settings(struct bonding *bond);
#endif /* _NET_BOND_3AD_H */
diff --git a/include/net/bonding.h b/include/net/bonding.h
index ee6c52053aa3..791800ddd6d9 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -215,6 +215,7 @@ struct bonding {
* ALB mode (6) - to sync the use and modifications of its hash table
*/
spinlock_t mode_lock;
+ spinlock_t stats_lock;
u8 send_peer_notif;
u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9bcaaf7cd15a..9e1b24c29f0c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -712,6 +712,8 @@ struct cfg80211_acl_data {
* @p2p_opp_ps: P2P opportunistic PS
* @acl: ACL configuration used by the drivers which has support for
* MAC address based access control
+ * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
+ * networks.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -730,6 +732,7 @@ struct cfg80211_ap_settings {
u8 p2p_ctwindow;
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
+ bool pbss;
};
/**
@@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params {
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT Capability overrides
* @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
+ * networks.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -1910,6 +1915,7 @@ struct cfg80211_connect_params {
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa;
struct ieee80211_vht_cap vht_capa_mask;
+ bool pbss;
};
/**
@@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys;
* registered for unexpected class 3 frames (AP mode)
* @conn: (private) cfg80211 software SME connection state machine data
* @connect_keys: (private) keys to set after connection is established
+ * @conn_bss_type: connecting/connected BSS type
* @ibss_fixed: (private) IBSS is using fixed BSSID
* @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
@@ -3519,6 +3526,7 @@ struct wireless_dev {
u8 ssid_len, mesh_id_len, mesh_id_up_len;
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
+ enum ieee80211_bss_type conn_bss_type;
struct list_head event_list;
spinlock_t event_lock;
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 10a16b5bd1c7..5c30891e84e5 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -88,8 +88,11 @@ static inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
u32 sum = (__force u32)csum2;
- if (offset&1)
- sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
+
+ /* rotate sum to align it with a 16b boundary */
+ if (offset & 1)
+ sum = ror32(sum, 8);
+
return csum_add(csum, (__force __wsum)sum);
}
@@ -102,10 +105,7 @@ csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
static inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
- u32 sum = (__force u32)csum2;
- if (offset&1)
- sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
- return csum_sub(csum, (__force __wsum)sum);
+ return csum_block_add(csum, ~csum2, offset);
}
static inline __wsum csum_unfold(__sum16 n)
@@ -120,6 +120,11 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
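
A sketch of the kind of update csum_replace_by_diff() is meant for: the caller already holds a checksum difference (here, computed from swapping one IPv6 address for another, as one plausible example) and folds it into a stored checksum:

#include <linux/in6.h>
#include <net/checksum.h>

static void example_replace_addr(__sum16 *check,
				 const struct in6_addr *from,
				 const struct in6_addr *to)
{
	__wsum diff = csum_sub(csum_partial(to, sizeof(*to), 0),
			       csum_partial(from, sizeof(*from), 0));

	csum_replace_by_diff(check, diff);
}
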
diff --git a/include/net/codel.h b/include/net/codel.h
index 267e70210061..d168aca115cc 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,12 +162,14 @@ struct codel_vars {
* struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
+ * @drop_len: bytes of dropped packets in dequeue()
* ecn_mark: number of packets we ECN marked instead of dropping
* ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
+ u32 drop_len;
u32 ecn_mark;
u32 ce_mark;
};
@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
goto end;
}
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
diff --git a/include/net/devlink.h b/include/net/devlink.h
new file mode 100644
index 000000000000..c37d257891d6
--- /dev/null
+++ b/include/net/devlink.h
@@ -0,0 +1,140 @@
+/*
+ * include/net/devlink.h - Network physical device Netlink interface
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _NET_DEVLINK_H_
+#define _NET_DEVLINK_H_
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <uapi/linux/devlink.h>
+
+struct devlink_ops;
+
+struct devlink {
+ struct list_head list;
+ struct list_head port_list;
+ const struct devlink_ops *ops;
+ struct device *dev;
+ possible_net_t _net;
+ char priv[0] __aligned(NETDEV_ALIGN);
+};
+
+struct devlink_port {
+ struct list_head list;
+ struct devlink *devlink;
+ unsigned index;
+ bool registered;
+ enum devlink_port_type type;
+ enum devlink_port_type desired_type;
+ void *type_dev;
+ bool split;
+ u32 split_group;
+};
+
+struct devlink_ops {
+ size_t priv_size;
+ int (*port_type_set)(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type);
+ int (*port_split)(struct devlink *devlink, unsigned int port_index,
+ unsigned int count);
+ int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+};
+
+static inline void *devlink_priv(struct devlink *devlink)
+{
+ BUG_ON(!devlink);
+ return &devlink->priv;
+}
+
+static inline struct devlink *priv_to_devlink(void *priv)
+{
+ BUG_ON(!priv);
+ return container_of(priv, struct devlink, priv);
+}
+
+struct ib_device;
+
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+
+struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
+int devlink_register(struct devlink *devlink, struct device *dev);
+void devlink_unregister(struct devlink *devlink);
+void devlink_free(struct devlink *devlink);
+int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index);
+void devlink_port_unregister(struct devlink_port *devlink_port);
+void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev);
+void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev);
+void devlink_port_type_clear(struct devlink_port *devlink_port);
+void devlink_port_split_set(struct devlink_port *devlink_port,
+ u32 split_group);
+
+#else
+
+static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
+ size_t priv_size)
+{
+ return kzalloc(sizeof(struct devlink) + priv_size, GFP_KERNEL);
+}
+
+static inline int devlink_register(struct devlink *devlink, struct device *dev)
+{
+ return 0;
+}
+
+static inline void devlink_unregister(struct devlink *devlink)
+{
+}
+
+static inline void devlink_free(struct devlink *devlink)
+{
+ kfree(devlink);
+}
+
+static inline int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
+{
+ return 0;
+}
+
+static inline void devlink_port_unregister(struct devlink_port *devlink_port)
+{
+}
+
+static inline void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev)
+{
+}
+
+static inline void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev)
+{
+}
+
+static inline void devlink_port_type_clear(struct devlink_port *devlink_port)
+{
+}
+
+static inline void devlink_port_split_set(struct devlink_port *devlink_port,
+ u32 split_group)
+{
+}
+
+#endif
+
+#endif /* _NET_DEVLINK_H_ */
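
A sketch of the registration flow for a driver exposing one Ethernet port; example_devlink_ops and the surrounding driver structures are placeholders:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <net/devlink.h>

struct example_priv {
	struct devlink_port port;
};

static const struct devlink_ops example_devlink_ops = {
	/* optional port_type_set/port_split/port_unsplit callbacks go here */
};

static int example_devlink_setup(struct device *dev, struct net_device *netdev)
{
	struct example_priv *priv;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&example_devlink_ops, sizeof(*priv));
	if (!dl)
		return -ENOMEM;
	priv = devlink_priv(dl);

	err = devlink_register(dl, dev);
	if (err)
		goto err_free;

	err = devlink_port_register(dl, &priv->port, 0);
	if (err)
		goto err_unregister;

	devlink_port_type_eth_set(&priv->port, netdev);
	return 0;

err_unregister:
	devlink_unregister(dl);
err_free:
	devlink_free(dl);
	return err;
}
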
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 26a0e86e611e..6463bb2863ac 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,16 +296,17 @@ struct dsa_switch_driver {
/*
* Bridge integration
*/
- int (*port_join_bridge)(struct dsa_switch *ds, int port,
- u32 br_port_mask);
- int (*port_leave_bridge)(struct dsa_switch *ds, int port,
- u32 br_port_mask);
+ int (*port_bridge_join)(struct dsa_switch *ds, int port,
+ struct net_device *bridge);
+ void (*port_bridge_leave)(struct dsa_switch *ds, int port);
int (*port_stp_update)(struct dsa_switch *ds, int port,
u8 state);
/*
* VLAN support
*/
+ int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
+ bool vlan_filtering);
int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
@@ -314,9 +315,9 @@ struct dsa_switch_driver {
struct switchdev_trans *trans);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
- int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
- int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
- unsigned long *ports, unsigned long *untagged);
+ int (*port_vlan_dump)(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj));
/*
* Forwarding database
diff --git a/include/net/dst.h b/include/net/dst.h
index c7329dcd90cc..5c98443c1c9e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -398,6 +398,18 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
__skb_tunnel_rx(skb, dev, net);
}
+static inline u32 dst_tclassid(const struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ const struct dst_entry *dst;
+
+ dst = skb_dst(skb);
+ if (dst)
+ return dst->tclassid;
+#endif
+ return 0;
+}
+
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
new file mode 100644
index 000000000000..151accae708b
--- /dev/null
+++ b/include/net/dst_cache.h
@@ -0,0 +1,97 @@
+#ifndef _NET_DST_CACHE_H
+#define _NET_DST_CACHE_H
+
+#include <linux/jiffies.h>
+#include <net/dst.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_fib.h>
+#endif
+
+struct dst_cache {
+ struct dst_cache_pcpu __percpu *cache;
+ unsigned long reset_ts;
+};
+
+/**
+ * dst_cache_get - perform cache lookup
+ * @dst_cache: the cache
+ *
+ * The caller should use dst_cache_get_ip4() if it needs to retrieve the
+ * source address to be used when xmitting to the cached dst.
+ * local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
+
+/**
+ * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address
+ * @dst_cache: the cache
+ * @saddr: return value for the retrieved source address
+ *
+ * local BH must be disabled.
+ */
+struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
+
+/**
+ * dst_cache_set_ip4 - store the ipv4 dst into the cache
+ * @dst_cache: the cache
+ * @dst: the entry to be cached
+ * @saddr: the source address to be stored inside the cache
+ *
+ * local BH must be disabled.
+ */
+void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
+ __be32 saddr);
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+/**
+ * dst_cache_set_ip6 - store the ipv6 dst into the cache
+ * @dst_cache: the cache
+ * @dst: the entry to be cached
+ * @saddr: the source address to be stored inside the cache
+ *
+ * local BH must be disabled.
+ */
+void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
+ const struct in6_addr *addr);
+
+/**
+ * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
+ * @dst_cache: the cache
+ * @saddr: return value for the retrieved source address
+ *
+ * local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
+ struct in6_addr *saddr);
+#endif
+
+/**
+ * dst_cache_reset - invalidate the cache contents
+ * @dst_cache: the cache
+ *
+ * This does not free the cached dst, to avoid races and contention;
+ * the dst will be freed on a later cache lookup.
+ */
+static inline void dst_cache_reset(struct dst_cache *dst_cache)
+{
+ dst_cache->reset_ts = jiffies;
+}
+
+/**
+ * dst_cache_init - initialize the cache, allocating the required storage
+ * @dst_cache: the cache
+ * @gfp: allocation flags
+ */
+int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
+
+/**
+ * dst_cache_destroy - empty the cache and free the allocated storage
+ * @dst_cache: the cache
+ *
+ * No synchronization is enforced: it must be called only when the cache
+ * is unused.
+ */
+void dst_cache_destroy(struct dst_cache *dst_cache);
+
+#endif
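
A sketch of the intended transmit-path usage (the route lookup is a hypothetical stand-in); per the comments above, the caller runs with local BH disabled:

#include <net/dst_cache.h>
#include <net/route.h>

static struct rtable *example_get_rt(struct dst_cache *cache,
				     struct flowi4 *fl4)
{
	struct rtable *rt;

	rt = dst_cache_get_ip4(cache, &fl4->saddr);
	if (rt)
		return rt;

	rt = example_route_lookup(fl4);		/* hypothetical helper */
	if (!IS_ERR(rt))
		dst_cache_set_ip4(cache, &rt->dst, fl4->saddr);
	return rt;
}
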
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 30a56ab2ccfb..5db9f5910428 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -62,6 +62,7 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
sizeof(a->u.tun_info) + a->u.tun_info.options_len);
}
+void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
@@ -125,7 +126,7 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
iph->saddr, iph->daddr, iph->tos, iph->ttl,
- 0, 0, tunnel_id, flags);
+ 0, 0, 0, tunnel_id, flags);
return tun_dst;
}
@@ -151,8 +152,11 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
info->key.u.ipv6.src = ip6h->saddr;
info->key.u.ipv6.dst = ip6h->daddr;
+
info->key.tos = ipv6_get_dsfield(ip6h);
info->key.ttl = ip6h->hop_limit;
+ info->key.label = ip6_flowlabel(ip6h);
+
return tun_dst;
}
diff --git a/include/net/flow.h b/include/net/flow.h
index 83969eebebf3..d47ef4bb5423 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -127,7 +127,6 @@ struct flowi6 {
#define flowi6_oif __fl_common.flowic_oif
#define flowi6_iif __fl_common.flowic_iif
#define flowi6_mark __fl_common.flowic_mark
-#define flowi6_tos __fl_common.flowic_tos
#define flowi6_scope __fl_common.flowic_scope
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
@@ -135,6 +134,7 @@ struct flowi6 {
#define flowi6_tun_key __fl_common.flowic_tun_key
struct in6_addr daddr;
struct in6_addr saddr;
+ /* Note: flowi6_tos is encoded in flowlabel, too. */
__be32 flowlabel;
union flowi_uli uli;
#define fl6_sport uli.ports.sport
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 8c8548cf5888..d3d60dccd19f 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -184,4 +184,17 @@ static inline bool flow_keys_have_l4(struct flow_keys *keys)
u32 flow_hash_from_keys(struct flow_keys *keys);
+static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id)
+{
+ return flow_dissector->used_keys & (1 << key_id);
+}
+
+static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id,
+ void *target_container)
+{
+ return ((char *)target_container) + flow_dissector->offset[key_id];
+}
+
#endif
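
A sketch of how a dissector user reads a key back out of its target container, guarded by the new dissector_uses_key() check; the basic key is used purely as an example:

#include <net/flow_dissector.h>

static u8 example_ip_proto(struct flow_dissector *dissector, void *target)
{
	struct flow_dissector_key_basic *basic;

	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return 0;

	basic = skb_flow_dissector_target(dissector, FLOW_DISSECTOR_KEY_BASIC,
					  target);
	return basic->ip_proto;
}
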
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 43c0e771f417..8d4608ce8716 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -83,7 +83,6 @@ struct genl_family {
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
- * @dst_sk: destination socket
*/
struct genl_info {
u32 snd_seq;
@@ -94,7 +93,6 @@ struct genl_info {
struct nlattr ** attrs;
possible_net_t _net;
void * user_ptr[2];
- struct sock * dst_sk;
};
static inline struct net *genl_info_net(struct genl_info *info)
@@ -188,8 +186,6 @@ int genl_unregister_family(struct genl_family *family);
void genl_notify(struct genl_family *family, struct sk_buff *skb,
struct genl_info *info, u32 group, gfp_t flags);
-struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
- gfp_t flags);
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
struct genl_family *family, int flags, u8 cmd);
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
new file mode 100644
index 000000000000..47d08662501b
--- /dev/null
+++ b/include/net/hwbm.h
@@ -0,0 +1,28 @@
+#ifndef _HWBM_H
+#define _HWBM_H
+
+struct hwbm_pool {
+ /* Capacity of the pool */
+ int size;
+ /* Size of the buffers managed */
+ int frag_size;
+ /* Number of buffers currently used by this pool */
+ int buf_num;
+ /* constructor called during allocation */
+ int (*construct)(struct hwbm_pool *bm_pool, void *buf);
+ /* protect access to the buffer counter */
+ spinlock_t lock;
+ /* private data */
+ void *priv;
+};
+#ifdef CONFIG_HWBM
+void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
+int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp);
+#else
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
+{ return 0; }
+#endif /* CONFIG_HWBM */
+#endif /* _HWBM_H */
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 064cfbe639d0..954ad6bfb56a 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
-struct in6_addr;
struct inet_bind_bucket;
struct request_sock;
struct sk_buff;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 7ff588ca6817..28332bdac333 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -53,6 +53,7 @@ struct sock *__inet6_lookup_established(struct net *net,
struct sock *inet6_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
@@ -60,6 +61,7 @@ struct sock *inet6_lookup_listener(struct net *net,
static inline struct sock *__inet6_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
@@ -71,12 +73,12 @@ static inline struct sock *__inet6_lookup(struct net *net,
if (sk)
return sk;
- return inet6_lookup_listener(net, hashinfo, saddr, sport,
+ return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
daddr, hnum, dif);
}
static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
- struct sk_buff *skb,
+ struct sk_buff *skb, int doff,
const __be16 sport,
const __be16 dport,
int iif)
@@ -86,16 +88,19 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
if (sk)
return sk;
- return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
- &ipv6_hdr(skb)->saddr, sport,
+ return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ doff, &ipv6_hdr(skb)->saddr, sport,
&ipv6_hdr(skb)->daddr, ntohs(dport),
iif);
}
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
const int dif);
+
+int inet6_hash(struct sock *sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 12aac0fd6ee7..909972aa3acd 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -13,6 +13,7 @@ struct netns_frags {
int timeout;
int high_thresh;
int low_thresh;
+ int max_dist;
};
/**
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index de2e3ade6102..50f635c2c536 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -207,12 +207,16 @@ void inet_hashinfo_init(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
-void __inet_hash(struct sock *sk, struct sock *osk);
-void inet_hash(struct sock *sk);
+int __inet_hash(struct sock *sk, struct sock *osk,
+ int (*saddr_same)(const struct sock *sk1,
+ const struct sock *sk2,
+ bool match_wildcard));
+int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr,
const unsigned short hnum,
@@ -220,10 +224,11 @@ struct sock *__inet_lookup_listener(struct net *net,
static inline struct sock *inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
- return __inet_lookup_listener(net, hashinfo, saddr, sport,
+ return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
daddr, ntohs(dport), dif);
}
@@ -299,6 +304,7 @@ static inline struct sock *
static inline struct sock *__inet_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
const int dif)
@@ -307,12 +313,13 @@ static inline struct sock *__inet_lookup(struct net *net,
struct sock *sk = __inet_lookup_established(net, hashinfo,
saddr, sport, daddr, hnum, dif);
- return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport,
- daddr, hnum, dif);
+ return sk ? : __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+ sport, daddr, hnum, dif);
}
static inline struct sock *inet_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
const int dif)
@@ -320,7 +327,8 @@ static inline struct sock *inet_lookup(struct net *net,
struct sock *sk;
local_bh_disable();
- sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
+ sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
+ dport, dif);
local_bh_enable();
return sk;
@@ -328,6 +336,7 @@ static inline struct sock *inet_lookup(struct net *net,
static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
+ int doff,
const __be16 sport,
const __be16 dport)
{
@@ -337,8 +346,8 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
if (sk)
return sk;
else
- return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
- iph->saddr, sport,
+ return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ doff, iph->saddr, sport,
iph->daddr, dport, inet_iif(skb));
}
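
A sketch of the updated lookup call as a TCP receive path might now issue it; the extra skb and transport-header-length arguments give the listener lookup access to the packet (presumably for socket selection such as reuseport):

#include <net/inet_hashtables.h>
#include <net/tcp.h>

static struct sock *example_lookup(struct sk_buff *skb,
				   const struct tcphdr *th)
{
	return __inet_lookup_skb(&tcp_hashinfo, skb, th->doff * 4,
				 th->source, th->dest);
}
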
diff --git a/include/net/ip.h b/include/net/ip.h
index 1a98f1ca1638..fad74d323bd6 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -240,17 +240,13 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
}
#endif
+__be32 inet_current_timestamp(void);
+
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
-/* From ip_input.c */
-extern int sysctl_ip_early_demux;
-
-/* From ip_output.c */
-extern int sysctl_ip_dynaddr;
-
void ipfrag_init(void);
void ip_static_sysctl_init(void);
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 1a49b73f7f6e..cca840584c88 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -37,8 +37,7 @@
#ifndef _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum csum);
+ __u32 len, __u8 proto, __wsum csum);
#endif
static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 0d0ce0b2d870..499a707765ea 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -6,6 +6,7 @@
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>
#include <net/ip_tunnels.h>
+#include <net/dst_cache.h>
#define IP6TUNNEL_ERR_TIMEO (30*HZ)
@@ -33,12 +34,6 @@ struct __ip6_tnl_parm {
__be32 o_key;
};
-struct ip6_tnl_dst {
- seqlock_t lock;
- struct dst_entry __rcu *dst;
- u32 cookie;
-};
-
/* IPv6 tunnel */
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
@@ -46,7 +41,7 @@ struct ip6_tnl {
struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
- struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */
+ struct dst_cache dst_cache; /* cached dst */
int err_count;
unsigned long err_time;
@@ -66,11 +61,6 @@ struct ipv6_tlv_tnl_enc_lim {
__u8 encap_limit; /* tunnel encapsulation limit */
} __packed;
-struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
-int ip6_tnl_dst_init(struct ip6_tnl *t);
-void ip6_tnl_dst_destroy(struct ip6_tnl *t);
-void ip6_tnl_dst_reset(struct ip6_tnl *t);
-void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index dda9abf6b89c..56050f913339 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -7,12 +7,15 @@
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
+#include <linux/bitops.h>
+
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
+#include <net/dst_cache.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -47,6 +50,7 @@ struct ip_tunnel_key {
__be16 tun_flags;
u8 tos; /* TOS for IPv4, TC for IPv6 */
u8 ttl; /* TTL for IPv4, HL for IPv6 */
+ __be32 label; /* Flow Label for IPv6 */
__be16 tp_src;
__be16 tp_dst;
};
@@ -55,8 +59,16 @@ struct ip_tunnel_key {
#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
+/* Maximum tunnel options length. */
+#define IP_TUNNEL_OPTS_MAX \
+ GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \
+ options_len) * BITS_PER_BYTE) - 1, 0)
+
struct ip_tunnel_info {
struct ip_tunnel_key key;
+#ifdef CONFIG_DST_CACHE
+ struct dst_cache dst_cache;
+#endif
u8 options_len;
u8 mode;
};
@@ -85,11 +97,6 @@ struct ip_tunnel_prl_entry {
struct rcu_head rcu_head;
};
-struct ip_tunnel_dst {
- struct dst_entry __rcu *dst;
- __be32 saddr;
-};
-
struct metadata_dst;
struct ip_tunnel {
@@ -108,7 +115,7 @@ struct ip_tunnel {
int tun_hlen; /* Precalculated header length */
int mlink;
- struct ip_tunnel_dst __percpu *dst_cache;
+ struct dst_cache dst_cache;
struct ip_tunnel_parm parms;
@@ -141,6 +148,7 @@ struct ip_tunnel {
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
@@ -181,7 +189,7 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
__be32 saddr, __be32 daddr,
- u8 tos, u8 ttl,
+ u8 tos, u8 ttl, __be32 label,
__be16 tp_src, __be16 tp_dst,
__be64 tun_id, __be16 tun_flags)
{
@@ -192,6 +200,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
key->tos = tos;
key->ttl = ttl;
+ key->label = label;
key->tun_flags = tun_flags;
/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
@@ -207,6 +216,20 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
+static inline bool
+ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ if (skb->mark)
+ return false;
+ if (!info)
+ return true;
+ if (info->key.tun_flags & TUNNEL_NOCACHE)
+ return false;
+
+ return true;
+}
+
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
*tun_info)
{
@@ -248,7 +271,6 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
-void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
@@ -273,15 +295,31 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
return INET_ECN_encapsulate(tos, inner);
}
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
+int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
+ bool xnet);
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
- int gso_type_mask);
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+
+static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err;
+
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
+ NETIF_F_GSO_SHIFT);
+ }
+
+ skb->encapsulation = 0;
+ return 0;
+}
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
@@ -356,6 +394,17 @@ static inline void ip_tunnel_unneed_metadata(void)
{
}
+static inline void ip_tunnel_info_opts_get(void *to,
+ const struct ip_tunnel_info *info)
+{
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ const void *from, int len)
+{
+ info->options_len = 0;
+}
+
#endif /* CONFIG_INET */
#endif /* __NET_IP_TUNNELS_H */
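
A sketch of filling a transmit key with the extra flow-label argument (zero for IPv4) and of the dst-cache gate; the TOS/TTL values are arbitrary:

#include <net/ip_tunnels.h>

static void example_fill_key(struct ip_tunnel_key *key, __be32 src, __be32 dst,
			     __be64 tun_id)
{
	ip_tunnel_key_init(key, src, dst, 0 /* tos */, 64 /* ttl */,
			   0 /* label, IPv6 only */, 0, 0 /* tp_src/tp_dst */,
			   tun_id, TUNNEL_KEY);
}

static bool example_can_use_cache(const struct sk_buff *skb,
				  const struct ip_tunnel_info *info)
{
	/* skips the cache for marked skbs and TUNNEL_NOCACHE keys */
	return ip_tunnel_dst_cache_usable(skb, info);
}
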
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 0816c872b689..a6cc576fd467 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
}
#endif /* CONFIG_IP_VS_NFCT */
+/* Really using conntrack? */
+static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_VS_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ if (!(cp->flags & IP_VS_CONN_F_NFCT))
+ return false;
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct && !nf_ct_is_untracked(ct))
+ return true;
+#endif
+ return false;
+}
+
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6570f379aba2..d0aeb97aec5d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -259,8 +259,12 @@ static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
rcu_read_lock();
opt = rcu_dereference(np->opt);
- if (opt && !atomic_inc_not_zero(&opt->refcnt))
- opt = NULL;
+ if (opt) {
+ if (!atomic_inc_not_zero(&opt->refcnt))
+ opt = NULL;
+ else
+ opt = rcu_pointer_handoff(opt);
+ }
rcu_read_unlock();
return opt;
}
@@ -831,6 +835,12 @@ static inline u8 ip6_tclass(__be32 flowinfo)
{
return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}
+
+static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+{
+ return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+}
+
/*
* Prototypes exported by ipv6
*/
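
A one-line sketch showing how the new helper composes flowinfo from a traffic class and a flow label, the inverse direction of the existing ip6_tclass():

#include <net/ipv6.h>

static __be32 example_flowinfo(u8 tclass, __be32 flowlabel)
{
	__be32 flowinfo = ip6_make_flowinfo(tclass, flowlabel);

	/* round-trips: ip6_tclass(flowinfo) == tclass */
	return flowinfo;
}
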
diff --git a/include/net/kcm.h b/include/net/kcm.h
new file mode 100644
index 000000000000..2840b5825dcc
--- /dev/null
+++ b/include/net/kcm.h
@@ -0,0 +1,226 @@
+/*
+ * Kernel Connection Multiplexor
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __NET_KCM_H_
+#define __NET_KCM_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <uapi/linux/kcm.h>
+
+extern unsigned int kcm_net_id;
+
+#define KCM_STATS_ADD(stat, count) ((stat) += (count))
+#define KCM_STATS_INCR(stat) ((stat)++)
+
+struct kcm_psock_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned long long tx_msgs;
+ unsigned long long tx_bytes;
+ unsigned int rx_aborts;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+ unsigned long long reserved;
+ unsigned long long unreserved;
+ unsigned int tx_aborts;
+};
+
+struct kcm_mux_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned long long tx_msgs;
+ unsigned long long tx_bytes;
+ unsigned int rx_ready_drops;
+ unsigned int tx_retries;
+ unsigned int psock_attach;
+ unsigned int psock_unattach_rsvd;
+ unsigned int psock_unattach;
+};
+
+struct kcm_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned long long tx_msgs;
+ unsigned long long tx_bytes;
+};
+
+struct kcm_tx_msg {
+ unsigned int sent;
+ unsigned int fragidx;
+ unsigned int frag_offset;
+ unsigned int msg_flags;
+ struct sk_buff *frag_skb;
+ struct sk_buff *last_skb;
+};
+
+struct kcm_rx_msg {
+ int full_len;
+ int accum_len;
+ int offset;
+ int early_eaten;
+};
+
+/* Socket structure for KCM client sockets */
+struct kcm_sock {
+ struct sock sk;
+ struct kcm_mux *mux;
+ struct list_head kcm_sock_list;
+ int index;
+ u32 done : 1;
+ struct work_struct done_work;
+
+ struct kcm_stats stats;
+
+ /* Transmit */
+ struct kcm_psock *tx_psock;
+ struct work_struct tx_work;
+ struct list_head wait_psock_list;
+ struct sk_buff *seq_skb;
+
+ /* Don't use bit fields here, these are set under different locks */
+ bool tx_wait;
+ bool tx_wait_more;
+
+ /* Receive */
+ struct kcm_psock *rx_psock;
+ struct list_head wait_rx_list; /* KCMs waiting for receiving */
+ bool rx_wait;
+ u32 rx_disabled : 1;
+};
+
+struct bpf_prog;
+
+/* Structure for an attached lower socket */
+struct kcm_psock {
+ struct sock *sk;
+ struct kcm_mux *mux;
+ int index;
+
+ u32 tx_stopped : 1;
+ u32 rx_stopped : 1;
+ u32 done : 1;
+ u32 unattaching : 1;
+
+ void (*save_state_change)(struct sock *sk);
+ void (*save_data_ready)(struct sock *sk);
+ void (*save_write_space)(struct sock *sk);
+
+ struct list_head psock_list;
+
+ struct kcm_psock_stats stats;
+
+ /* Receive */
+ struct sk_buff *rx_skb_head;
+ struct sk_buff **rx_skb_nextp;
+ struct sk_buff *ready_rx_msg;
+ struct list_head psock_ready_list;
+ struct work_struct rx_work;
+ struct delayed_work rx_delayed_work;
+ struct bpf_prog *bpf_prog;
+ struct kcm_sock *rx_kcm;
+ unsigned long long saved_rx_bytes;
+ unsigned long long saved_rx_msgs;
+ struct timer_list rx_msg_timer;
+ unsigned int rx_need_bytes;
+
+ /* Transmit */
+ struct kcm_sock *tx_kcm;
+ struct list_head psock_avail_list;
+ unsigned long long saved_tx_bytes;
+ unsigned long long saved_tx_msgs;
+};
+
+/* Per net MUX list */
+struct kcm_net {
+ struct mutex mutex;
+ struct kcm_psock_stats aggregate_psock_stats;
+ struct kcm_mux_stats aggregate_mux_stats;
+ struct list_head mux_list;
+ int count;
+};
+
+/* Structure for a MUX */
+struct kcm_mux {
+ struct list_head kcm_mux_list;
+ struct rcu_head rcu;
+ struct kcm_net *knet;
+
+ struct list_head kcm_socks; /* All KCM sockets on MUX */
+ int kcm_socks_cnt; /* Total KCM socket count for MUX */
+ struct list_head psocks; /* List of all psocks on MUX */
+ int psocks_cnt; /* Total attached sockets */
+
+ struct kcm_mux_stats stats;
+ struct kcm_psock_stats aggregate_psock_stats;
+
+ /* Receive */
+ spinlock_t rx_lock ____cacheline_aligned_in_smp;
+ struct list_head kcm_rx_waiters; /* KCMs waiting for receiving */
+ struct list_head psocks_ready; /* List of psocks with a msg ready */
+ struct sk_buff_head rx_hold_queue;
+
+ /* Transmit */
+ spinlock_t lock ____cacheline_aligned_in_smp; /* TX and mux locking */
+ struct list_head psocks_avail; /* List of available psocks */
+ struct list_head kcm_tx_waiters; /* KCMs waiting for a TX psock */
+};
+
+#ifdef CONFIG_PROC_FS
+int kcm_proc_init(void);
+void kcm_proc_exit(void);
+#else
+static inline int kcm_proc_init(void) { return 0; }
+static inline void kcm_proc_exit(void) { }
+#endif
+
+static inline void aggregate_psock_stats(struct kcm_psock_stats *stats,
+ struct kcm_psock_stats *agg_stats)
+{
+ /* Save psock statistics in the mux when psock is being unattached. */
+
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_aborts);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+ SAVE_PSOCK_STATS(tx_msgs);
+ SAVE_PSOCK_STATS(tx_bytes);
+ SAVE_PSOCK_STATS(reserved);
+ SAVE_PSOCK_STATS(unreserved);
+ SAVE_PSOCK_STATS(tx_aborts);
+#undef SAVE_PSOCK_STATS
+}
+
+static inline void aggregate_mux_stats(struct kcm_mux_stats *stats,
+ struct kcm_mux_stats *agg_stats)
+{
+ /* Save MUX statistics into the aggregate when the MUX is being removed. */
+
+#define SAVE_MUX_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_MUX_STATS(rx_msgs);
+ SAVE_MUX_STATS(rx_bytes);
+ SAVE_MUX_STATS(tx_msgs);
+ SAVE_MUX_STATS(tx_bytes);
+ SAVE_MUX_STATS(rx_ready_drops);
+ SAVE_MUX_STATS(psock_attach);
+ SAVE_MUX_STATS(psock_unattach_rsvd);
+ SAVE_MUX_STATS(psock_unattach);
+#undef SAVE_MUX_STATS
+}
+
+#endif /* __NET_KCM_H_ */
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 5567d46b3cff..c43a9c73de5e 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -39,7 +39,7 @@ struct l3mdev_ops {
#ifdef CONFIG_NET_L3_MASTER_DEV
-int l3mdev_master_ifindex_rcu(struct net_device *dev);
+int l3mdev_master_ifindex_rcu(const struct net_device *dev);
static inline int l3mdev_master_ifindex(struct net_device *dev)
{
int ifindex;
@@ -179,7 +179,7 @@ struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
#else
-static inline int l3mdev_master_ifindex_rcu(struct net_device *dev)
+static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
return 0;
}
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 66350ce3e955..e9f116e29c22 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -170,6 +170,8 @@ static inline int lwtunnel_input(struct sk_buff *skb)
return -EOPNOTSUPP;
}
-#endif
+#endif /* CONFIG_LWTUNNEL */
+
+#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type))
#endif /* __NET_LWTUNNEL_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 7c30faff245f..0c09da34b67a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -298,6 +298,7 @@ struct ieee80211_vif_chanctx_switch {
* note that this is only called when it changes after the channel
* context had been assigned.
* @BSS_CHANGED_OCB: OCB join status changed
+ * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -323,6 +324,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_BEACON_INFO = 1<<20,
BSS_CHANGED_BANDWIDTH = 1<<21,
BSS_CHANGED_OCB = 1<<22,
+ BSS_CHANGED_MU_GROUPS = 1<<23,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -436,6 +438,19 @@ struct ieee80211_event {
};
/**
+ * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data
+ *
+ * This structure describes the group id data of VHT MU-MIMO
+ *
+ * @membership: 64-bit array - a bit is set if the station is a member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ */
+struct ieee80211_mu_group_data {
+ u8 membership[WLAN_MEMBERSHIP_LEN];
+ u8 position[WLAN_USER_POSITION_LEN];
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -477,6 +492,7 @@ struct ieee80211_event {
* @enable_beacon: whether beaconing should be enabled or not
* @chandef: Channel definition for this BSS -- the hardware might be
* configured a higher bandwidth than this BSS uses, for example.
+ * @mu_group: VHT MU-MIMO group membership data
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
* This field is only valid when the channel is a wide HT/VHT channel.
* Note that with TDLS this can be the case (channel is HT, protection must
@@ -535,6 +551,7 @@ struct ieee80211_bss_conf {
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
struct cfg80211_chan_def chandef;
+ struct ieee80211_mu_group_data mu_group;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
int arp_addr_cnt;
bool qos;
@@ -691,12 +708,14 @@ enum mac80211_tx_info_flags {
* protocol frame (e.g. EAP)
* @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
* frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
*
* These flags are used in tx_info->control.flags.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
+ IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
};
/*
@@ -993,6 +1012,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the last symbol of the MPDU
* (including FCS) was received.
+ * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the SYNC preamble was received.
* @RX_FLAG_SHORTPRE: Short preamble was used for this frame
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
* @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
@@ -1014,6 +1035,14 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_LDPC: LDPC was used
+ * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
+ * processing it in any regular way.
+ * This is useful if drivers offload some frames but still want to report
+ * them for sniffing purposes.
+ * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
+ * monitor interfaces.
+ * This is useful if drivers offload some frames but still want to report
+ * them for sniffing purposes.
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
* @RX_FLAG_10MHZ: 10 MHz (half channel) was used
* @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -1033,6 +1062,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
RX_FLAG_DECRYPTED = BIT(1),
+ RX_FLAG_MACTIME_PLCP_START = BIT(2),
RX_FLAG_MMIC_STRIPPED = BIT(3),
RX_FLAG_IV_STRIPPED = BIT(4),
RX_FLAG_FAILED_FCS_CRC = BIT(5),
@@ -1046,7 +1076,7 @@ enum mac80211_rx_flags {
RX_FLAG_HT_GF = BIT(13),
RX_FLAG_AMPDU_DETAILS = BIT(14),
RX_FLAG_PN_VALIDATED = BIT(15),
- /* bit 16 free */
+ RX_FLAG_DUP_VALIDATED = BIT(16),
RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
RX_FLAG_AMPDU_IS_LAST = BIT(18),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
@@ -1054,6 +1084,8 @@ enum mac80211_rx_flags {
RX_FLAG_MACTIME_END = BIT(21),
RX_FLAG_VHT = BIT(22),
RX_FLAG_LDPC = BIT(23),
+ RX_FLAG_ONLY_MONITOR = BIT(24),
+ RX_FLAG_SKIP_MONITOR = BIT(25),
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
@@ -1072,6 +1104,7 @@ enum mac80211_rx_flags {
* @RX_VHT_FLAG_160MHZ: 160 MHz was used
* @RX_VHT_FLAG_BF: packet was beamformed
*/
+
enum mac80211_rx_vht_flags {
RX_VHT_FLAG_80MHZ = BIT(0),
RX_VHT_FLAG_160MHZ = BIT(1),
@@ -1091,6 +1124,8 @@ enum mac80211_rx_vht_flags {
* it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
* @freq: frequency the radio was tuned to when receiving this frame, in MHz
+ * This field must be set for management frames, but isn't strictly needed
+ * for data (other) frames - for those it only affects radiotap reporting.
* @signal: signal strength when receiving this frame, either in dBm, in dB or
* unspecified depending on the hardware capabilities flags
* @IEEE80211_HW_SIGNAL_*
@@ -1347,6 +1382,7 @@ enum ieee80211_vif_flags {
* @csa_active: marks whether a channel switch is going on. Internally it is
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
+ * @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @driver_flags: flags/capabilities the driver has for this interface,
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
@@ -1373,6 +1409,7 @@ struct ieee80211_vif {
u8 addr[ETH_ALEN];
bool p2p;
bool csa_active;
+ bool mu_mimo_owner;
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
@@ -1486,9 +1523,8 @@ enum ieee80211_key_flags {
* wants to be given when a frame is transmitted and needs to be
* encrypted in hardware.
* @cipher: The key's cipher suite selector.
- * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
- * as well if it needs to do software PN assignment by itself
- * (e.g. due to TSO)
+ * @tx_pn: PN used for TX keys, may be used by the driver as well if it
+ * needs to do software PN assignment by itself (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
* @keyidx: the key index (0-3)
* @keylen: key material length
@@ -1514,6 +1550,9 @@ struct ieee80211_key_conf {
#define IEEE80211_MAX_PN_LEN 16
+#define TKIP_PN_TO_IV16(pn) ((u16)(pn & 0xffff))
+#define TKIP_PN_TO_IV32(pn) ((u32)((pn >> 16) & 0xffffffff))
+
/**
* struct ieee80211_key_seq - key sequence counter
*
@@ -1684,6 +1723,18 @@ struct ieee80211_sta_rates {
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
* @mfp: indicates whether the STA uses management frame protection or not.
+ * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
+ * A-MSDU. Taken from the Extended Capabilities element. 0 means
+ * unlimited.
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This
+ * field is always valid for packets with a VHT preamble. For packets
+ * with a HT preamble, additional limits apply:
+ * + If the skb is transmitted as part of a BA agreement, the
+ * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ * + If the skb is not part of a BA agreement, the A-MSDU maximal
+ * size is min(max_amsdu_len, 7935) bytes.
+ * Both additional HT limits must be enforced by the low level driver.
+ * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
@@ -1702,6 +1753,8 @@ struct ieee80211_sta {
bool tdls;
bool tdls_initiator;
bool mfp;
+ u8 max_amsdu_subframes;
+ u16 max_amsdu_len;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
@@ -1910,6 +1963,11 @@ struct ieee80211_txq {
* by just its MAC address; this prevents, for example, the same station
* from connecting to two virtual AP interfaces at the same time.
*
+ * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
+ * reordering buffer internally, guaranteeing mac80211 receives frames in
+ * order and does not need to manage its own reorder buffer or BA session
+ * timeout.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -1946,6 +2004,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
IEEE80211_HW_BEACON_TX_STATUS,
IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
+ IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2167,7 +2226,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev
* @hw: the &struct ieee80211_hw to set the MAC address for
* @addr: the address to set
*/
-static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
+static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr)
{
memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
}
@@ -2684,6 +2743,33 @@ enum ieee80211_ampdu_mlme_action {
};
/**
+ * struct ieee80211_ampdu_params - AMPDU action parameters
+ *
+ * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action.
+ * @sta: peer of this AMPDU session
+ * @tid: tid of the BA session
+ * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When
+ * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the
+ * actual ssn value used to start the session and writes the value here.
+ * @buf_size: reorder buffer size (number of subframes). Valid only when the
+ * action is set to %IEEE80211_AMPDU_RX_START or
+ * %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU.
+ * Valid only when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @timeout: BA session timeout. Valid only when the action is set to
+ * %IEEE80211_AMPDU_RX_START
+ */
+struct ieee80211_ampdu_params {
+ enum ieee80211_ampdu_mlme_action action;
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+ u8 buf_size;
+ bool amsdu;
+ u16 timeout;
+};
+
+/**
* enum ieee80211_frame_release_type - frame release reason
* @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
* @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to
@@ -3027,13 +3113,9 @@ enum ieee80211_reconfig_type {
* @ampdu_action: Perform a certain A-MPDU action
* The RA/TID combination determines the destination and TID we want
* the ampdu action to be performed for. The action is defined through
- * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
- * is the first frame we expect to perform the action on. Notice
- * that TX/RX_STOP can pass NULL for this parameter.
- * The @buf_size parameter is only valid when the action is set to
- * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- * buffer size (number of subframes) for this session -- the driver
- * may neither send aggregates containing more subframes than this
+ * ieee80211_ampdu_mlme_action.
+ * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
+ * may neither send aggregates containing more subframes than @buf_size
* nor send aggregates in a way that lost frames would exceed the
* buffer size. If just limiting the aggregate size, this would be
* possible with a buf_size of 8:
@@ -3044,9 +3126,6 @@ enum ieee80211_reconfig_type {
* buffer size of 8. Correct ways to retransmit #1 would be:
* - TX: 1 or 18 or 81
* Even "189" would be wrong since 1 could be lost again.
- * The @amsdu parameter is valid when the action is set to
- * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
- * to receive A-MSDU within A-MPDU.
*
* Returns a negative error code on failure.
* The callback can sleep.
@@ -3388,9 +3467,7 @@ struct ieee80211_ops {
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -4374,21 +4451,19 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k);
/**
- * ieee80211_get_key_tx_seq - get key TX sequence counter
+ * ieee80211_tkip_add_iv - write TKIP IV and Ext. IV to pos
*
+ * @pos: start of crypto header
* @keyconf: the parameter passed with the set key
- * @seq: buffer to receive the sequence data
+ * @pn: PN to add
*
- * This function allows a driver to retrieve the current TX IV/PN
- * for the given key. It must not be called if IV generation is
- * offloaded to the device.
+ * Returns: pointer to the octet following IVs (i.e. beginning of
+ * the packet payload)
*
- * Note that this function may only be called when no TX processing
- * can be done concurrently, for example when queues are stopped
- * and the stop has been synchronized.
+ * This function writes the TKIP IV value to @pos (which should
+ * point to the crypto header).
*/
-void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
- struct ieee80211_key_seq *seq);
+u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn);
/**
* ieee80211_get_key_rx_seq - get key RX sequence counter
@@ -4410,23 +4485,6 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
- * ieee80211_set_key_tx_seq - set key TX sequence counter
- *
- * @keyconf: the parameter passed with the set key
- * @seq: new sequence data
- *
- * This function allows a driver to set the current TX IV/PNs for the
- * given key. This is useful when resuming from WoWLAN sleep and the
- * device may have transmitted frames using the PTK, e.g. replies to
- * ARP requests.
- *
- * Note that this function may only be called when no TX processing
- * can be done concurrently.
- */
-void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
- struct ieee80211_key_seq *seq);
-
-/**
* ieee80211_set_key_rx_seq - set key RX sequence counter
*
* @keyconf: the parameter passed with the set key
@@ -5121,6 +5179,24 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
const u8 *addr);
/**
+ * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered
+ * @pubsta: station struct
+ * @tid: the session's TID
+ * @ssn: starting sequence number of the bitmap, all frames before this are
+ * assumed to be out of the window after the call
+ * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc.
+ * @received_mpdus: number of received mpdus in firmware
+ *
+ * This function moves the BA window and releases all frames before @ssn, and
+ * marks frames marked in the bitmap as having been filtered. Afterwards, it
+ * checks if any frames in the window starting from @ssn can now be released
+ * (in case they were only waiting for frames that were filtered.)
+ */
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ u16 ssn, u64 filtered,
+ u16 received_mpdus);
+
+/**
* ieee80211_send_bar - send a BlockAckReq frame
*
* can be used to flush pending frames from the peer's aggregation reorder
@@ -5371,6 +5447,21 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
return ieee80211_iftype_p2p(vif->type, vif->p2p);
}
+/**
+ * ieee80211_update_mu_groups - set the VHT MU-MIMO group data
+ *
+ * @vif: the specified virtual interface
+ * @membership: 64-bit array - a bit is set if the station is a member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ *
+ * Note: This function assumes that the given vif is valid and the position and
+ * membership data is of the correct size and are in the same byte order as the
+ * matching GroupId management frame.
+ * Calls to this function need to be serialized with RX path.
+ */
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+ const u8 *membership, const u8 *position);
+
void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
int rssi_min_thold,
int rssi_max_thold);
@@ -5523,4 +5614,19 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
*/
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_get_depth - get pending frame/byte count of given txq
+ *
+ * The values are not guaranteed to be coherent with regard to each other, i.e.
+ * txq state can change halfway through this function and the caller may end up
+ * with "new" frame_cnt and "old" byte_cnt or vice-versa.
+ *
+ * @txq: pointer obtained from station or virtual interface
+ * @frame_cnt: pointer to store frame count
+ * @byte_cnt: pointer to store byte count
+ */
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+ unsigned long *frame_cnt,
+ unsigned long *byte_cnt);
#endif /* MAC80211_H */
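To illustrate the consolidated ampdu_action signature above, a driver callback now unpacks a single ieee80211_ampdu_params instead of five separate arguments; the example_* helpers below are invented solely for this sketch:

static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_ampdu_params *params)
{
	struct ieee80211_sta *sta = params->sta;
	u16 tid = params->tid;

	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* ssn, buf_size and timeout are meaningful for this action */
		return example_rx_agg_start(hw, sta, tid, params->ssn,
					    params->buf_size, params->timeout);
	case IEEE80211_AMPDU_RX_STOP:
		return example_rx_agg_stop(hw, sta, tid);
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* buf_size and amsdu are valid for this action */
		return example_tx_agg_oper(hw, sta, tid, params->buf_size,
					   params->amsdu);
	default:
		return -EOPNOTSUPP;
	}
}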
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index da574bbdc333..6cd7a70706a9 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -16,10 +16,10 @@
#ifndef NET_MAC802154_H
#define NET_MAC802154_H
+#include <asm/unaligned.h>
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
-#include <linux/unaligned/memmove.h>
#include <net/cfg802154.h>
@@ -247,13 +247,14 @@ struct ieee802154_ops {
*/
static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
{
- /* return some invalid fc on failure */
- if (unlikely(skb->len < 2)) {
+ /* check if we can read the fc at skb_mac_header of the sk buffer */
+ if (unlikely(!skb_mac_header_was_set(skb) ||
+ (skb_tail_pointer(skb) - skb_mac_header(skb)) < 2)) {
WARN_ON(1);
return cpu_to_le16(0);
}
- return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb));
+ return get_unaligned_le16(skb_mac_header(skb));
}
/**
@@ -263,7 +264,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
*/
static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
{
- __put_unaligned_memmove64(swab64p(be64_src), le64_dst);
+ put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst);
}
/**
@@ -273,7 +274,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
*/
static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
{
- __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
+ put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst);
}
/**
@@ -283,7 +284,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
*/
static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
{
- __put_unaligned_memmove16(swab16p(le16_src), be16_dst);
+ put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst);
}
/**
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
index e2a518b60e19..a3f3c11b2526 100644
--- a/include/net/netfilter/nft_masq.h
+++ b/include/net/netfilter/nft_masq.h
@@ -2,7 +2,9 @@
#define _NFT_MASQ_H_
struct nft_masq {
- u32 flags;
+ u32 flags;
+ enum nft_registers sreg_proto_min:8;
+ enum nft_registers sreg_proto_max:8;
};
extern const struct nla_policy nft_masq_policy[];
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2b7907a35568..a69cde3ce460 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -80,9 +80,13 @@ struct netns_ipv4 {
int sysctl_tcp_ecn;
int sysctl_tcp_ecn_fallback;
+ int sysctl_ip_default_ttl;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_nonlocal_bind;
+ /* Shall we try to damage output packets if routing dev changes? */
+ int sysctl_ip_dynaddr;
+ int sysctl_ip_early_demux;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
@@ -98,6 +102,21 @@ struct netns_ipv4 {
int sysctl_tcp_keepalive_probes;
int sysctl_tcp_keepalive_intvl;
+ int sysctl_tcp_syn_retries;
+ int sysctl_tcp_synack_retries;
+ int sysctl_tcp_syncookies;
+ int sysctl_tcp_reordering;
+ int sysctl_tcp_retries1;
+ int sysctl_tcp_retries2;
+ int sysctl_tcp_orphan_retries;
+ int sysctl_tcp_fin_timeout;
+ unsigned int sysctl_tcp_notsent_lowat;
+
+ int sysctl_igmp_max_memberships;
+ int sysctl_igmp_max_msf;
+ int sysctl_igmp_llm_reports;
+ int sysctl_igmp_qrv;
+
struct ping_group_range ping_group_range;
atomic_t dev_addr_genid;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c0368db6df54..10d0848f5b8a 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -58,7 +58,10 @@ struct netns_ipv6 {
struct timer_list ip6_fib_timer;
struct hlist_head *fib_table_hash;
struct fib6_table *fib6_main_tbl;
+ struct list_head fib6_walkers;
struct dst_ops ip6_dst_ops;
+ rwlock_t fib6_walker_lock;
+ spinlock_t fib6_gc_lock;
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 68e509750caa..039cc29cb4a8 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -51,7 +51,7 @@ void pn_sock_init(void);
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa);
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb);
void phonet_get_local_port_range(int *min, int *max);
-void pn_sock_hash(struct sock *sk);
+int pn_sock_hash(struct sock *sk);
void pn_sock_unhash(struct sock *sk);
int pn_sock_get_port(struct sock *sk, unsigned short sport);
diff --git a/include/net/ping.h b/include/net/ping.h
index ac80cb45e630..4cd90d6b5c25 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -65,7 +65,7 @@ struct pingfakehdr {
};
int ping_get_port(struct sock *sk, unsigned short ident);
-void ping_hash(struct sock *sk);
+int ping_hash(struct sock *sk);
void ping_unhash(struct sock *sk);
int ping_init_sock(struct sock *sk);
@@ -79,7 +79,6 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index bc49967e1a68..caa5e18636df 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -358,4 +358,69 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
+struct tc_cls_u32_knode {
+ struct tcf_exts *exts;
+ struct tc_u32_sel *sel;
+ u32 handle;
+ u32 val;
+ u32 mask;
+ u32 link_handle;
+ u8 fshift;
+};
+
+struct tc_cls_u32_hnode {
+ u32 handle;
+ u32 prio;
+ unsigned int divisor;
+};
+
+enum tc_clsu32_command {
+ TC_CLSU32_NEW_KNODE,
+ TC_CLSU32_REPLACE_KNODE,
+ TC_CLSU32_DELETE_KNODE,
+ TC_CLSU32_NEW_HNODE,
+ TC_CLSU32_REPLACE_HNODE,
+ TC_CLSU32_DELETE_HNODE,
+};
+
+struct tc_cls_u32_offload {
+ /* knode values */
+ enum tc_clsu32_command command;
+ union {
+ struct tc_cls_u32_knode knode;
+ struct tc_cls_u32_hnode hnode;
+ };
+};
+
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW 1
+
+static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+{
+ if (!(dev->features & NETIF_F_HW_TC))
+ return false;
+
+ if (flags & TCA_CLS_FLAGS_SKIP_HW)
+ return false;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ return false;
+
+ return true;
+}
+
+enum tc_fl_command {
+ TC_CLSFLOWER_REPLACE,
+ TC_CLSFLOWER_DESTROY,
+};
+
+struct tc_cls_flower_offload {
+ enum tc_fl_command command;
+ unsigned long cookie;
+ struct flow_dissector *dissector;
+ struct fl_flow_key *mask;
+ struct fl_flow_key *key;
+ struct tcf_exts *exts;
+};
+
#endif
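A hedged sketch of how a driver's setup_tc path might consume the new u32 offload structures, gated on tc_should_offload(); the example_* hooks are hypothetical driver internals:

static int example_setup_tc_cls_u32(struct net_device *dev,
				    struct tc_cls_u32_offload *cls,
				    u32 flags)
{
	if (!tc_should_offload(dev, flags))
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return example_add_knode(dev, &cls->knode);
	case TC_CLSU32_DELETE_KNODE:
		return example_del_knode(dev, cls->knode.handle);
	default:
		return -EOPNOTSUPP;
	}
}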
diff --git a/include/net/raw.h b/include/net/raw.h
index 6a40c6562dd2..3e789008394d 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -57,7 +57,7 @@ int raw_seq_open(struct inode *ino, struct file *file,
#endif
-void raw_hash_sk(struct sock *sk);
+int raw_hash_sk(struct sock *sk);
void raw_unhash_sk(struct sock *sk);
struct raw_sock {
diff --git a/include/net/route.h b/include/net/route.h
index a3b9ef74a389..9b0a523bb428 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -329,14 +329,13 @@ static inline int inet_iif(const struct sk_buff *skb)
return skb->skb_iif;
}
-extern int sysctl_ip_default_ttl;
-
static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+ struct net *net = dev_net(dst->dev);
if (hoplimit == 0)
- hoplimit = sysctl_ip_default_ttl;
+ hoplimit = net->ipv4.sysctl_ip_default_ttl;
return hoplimit;
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 636a362a0e03..46e55f0202a6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -345,6 +345,12 @@ extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
+static inline const struct Qdisc_ops *
+get_default_qdisc_ops(const struct net_device *dev, int ntx)
+{
+ return ntx < dev->real_num_tx_queues ?
+ default_qdisc_ops : &pfifo_fast_ops;
+}
struct Qdisc_class_common {
u32 classid;
@@ -396,7 +402,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+ unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -707,6 +714,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
sch->qstats.backlog = 0;
}
+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ struct Qdisc **pold)
+{
+ struct Qdisc *old;
+
+ sch_tree_lock(sch);
+ old = *pold;
+ *pold = new;
+ if (old != NULL) {
+ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+ qdisc_reset(old);
+ }
+ sch_tree_unlock(sch);
+
+ return old;
+}
+
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
struct sk_buff_head *list)
{
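With qdisc_replace() above, a classful qdisc's graft operation collapses to a few lines; the example_sched_data private structure is assumed for the sketch:

static int example_graft(struct Qdisc *sch, unsigned long arg,
			 struct Qdisc *new, struct Qdisc **old)
{
	struct example_sched_data *q = qdisc_priv(sch);	/* hypothetical priv */

	if (!new)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}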
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 835aa2ed9870..65521cfdcade 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -82,6 +82,11 @@
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
+/* Round an int up to the next multiple of 4. */
+#define WORD_ROUND(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define WORD_TRUNC(s) ((s)&~3)
+
/*
* Function declarations.
*/
@@ -426,7 +431,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);
+ frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -475,9 +480,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
(void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
pos++)
-/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
-
/* External references. */
extern struct proto sctp_prot;
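For clarity, the alignment macros introduced above behave as follows (worked arithmetic only, not new code):

/* WORD_ROUND(13) == (13 + 3) & ~3 == 16
 * WORD_TRUNC(13) == 13 & ~3       == 12
 * so a candidate frag point of 1453 bytes is truncated to 1452 before use.
 */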
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 487ef34bbd63..efc01743b9d6 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -201,7 +201,7 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *,
struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *,
const struct sctp_sndrcvinfo *sinfo,
int len, const __u8 flags,
- __u16 ssn);
+ __u16 ssn, gfp_t gfp);
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *,
const __u32);
struct sctp_chunk *sctp_make_sack(const struct sctp_association *);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5a57409da37b..6df1ce7a411c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -535,7 +535,6 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
struct iov_iter *);
-void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -656,7 +655,7 @@ void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
- struct sock *);
+ struct sock *, gfp_t gfp);
void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
union sctp_addr *);
const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
@@ -718,10 +717,10 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *,
__u16 sport, __u16 dport);
struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *,
- struct sctp_chunk *, int);
+ struct sctp_chunk *, int, gfp_t);
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *,
struct sctp_chunk *);
-int sctp_packet_transmit(struct sctp_packet *);
+int sctp_packet_transmit(struct sctp_packet *, gfp_t);
void sctp_packet_free(struct sctp_packet *);
static inline int sctp_packet_empty(struct sctp_packet *packet)
@@ -1054,7 +1053,7 @@ struct sctp_outq {
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
-int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk);
+int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1062,7 +1061,7 @@ void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
sctp_retransmit_reason_t);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
-int sctp_outq_uncork(struct sctp_outq *);
+int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
/* Uncork and flush an outqueue. */
static inline void sctp_outq_cork(struct sctp_outq *q)
{
@@ -1098,7 +1097,7 @@ int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
gfp_t gfp);
int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
- __u8 addr_state, gfp_t gfp);
+ int new_size, __u8 addr_state, gfp_t gfp);
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *);
diff --git a/include/net/sock.h b/include/net/sock.h
index f5ea148853e2..255d3e03727b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -984,7 +984,7 @@ struct proto {
void (*release_cb)(struct sock *sk);
/* Keeping track of sk's, looking them up, and port selection methods. */
- void (*hash)(struct sock *sk);
+ int (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
@@ -1194,10 +1194,10 @@ static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
/* With per-bucket locks this operation is not-atomic, so that
* this version is not worse.
*/
-static inline void __sk_prot_rehash(struct sock *sk)
+static inline int __sk_prot_rehash(struct sock *sk)
{
sk->sk_prot->unhash(sk);
- sk->sk_prot->hash(sk);
+ return sk->sk_prot->hash(sk);
}
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
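Since proto->hash() now returns an int, callers can propagate the failure; a minimal sketch of such a caller, with the surrounding socket setup omitted and the function name invented:

static int example_start_listening(struct sock *sk)
{
	int err;

	err = sk->sk_prot->hash(sk);	/* may now fail, e.g. with -ENOMEM */
	if (err)
		return err;
	/* ... continue bringing the socket up ... */
	return 0;
}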
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 592a6bc02b0b..93c520b83d10 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -2,6 +2,7 @@
#define __NET_TC_GACT_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_gact.h>
struct tcf_gact {
struct tcf_common common;
@@ -15,4 +16,19 @@ struct tcf_gact {
#define to_gact(a) \
container_of(a->priv, struct tcf_gact, common)
+static inline bool is_tcf_gact_shot(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_gact *gact;
+
+ if (a->ops && a->ops->type != TCA_ACT_GACT)
+ return false;
+
+ gact = a->priv;
+ if (gact->tcf_action == TC_ACT_SHOT)
+ return true;
+
+#endif
+ return false;
+}
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
new file mode 100644
index 000000000000..dc9a09aefb33
--- /dev/null
+++ b/include/net/tc_act/tc_ife.h
@@ -0,0 +1,61 @@
+#ifndef __NET_TC_IFE_H
+#define __NET_TC_IFE_H
+
+#include <net/act_api.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+
+#define IFE_METAHDRLEN 2
+struct tcf_ife_info {
+ struct tcf_common common;
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+ /* list of metaids allowed */
+ struct list_head metalist;
+};
+#define to_ife(a) \
+ container_of(a->priv, struct tcf_ife_info, common)
+
+struct tcf_meta_info {
+ const struct tcf_meta_ops *ops;
+ void *metaval;
+ u16 metaid;
+ struct list_head metalist;
+};
+
+struct tcf_meta_ops {
+ u16 metaid; /* Maintainer provided ID */
+ u16 metatype; /* netlink attribute type (look at net/netlink.h) */
+ const char *name;
+ const char *synopsis;
+ struct list_head list;
+ int (*check_presence)(struct sk_buff *, struct tcf_meta_info *);
+ int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
+ int (*decode)(struct sk_buff *, void *, u16 len);
+ int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
+ int (*alloc)(struct tcf_meta_info *, void *);
+ void (*release)(struct tcf_meta_info *);
+ int (*validate)(void *val, int len);
+ struct module *owner;
+};
+
+#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ifemeta" __stringify_1(metan))
+
+int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
+int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
+int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+ const void *dval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
+int ife_validate_meta_u32(void *val, int len);
+int ife_validate_meta_u16(void *val, int len);
+void ife_release_meta_gen(struct tcf_meta_info *mi);
+int register_ife_op(struct tcf_meta_ops *mops);
+int unregister_ife_op(struct tcf_meta_ops *mops);
+
+#endif /* __NET_TC_IFE_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 0df9a0db4a8e..b496d5ad7d42 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -20,6 +20,7 @@
#define __NET_TC_SKBEDIT_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_skbedit.h>
struct tcf_skbedit {
struct tcf_common common;
@@ -32,4 +33,19 @@ struct tcf_skbedit {
#define to_skbedit(a) \
container_of(a->priv, struct tcf_skbedit, common)
+/* Return true iff action is mark */
+static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
+ return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+#endif
+ return false;
+}
+
+static inline u32 tcf_skbedit_mark(const struct tc_action *a)
+{
+ return to_skbedit(a)->mark;
+}
+
#endif /* __NET_TC_SKBEDIT_H */
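A hedged sketch of how a driver translating classifier actions for offload might use the gact and skbedit helpers above; the list-based tcf_exts walk and the example_hw_* hooks are assumptions of this illustration:

static int example_parse_actions(struct net_device *dev, struct tcf_exts *exts)
{
	struct tc_action *a;

	/* assumes the list-based tcf_exts layout of this kernel generation */
	list_for_each_entry(a, &exts->actions, list) {
		if (is_tcf_gact_shot(a))
			return example_hw_set_drop(dev);
		if (is_tcf_skbedit_mark(a))
			return example_hw_set_mark(dev, tcf_skbedit_mark(a));
	}
	return -EOPNOTSUPP;
}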
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b04bc989ad6c..b91370f61be6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -238,13 +238,6 @@ extern struct inet_timewait_death_row tcp_death_row;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
-extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_syn_retries;
-extern int sysctl_tcp_synack_retries;
-extern int sysctl_tcp_retries1;
-extern int sysctl_tcp_retries2;
-extern int sysctl_tcp_orphan_retries;
-extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
@@ -273,7 +266,6 @@ extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
-extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
@@ -567,6 +559,7 @@ void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+void tcp_fin(struct sock *sk);
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
@@ -962,9 +955,11 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
*/
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
+ struct net *net = sock_net((struct sock *)tp);
+
tp->do_early_retrans = sysctl_tcp_early_retrans &&
sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
- sysctl_tcp_reordering == 3;
+ net->ipv4.sysctl_tcp_reordering == 3;
}
static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
@@ -1251,7 +1246,7 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
static inline int tcp_fin_time(const struct sock *sk)
{
- int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+ int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
const int rto = inet_csk(sk)->icsk_rto;
if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1433,6 +1428,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
@@ -1681,7 +1677,8 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
- return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+ struct net *net = sock_net((struct sock *)tp);
+ return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}
static inline bool tcp_stream_memory_free(const struct sock *sk)
@@ -1815,4 +1812,38 @@ static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
skb->truesize = 2;
}
+static inline int tcp_inq(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int answ;
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ answ = 0;
+ } else if (sock_flag(sk, SOCK_URGINLINE) ||
+ !tp->urg_data ||
+ before(tp->urg_seq, tp->copied_seq) ||
+ !before(tp->urg_seq, tp->rcv_nxt)) {
+
+ answ = tp->rcv_nxt - tp->copied_seq;
+
+ /* Subtract 1, if FIN was received */
+ if (answ && sock_flag(sk, SOCK_DONE))
+ answ--;
+ } else {
+ answ = tp->urg_seq - tp->copied_seq;
+ }
+
+ return answ;
+}
+
+static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
+{
+ u16 segs_in;
+
+ segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+ tp->segs_in += segs_in;
+ if (skb->len > tcp_hdrlen(skb))
+ tp->data_segs_in += segs_in;
+}
+
#endif /* _TCP_H */
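tcp_inq() above reports the unread byte count much like SIOCINQ; a minimal sketch of querying it under the socket lock (the wrapper itself is illustrative only):

static int example_bytes_readable(struct sock *sk)
{
	int answ;

	lock_sock(sk);
	answ = tcp_inq(sk);
	release_sock(sk);

	return answ;
}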
diff --git a/include/net/udp.h b/include/net/udp.h
index 2842541e28e7..92927f729ac8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -177,9 +177,10 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
}
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
-static inline void udp_lib_hash(struct sock *sk)
+static inline int udp_lib_hash(struct sock *sk)
{
BUG();
+ return 0;
}
void udp_lib_unhash(struct sock *sk);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index cca2ad3082c3..b83114077cee 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -88,8 +88,8 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be16 src_port,
- __be16 dst_port, bool nocheck);
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck);
#endif
void udp_tunnel_sock_release(struct socket *sock);
@@ -103,7 +103,7 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
{
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
- return iptunnel_handle_offloads(skb, udp_csum, type);
+ return iptunnel_handle_offloads(skb, type);
}
static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0fb86442544b..73ed2e951c02 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -9,17 +9,71 @@
#include <linux/udp.h>
#include <net/dst_metadata.h>
+/* VXLAN protocol (RFC 7348) header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * I = VXLAN Network Identifier (VNI) present.
+ */
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+};
+
+/* VXLAN header flags. */
+#define VXLAN_HF_VNI cpu_to_be32(BIT(27))
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK cpu_to_be32(VXLAN_VID_MASK << 8)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS 8
+#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
+
+/* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R|R|R|C| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) |O| Csum start |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C = Remote checksum offload bit. When set, indicates that the
+ * remote checksum offload data is present.
+ *
+ * O = Offset bit. Indicates the checksum offset relative to
+ * checksum start.
+ *
+ * Csum start = Checksum start divided by two.
+ *
+ * http://tools.ietf.org/html/draft-herbert-vxlan-rco
+ */
+
+/* VXLAN-RCO header flags. */
+#define VXLAN_HF_RCO cpu_to_be32(BIT(21))
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK cpu_to_be32(0x7f) /* Last byte of vni field */
+#define VXLAN_RCO_UDP cpu_to_be32(0x80) /* Indicate UDP RCO (TCP when not set) */
+#define VXLAN_RCO_SHIFT 1 /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (0x7f << VXLAN_RCO_SHIFT)
/*
- * VXLAN Group Based Policy Extension:
+ * VXLAN Group Based Policy Extension (VXLAN_F_GBP):
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R| Group Policy ID |
+ * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VXLAN Network Identifier (VNI) | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
+ * G = Group Policy ID present.
+ *
* D = Don't Learn bit. When set, this bit indicates that the egress
* VTEP MUST NOT learn the source address of the encapsulated frame.
*
@@ -27,18 +81,18 @@
* this packet. Policies MUST NOT be applied by devices when the
* A bit is set.
*
- * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ * https://tools.ietf.org/html/draft-smith-vxlan-group-policy
*/
struct vxlanhdr_gbp {
- __u8 vx_flags;
+ u8 vx_flags;
#ifdef __LITTLE_ENDIAN_BITFIELD
- __u8 reserved_flags1:3,
+ u8 reserved_flags1:3,
policy_applied:1,
reserved_flags2:2,
dont_learn:1,
reserved_flags3:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
- __u8 reserved_flags1:1,
+ u8 reserved_flags1:1,
dont_learn:1,
reserved_flags2:2,
policy_applied:1,
@@ -50,7 +104,10 @@ struct vxlanhdr_gbp {
__be32 vx_vni;
};
-#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF)
+/* VXLAN-GBP header flags. */
+#define VXLAN_HF_GBP cpu_to_be32(BIT(31))
+
+#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | cpu_to_be32(0xFFFFFF))
/* skb->mark mapping
*
@@ -62,44 +119,6 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
-/* VXLAN protocol header:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |G|R|R|R|I|R|R|C| Reserved |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | VXLAN Network Identifier (VNI) | Reserved |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * G = 1 Group Policy (VXLAN-GBP)
- * I = 1 VXLAN Network Identifier (VNI) present
- * C = 1 Remote checksum offload (RCO)
- */
-struct vxlanhdr {
- __be32 vx_flags;
- __be32 vx_vni;
-};
-
-/* VXLAN header flags. */
-#define VXLAN_HF_RCO BIT(21)
-#define VXLAN_HF_VNI BIT(27)
-#define VXLAN_HF_GBP BIT(31)
-
-/* Remote checksum offload header option */
-#define VXLAN_RCO_MASK 0x7f /* Last byte of vni field */
-#define VXLAN_RCO_UDP 0x80 /* Indicate UDP RCO (TCP when not set *) */
-#define VXLAN_RCO_SHIFT 1 /* Left shift of start */
-#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
-#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
-
-#define VXLAN_N_VID (1u << 24)
-#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VNI_HASH_BITS 10
-#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS 8
-#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
-
struct vxlan_metadata {
u32 gbp;
};
@@ -125,23 +144,25 @@ union vxlan_addr {
struct vxlan_rdst {
union vxlan_addr remote_ip;
__be16 remote_port;
- u32 remote_vni;
+ __be32 remote_vni;
u32 remote_ifindex;
struct list_head list;
struct rcu_head rcu;
+ struct dst_cache dst_cache;
};
struct vxlan_config {
union vxlan_addr remote_ip;
union vxlan_addr saddr;
- u32 vni;
+ __be32 vni;
int remote_ifindex;
int mtu;
__be16 dst_port;
- __u16 port_min;
- __u16 port_max;
- __u8 tos;
- __u8 ttl;
+ u16 port_min;
+ u16 port_max;
+ u8 tos;
+ u8 ttl;
+ __be32 label;
u32 flags;
unsigned long age_interval;
unsigned int addrmax;
@@ -177,7 +198,7 @@ struct vxlan_dev {
#define VXLAN_F_L2MISS 0x08
#define VXLAN_F_L3MISS 0x10
#define VXLAN_F_IPV6 0x20
-#define VXLAN_F_UDP_CSUM 0x40
+#define VXLAN_F_UDP_ZERO_CSUM_TX 0x40
#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
#define VXLAN_F_REMCSUM_TX 0x200
@@ -242,6 +263,68 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
/* IPv6 header + UDP + VXLAN + Ethernet header */
#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
+{
+ return (struct vxlanhdr *)(udp_hdr(skb) + 1);
+}
+
+static inline __be32 vxlan_vni(__be32 vni_field)
+{
+#if defined(__BIG_ENDIAN)
+ return (__force __be32)((__force u32)vni_field >> 8);
+#else
+ return (__force __be32)((__force u32)(vni_field & VXLAN_VNI_MASK) << 8);
+#endif
+}
+
+static inline __be32 vxlan_vni_field(__be32 vni)
+{
+#if defined(__BIG_ENDIAN)
+ return (__force __be32)((__force u32)vni << 8);
+#else
+ return (__force __be32)((__force u32)vni >> 8);
+#endif
+}
+
+static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
+{
+#if defined(__BIG_ENDIAN)
+ return (__force __be32)tun_id;
+#else
+ return (__force __be32)((__force u64)tun_id >> 32);
+#endif
+}
+
+static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
+{
+#if defined(__BIG_ENDIAN)
+ return (__force __be64)vni;
+#else
+ return (__force __be64)((u64)(__force u32)vni << 32);
+#endif
+}
+
+static inline size_t vxlan_rco_start(__be32 vni_field)
+{
+ return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+}
+
+static inline size_t vxlan_rco_offset(__be32 vni_field)
+{
+ return (vni_field & VXLAN_RCO_UDP) ?
+ offsetof(struct udphdr, check) :
+ offsetof(struct tcphdr, check);
+}
+
+static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
+{
+ __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT);
+
+ if (offset == offsetof(struct udphdr, check))
+ vni_field |= VXLAN_RCO_UDP;
+ return vni_field;
+}
+
#if IS_ENABLED(CONFIG_VXLAN)
void vxlan_get_rx_port(struct net_device *netdev);
#else
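To show the big-endian VNI handling introduced above, a sketch of building and parsing the header field with the new helpers (the surrounding TX/RX plumbing is omitted):

static void example_build_vxlan_header(struct vxlanhdr *vxh, __be32 vni)
{
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);	/* place VNI in bits 8..31 */
}

static __be32 example_parse_vxlan_vni(struct sk_buff *skb)
{
	/* assumes udp_hdr(skb) has been set up by the UDP receive path */
	return vxlan_vni(vxlan_hdr(skb)->vx_vni);
}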
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index c34c9002460c..931a47ba4571 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -262,24 +262,22 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
static inline int iboe_get_rate(struct net_device *dev)
{
- struct ethtool_cmd cmd;
- u32 speed;
+ struct ethtool_link_ksettings cmd;
int err;
rtnl_lock();
- err = __ethtool_get_settings(dev, &cmd);
+ err = __ethtool_get_link_ksettings(dev, &cmd);
rtnl_unlock();
if (err)
return IB_RATE_PORT_CURRENT;
- speed = ethtool_cmd_speed(&cmd);
- if (speed >= 40000)
+ if (cmd.base.speed >= 40000)
return IB_RATE_40_GBPS;
- else if (speed >= 30000)
+ else if (cmd.base.speed >= 30000)
return IB_RATE_30_GBPS;
- else if (speed >= 20000)
+ else if (cmd.base.speed >= 20000)
return IB_RATE_20_GBPS;
- else if (speed >= 10000)
+ else if (cmd.base.speed >= 10000)
return IB_RATE_10_GBPS;
else
return IB_RATE_PORT_CURRENT;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 3a03c1d18afa..fb2cef4e9747 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -56,6 +56,7 @@
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
@@ -97,6 +98,11 @@ enum rdma_node_type {
RDMA_NODE_USNIC_UDP,
};
+enum {
+ /* set the local administered indication */
+ IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
+};
+
enum rdma_transport_type {
RDMA_TRANSPORT_IB,
RDMA_TRANSPORT_IWARP,
@@ -213,6 +219,7 @@ enum ib_device_cap_flags {
IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
+ IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
};
enum ib_signature_prot_cap {
@@ -274,7 +281,7 @@ struct ib_device_attr {
u32 hw_ver;
int max_qp;
int max_qp_wr;
- int device_cap_flags;
+ u64 device_cap_flags;
int max_sge;
int max_sge_rd;
int max_cq;
@@ -490,6 +497,7 @@ union rdma_protocol_stats {
| RDMA_CORE_CAP_OPA_MAD)
struct ib_port_attr {
+ u64 subnet_prefix;
enum ib_port_state state;
enum ib_mtu max_mtu;
enum ib_mtu active_mtu;
@@ -509,6 +517,7 @@ struct ib_port_attr {
u8 active_width;
u8 active_speed;
u8 phys_state;
+ bool grh_required;
};
enum ib_device_modify_flags {
@@ -614,6 +623,7 @@ enum {
};
#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
+#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
enum ib_ah_flags {
IB_AH_GRH = 1
@@ -1860,6 +1870,14 @@ struct ib_device {
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
void (*drain_rq)(struct ib_qp *qp);
void (*drain_sq)(struct ib_qp *qp);
+ int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+ int state);
+ int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *ivf);
+ int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_stats *stats);
+ int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+ int type);
struct ib_dma_mapping_ops *dma_ops;
@@ -2303,6 +2321,15 @@ int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid,
struct ib_gid_attr *attr);
+int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+ int state);
+int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *info);
+int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_stats *stats);
+int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+ int type);
+
int ib_query_pkey(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
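As a hedged sketch of the new SR-IOV verbs, a caller could query a VF's configuration as below; there is no netlink plumbing here and the example_ name is invented:

static int example_show_vf(struct ib_device *device, int vf, u8 port)
{
	struct ifla_vf_info info;
	int err;

	err = ib_get_vf_config(device, vf, port, &info);
	if (err)
		return err;

	pr_info("vf %d port %d link state %u\n", vf, port, info.linkstate);
	return 0;
}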
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index a0fa975cd1c1..2b95c2c336eb 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -97,7 +97,7 @@
#define OPA_LINKDOWN_REASON_WIDTH_POLICY 41
/* 42-48 reserved */
#define OPA_LINKDOWN_REASON_DISCONNECTED 49
-#define OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED 50
+#define OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED 50
#define OPA_LINKDOWN_REASON_NOT_INSTALLED 51
#define OPA_LINKDOWN_REASON_CHASSIS_CONFIG 52
/* 53 reserved */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
new file mode 100644
index 000000000000..a8696551abb1
--- /dev/null
+++ b/include/rdma/rdma_vt.h
@@ -0,0 +1,481 @@
+#ifndef DEF_RDMA_VT_H
+#define DEF_RDMA_VT_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Structure that low level drivers will populate in order to register with the
+ * rdmavt layer.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_mr.h>
+#include <rdma/rdmavt_qp.h>
+
+#define RVT_MAX_PKEY_VALUES 16
+
+struct rvt_ibport {
+ struct rvt_qp __rcu *qp[2];
+ struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
+ struct rb_root mcast_tree;
+ spinlock_t lock; /* protect changes in this struct */
+
+ /* non-zero when timer is set */
+ unsigned long mkey_lease_timeout;
+ unsigned long trap_timeout;
+ __be64 gid_prefix; /* in network order */
+ __be64 mkey;
+ u64 tid;
+ u32 port_cap_flags;
+ u32 pma_sample_start;
+ u32 pma_sample_interval;
+ __be16 pma_counter_select[5];
+ u16 pma_tag;
+ u16 mkey_lease_period;
+ u16 sm_lid;
+ u8 sm_sl;
+ u8 mkeyprot;
+ u8 subnet_timeout;
+ u8 vl_high_limit;
+
+ /*
+ * Driver is expected to keep these up to date. These
+ * counters are informational only and not required to be
+ * completely accurate.
+ */
+ u64 n_rc_resends;
+ u64 n_seq_naks;
+ u64 n_rdma_seq;
+ u64 n_rnr_naks;
+ u64 n_other_naks;
+ u64 n_loop_pkts;
+ u64 n_pkt_drops;
+ u64 n_vl15_dropped;
+ u64 n_rc_timeouts;
+ u64 n_dmawait;
+ u64 n_unaligned;
+ u64 n_rc_dupreq;
+ u64 n_rc_seqnak;
+ u16 pkey_violations;
+ u16 qkey_violations;
+ u16 mkey_violations;
+
+	/* Hot-path per-CPU counters to avoid cache line bouncing on update */
+ u64 z_rc_acks;
+ u64 z_rc_qacks;
+ u64 z_rc_delayed_comp;
+ u64 __percpu *rc_acks;
+ u64 __percpu *rc_qacks;
+ u64 __percpu *rc_delayed_comp;
+
+ void *priv; /* driver private data */
+
+ /*
+ * The pkey table is allocated and maintained by the driver. Drivers
+	 * need to have access to this before registering with rdmavt. However,
+	 * rdmavt will also need access to it, so drivers need to provide it
+	 * during the attach port API call.
+ */
+ u16 *pkey_table;
+
+ struct rvt_ah *sm_ah;
+};
+
+#define RVT_CQN_MAX 16 /* maximum length of cq name */
+
+/*
+ * Things that are driver specific, module parameters in hfi1 and qib
+ */
+struct rvt_driver_params {
+ struct ib_device_attr props;
+
+ /*
+ * Anything driver specific that is not covered by props
+ * For instance special module parameters. Goes here.
+ */
+ unsigned int lkey_table_size;
+ unsigned int qp_table_size;
+ int qpn_start;
+ int qpn_inc;
+ int qpn_res_start;
+ int qpn_res_end;
+ int nports;
+ int npkeys;
+ u8 qos_shift;
+ char cq_name[RVT_CQN_MAX];
+ int node;
+ int max_rdma_atomic;
+ int psn_mask;
+ int psn_shift;
+ int psn_modify_mask;
+ u32 core_cap_flags;
+ u32 max_mad_size;
+};
+
+/* Protection domain */
+struct rvt_pd {
+ struct ib_pd ibpd;
+ int user; /* non-zero if created from user space */
+};
+
+/* Address handle */
+struct rvt_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+ atomic_t refcount;
+ u8 vl;
+ u8 log_pmtu;
+};
+
+struct rvt_dev_info;
+struct rvt_swqe;
+struct rvt_driver_provided {
+ /*
+ * Which functions are required depends on which verbs rdmavt is
+ * providing and which verbs the driver is overriding. See
+ * check_support() for details.
+ */
+
+	/* Passed to ib core registration. Callback to create sysfs files */
+ int (*port_callback)(struct ib_device *, u8, struct kobject *);
+
+ /*
+	 * Returns a string identifying the device being registered. This is
+	 * primarily used for error and debug messages on the console.
+ */
+ const char * (*get_card_name)(struct rvt_dev_info *rdi);
+
+ /*
+	 * Returns a pointer to the underlying hardware's PCI device. This is
+	 * used to identify the hardware being referenced in output messages.
+ */
+ struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);
+
+ /*
+ * Allocate a private queue pair data structure for driver specific
+ * information which is opaque to rdmavt.
+ */
+ void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ gfp_t gfp);
+
+ /*
+ * Free the driver's private qp structure.
+ */
+ void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+
+ /*
+	 * Inform the driver that the qp in question has been reset so
+ * that it can clean up anything it needs to.
+ */
+ void (*notify_qp_reset)(struct rvt_qp *qp);
+
+ /*
+	 * Give the driver notice that there is send work to do. It is up to
+	 * the driver to actually push the packets out; this just queues the
+	 * work with the driver. There are two variants here. The no_lock
+ * version requires the s_lock not to be held. The other assumes the
+ * s_lock is held.
+ */
+ void (*schedule_send)(struct rvt_qp *qp);
+ void (*schedule_send_no_lock)(struct rvt_qp *qp);
+
+ /*
+ * Sometimes rdmavt needs to kick the driver's send progress. That is
+	 * done by this callback.
+ */
+ void (*do_send)(struct rvt_qp *qp);
+
+ /*
+ * Get a path mtu from the driver based on qp attributes.
+ */
+ int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr);
+
+ /*
+ * Notify driver that it needs to flush any outstanding IO requests that
+ * are waiting on a qp.
+ */
+ void (*flush_qp_waiters)(struct rvt_qp *qp);
+
+ /*
+ * Notify driver to stop its queue of sending packets. Nothing else
+ * should be posted to the queue pair after this has been called.
+ */
+ void (*stop_send_queue)(struct rvt_qp *qp);
+
+ /*
+	 * Have the driver drain any in-progress operations.
+ */
+ void (*quiesce_qp)(struct rvt_qp *qp);
+
+ /*
+	 * Inform the driver that a qp has gone into the error state.
+ */
+ void (*notify_error_qp)(struct rvt_qp *qp);
+
+ /*
+ * Get an MTU for a qp.
+ */
+ u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ u32 pmtu);
+ /*
+ * Convert an mtu to a path mtu
+ */
+ int (*mtu_to_path_mtu)(u32 mtu);
+
+ /*
+ * Get the guid of a port in big endian byte order
+ */
+ int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
+ int guid_index, __be64 *guid);
+
+ /*
+ * Query driver for the state of the port.
+ */
+ int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
+ struct ib_port_attr *props);
+
+ /*
+ * Tell driver to shutdown a port
+ */
+ int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);
+
+ /* Tell driver to send a trap for changed port capabilities */
+ void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);
+
+ /*
+ * The following functions can be safely ignored completely. Any use of
+ * these is checked for NULL before blindly calling. Rdmavt should also
+ * be functional if drivers omit these.
+ */
+
+ /* Called to inform the driver that all qps should now be freed. */
+ unsigned (*free_all_qps)(struct rvt_dev_info *rdi);
+
+ /* Driver specific AH validation */
+ int (*check_ah)(struct ib_device *, struct ib_ah_attr *);
+
+ /* Inform the driver a new AH has been created */
+ void (*notify_new_ah)(struct ib_device *, struct ib_ah_attr *,
+ struct rvt_ah *);
+
+	/* Let the driver pick the next queue pair number */
+ int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port_num, gfp_t gfp);
+
+	/* Determine if it is safe or allowed to modify the qp */
+ int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+ /* Driver specific QP modification/notification-of */
+ void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+ /* Driver specific work request checking */
+ int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+ /* Notify driver a mad agent has been created */
+ void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
+
+ /* Notify driver a mad agent has been removed */
+ void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
+
+};
+
+struct rvt_dev_info {
+ struct ib_device ibdev; /* Keep this first. Nothing above here */
+
+ /*
+	 * Prior to calling for registration, the driver is responsible for
+ * allocating space for this structure.
+ *
+ * The driver will also be responsible for filling in certain members of
+ * dparms.props. The driver needs to fill in dparms exactly as it would
+ * want values reported to a ULP. This will be returned to the caller
+ * in rdmavt's device. The driver should also therefore refrain from
+ * modifying this directly after registration with rdmavt.
+ */
+
+ /* Driver specific properties */
+ struct rvt_driver_params dparms;
+
+ struct rvt_mregion __rcu *dma_mr;
+ struct rvt_lkey_table lkey_table;
+
+ /* Driver specific helper functions */
+ struct rvt_driver_provided driver_f;
+
+ /* Internal use */
+ int n_pds_allocated;
+ spinlock_t n_pds_lock; /* Protect pd allocated count */
+
+ int n_ahs_allocated;
+ spinlock_t n_ahs_lock; /* Protect ah allocated count */
+
+ u32 n_srqs_allocated;
+ spinlock_t n_srqs_lock; /* Protect srqs allocated count */
+
+ int flags;
+ struct rvt_ibport **ports;
+
+ /* QP */
+ struct rvt_qp_ibdev *qp_dev;
+ u32 n_qps_allocated; /* number of QPs allocated for device */
+ u32 n_rc_qps; /* number of RC QPs allocated for device */
+ u32 busy_jiffies; /* timeout scaling based on RC QP count */
+ spinlock_t n_qps_lock; /* protect qps, rc qps and busy jiffy counts */
+
+ /* memory maps */
+ struct list_head pending_mmaps;
+ spinlock_t mmap_offset_lock; /* protect mmap_offset */
+ u32 mmap_offset;
+ spinlock_t pending_lock; /* protect pending mmap list */
+
+ /* CQ */
+ struct kthread_worker *worker; /* per device cq worker */
+ u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock; /* protect count of in use cqs */
+
+ /* Multicast */
+ u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+ spinlock_t n_mcast_grps_lock;
+
+};
+
+static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct rvt_pd, ibpd);
+}
+
+static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct rvt_ah, ibah);
+}
+
+static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct rvt_dev_info, ibdev);
+}
+
+static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct rvt_srq, ibsrq);
+}
+
+static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct rvt_qp, ibqp);
+}
+
+static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
+{
+ /*
+ * All ports have same number of pkeys.
+ */
+ return rdi->dparms.npkeys;
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ */
+static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
+ int port_index,
+ unsigned index)
+{
+ if (index >= rvt_get_npkeys(rdi))
+ return 0;
+ else
+ return rdi->ports[port_index]->pkey_table[index];
+}
+
+/**
+ * rvt_lookup_qpn - return the QP with the given QPN
+ * @ibp: the ibport
+ * @qpn: the QP number to look up
+ *
+ * The caller must hold the rcu_read_lock(), and keep the lock until
+ * the returned qp is no longer in use.
+ */
+/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
+static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
+ struct rvt_ibport *rvp,
+ u32 qpn) __must_hold(RCU)
+{
+ struct rvt_qp *qp = NULL;
+
+ if (unlikely(qpn <= 1)) {
+ qp = rcu_dereference(rvp->qp[qpn]);
+ } else {
+ u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
+
+ for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
+ qp = rcu_dereference(qp->next))
+ if (qp->ibqp.qp_num == qpn)
+ break;
+ }
+ return qp;
+}
+
+struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
+int rvt_register_device(struct rvt_dev_info *rvd);
+void rvt_unregister_device(struct rvt_dev_info *rvd);
+int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
+ int port_index, u16 *pkey_table);
+int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
+ struct rvt_sge *isge, struct ib_sge *sge, int acc);
+struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid);
+
+#endif /* DEF_RDMA_VT_H */
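
To make the registration flow above concrete, here is a minimal sketch assuming a hypothetical driver named "foo": only the rvt_* calls, RVT_MAX_PKEY_VALUES and the dparms/driver_f members come from rdma_vt.h; everything prefixed foo_ is invented for illustration and the remaining mandatory driver_f callbacks are elided.

static const char *foo_get_card_name(struct rvt_dev_info *rdi)
{
	return "foo_hca0";	/* shown in rdmavt error/debug messages */
}

static int foo_register_with_rdmavt(int nports)
{
	struct rvt_dev_info *rdi;

	rdi = rvt_alloc_device(sizeof(*rdi), nports);
	if (!rdi)
		return -ENOMEM;

	/* Fill dparms exactly as the values should be reported to ULPs */
	rdi->dparms.nports = nports;
	rdi->dparms.npkeys = RVT_MAX_PKEY_VALUES;

	/* Hook up the callbacks rdmavt will call back into the driver with */
	rdi->driver_f.get_card_name = foo_get_card_name;
	/* ... the remaining required driver_f members go here ... */

	return rvt_register_device(rdi);
}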
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
new file mode 100644
index 000000000000..51fd00b243d0
--- /dev/null
+++ b/include/rdma/rdmavt_cq.h
@@ -0,0 +1,99 @@
+#ifndef DEF_RDMAVT_INCCQ_H
+#define DEF_RDMAVT_INCCQ_H
+
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <rdma/ib_user_verbs.h>
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define RVT_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct rvt_cq_wc {
+ u32 head; /* index of next entry to fill */
+ u32 tail; /* index of next ib_poll_cq() entry */
+ union {
+ /* these are actually size ibcq.cqe + 1 */
+ struct ib_uverbs_wc uqueue[0];
+ struct ib_wc kqueue[0];
+ };
+};
+
+/*
+ * The completion queue structure.
+ */
+struct rvt_cq {
+ struct ib_cq ibcq;
+ struct kthread_work comptask;
+ spinlock_t lock; /* protect changes in this struct */
+ u8 notify;
+ u8 triggered;
+ struct rvt_dev_info *rdi;
+ struct rvt_cq_wc *queue;
+ struct rvt_mmap_info *ip;
+};
+
+static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct rvt_cq, ibcq);
+}
+
+void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
+
+#endif          /* DEF_RDMAVT_INCCQ_H */
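
The head/tail layout of rvt_cq_wc is easier to follow with a worked example. The following is an illustrative sketch, not the rdmavt implementation: a kernel-side poll loop draining completed entries from the ring, which holds ibcq.cqe + 1 slots; locking against rvt_cq_enter() is omitted for brevity.

static int example_poll_cq(struct rvt_cq *cq, int num_entries,
			   struct ib_wc *entry)
{
	struct rvt_cq_wc *wc = cq->queue;
	u32 tail = wc->tail;
	int npolled = 0;

	/* Entries between tail and head are completed and ready to reap */
	while (npolled < num_entries && tail != wc->head) {
		entry[npolled++] = wc->kqueue[tail];
		/* The ring has ibcq.cqe + 1 slots, so wrap after index cqe */
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;
	return npolled;
}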
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
new file mode 100644
index 000000000000..5edffdca8c53
--- /dev/null
+++ b/include/rdma/rdmavt_mr.h
@@ -0,0 +1,139 @@
+#ifndef DEF_RDMAVT_INCMR_H
+#define DEF_RDMAVT_INCMR_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
+ * drivers no longer need access to the MR directly.
+ */
+
+/*
+ * A segment is a linear region of low physical memory.
+ * Used by the verbs layer.
+ */
+struct rvt_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of rvt_segs that fit in a page. */
+#define RVT_SEGSZ (PAGE_SIZE / sizeof(struct rvt_seg))
+
+struct rvt_segarray {
+ struct rvt_seg segs[RVT_SEGSZ];
+};
+
+struct rvt_mregion {
+ struct ib_pd *pd; /* shares refcnt of ibmr.pd */
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of rvt_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+	u8  page_shift;		/* 0 - non-uniform or non-power-of-2 page sizes */
+ u8 lkey_published; /* in global table */
+ struct completion comp; /* complete when refcount goes to zero */
+ atomic_t refcount;
+ struct rvt_segarray *map[0]; /* the segments */
+};
+
+#define RVT_MAX_LKEY_TABLE_BITS 23
+
+struct rvt_lkey_table {
+ spinlock_t lock; /* protect changes in this struct */
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
+ u32 max; /* size of the table */
+ struct rvt_mregion __rcu **table;
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct rvt_sge {
+ struct rvt_mregion *mr;
+ void *vaddr; /* kernel virtual address of segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+struct rvt_sge_state {
+ struct rvt_sge *sg_list; /* next SGE to be used if any */
+ struct rvt_sge sge; /* progress state for the current SGE */
+ u32 total_len;
+ u8 num_sge;
+};
+
+static inline void rvt_put_mr(struct rvt_mregion *mr)
+{
+ if (unlikely(atomic_dec_and_test(&mr->refcount)))
+ complete(&mr->comp);
+}
+
+static inline void rvt_get_mr(struct rvt_mregion *mr)
+{
+ atomic_inc(&mr->refcount);
+}
+
+static inline void rvt_put_ss(struct rvt_sge_state *ss)
+{
+ while (ss->num_sge) {
+ rvt_put_mr(ss->sge.mr);
+ if (--ss->num_sge)
+ ss->sge = *ss->sg_list++;
+ }
+}
+
+#endif          /* DEF_RDMAVT_INCMR_H */
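
The refcount helpers above pair up as get/put around every use of a region, and the completion lets a teardown path wait for outstanding users. A minimal sketch of that pattern, with the example_* names invented for illustration:

static void example_use_region(struct rvt_mregion *mr)
{
	rvt_get_mr(mr);			/* hold the region while touching it */
	/* ... copy data described by this region ... */
	rvt_put_mr(mr);			/* the final put completes mr->comp */
}

static void example_wait_for_users(struct rvt_mregion *mr)
{
	rvt_put_mr(mr);			/* drop the reference held while published */
	wait_for_completion(&mr->comp);	/* returns once all users have put */
}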
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
new file mode 100644
index 000000000000..497e59065c2c
--- /dev/null
+++ b/include/rdma/rdmavt_qp.h
@@ -0,0 +1,446 @@
+#ifndef DEF_RDMAVT_INCQP_H
+#define DEF_RDMAVT_INCQP_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_verbs.h>
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define RVT_R_WRID_VALID 0
+#define RVT_R_REWIND_SGE 1
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define RVT_R_REUSE_SGE 0x01
+#define RVT_R_RDMAR_SEQ 0x02
+#define RVT_R_RSP_NAK 0x04
+#define RVT_R_RSP_SEND 0x08
+#define RVT_R_COMM_EST 0x10
+
+/*
+ * Bit definitions for s_flags.
+ *
+ * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
+ * RVT_S_BUSY - send tasklet is processing the QP
+ * RVT_S_TIMER - the RC retry timer is active
+ * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
+ * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * RVT_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
+ * before processing the next SWQE
+ * RVT_S_WAIT_RNR - waiting for RNR timeout
+ * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA
+ * RVT_S_WAIT_PIO - waiting for a send buffer to be available
+ * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets
+ * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
+ * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
+ * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
+ * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
+ * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
+ * RVT_S_ECN - a BECN was queued to the send engine
+ */
+#define RVT_S_SIGNAL_REQ_WR 0x0001
+#define RVT_S_BUSY 0x0002
+#define RVT_S_TIMER 0x0004
+#define RVT_S_RESP_PENDING 0x0008
+#define RVT_S_ACK_PENDING 0x0010
+#define RVT_S_WAIT_FENCE 0x0020
+#define RVT_S_WAIT_RDMAR 0x0040
+#define RVT_S_WAIT_RNR 0x0080
+#define RVT_S_WAIT_SSN_CREDIT 0x0100
+#define RVT_S_WAIT_DMA 0x0200
+#define RVT_S_WAIT_PIO 0x0400
+#define RVT_S_WAIT_PIO_DRAIN 0x0800
+#define RVT_S_WAIT_TX 0x1000
+#define RVT_S_WAIT_DMA_DESC 0x2000
+#define RVT_S_WAIT_KMEM 0x4000
+#define RVT_S_WAIT_PSN 0x8000
+#define RVT_S_WAIT_ACK 0x10000
+#define RVT_S_SEND_ONE 0x20000
+#define RVT_S_UNLIMITED_CREDIT 0x40000
+#define RVT_S_AHG_VALID 0x80000
+#define RVT_S_AHG_CLEAR 0x100000
+#define RVT_S_ECN 0x200000
+
+/*
+ * Wait flags that would prevent any packet type from being sent.
+ */
+#define RVT_S_ANY_WAIT_IO (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
+ RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
+
+/*
+ * Wait flags that would prevent send work requests from making progress.
+ */
+#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
+ RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
+ RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)
+
+#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
+
+/* Number of bits to pay attention to in the opcode for checking qp type */
+#define RVT_OPCODE_QP_MASK 0xE0
+
+/* Flags for checking QP state (see ib_rvt_state_ops[]) */
+#define RVT_POST_SEND_OK 0x01
+#define RVT_POST_RECV_OK 0x02
+#define RVT_PROCESS_RECV_OK 0x04
+#define RVT_PROCESS_SEND_OK 0x08
+#define RVT_PROCESS_NEXT_SEND_OK 0x10
+#define RVT_FLUSH_SEND 0x20
+#define RVT_FLUSH_RECV 0x40
+#define RVT_PROCESS_OR_FLUSH_SEND \
+ (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct rvt_swqe {
+ union {
+ struct ib_send_wr wr; /* don't use wr.sg_list */
+ struct ib_ud_wr ud_wr;
+ struct ib_reg_wr reg_wr;
+ struct ib_rdma_wr rdma_wr;
+ struct ib_atomic_wr atomic_wr;
+ };
+ u32 psn; /* first packet sequence number */
+ u32 lpsn; /* last packet sequence number */
+ u32 ssn; /* send sequence number */
+ u32 length; /* total length of data in sg_list */
+ struct rvt_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct rvt_rwqe {
+ u64 wr_id;
+ u8 num_sge;
+ struct ib_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct rvt_rwq {
+ u32 head; /* new work requests posted to the head */
+ u32 tail; /* receives pull requests from here. */
+ struct rvt_rwqe wq[0];
+};
+
+struct rvt_rq {
+ struct rvt_rwq *wq;
+ u32 size; /* size of RWQE array */
+ u8 max_sge;
+ /* protect changes in this struct */
+ spinlock_t lock ____cacheline_aligned_in_smp;
+};
+
+/*
+ * This structure is used by rvt_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct rvt_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ void *obj;
+ __u64 offset;
+ struct kref ref;
+ unsigned size;
+};
+
+#define RVT_MAX_RDMA_ATOMIC 16
+
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send a RDMA read response or atomic operation.
+ */
+struct rvt_ack_entry {
+ u8 opcode;
+ u8 sent;
+ u32 psn;
+ u32 lpsn;
+ union {
+ struct rvt_sge rdma_sge;
+ u64 atomic_data;
+ };
+};
+
+#define RC_QP_SCALING_INTERVAL 5
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock in that order
+ * which only happens in modify_qp() or changing the QP 'state'.
+ */
+struct rvt_qp {
+ struct ib_qp ibqp;
+ void *priv; /* Driver private data */
+ /* read mostly fields above and below */
+ struct ib_ah_attr remote_ah_attr;
+ struct ib_ah_attr alt_ah_attr;
+ struct rvt_qp __rcu *next; /* link list for QPN hash table */
+ struct rvt_swqe *s_wq; /* send work queue */
+ struct rvt_mmap_info *ip;
+
+ unsigned long timeout_jiffies; /* computed from timeout */
+
+ enum ib_mtu path_mtu;
+ int srate_mbps; /* s_srate (below) converted to Mbit/s */
+ pid_t pid; /* pid for user mode QPs */
+ u32 remote_qpn;
+ u32 qkey; /* QKEY for this QP (for UD or RD) */
+ u32 s_size; /* send work queue size */
+ u32 s_ahgpsn; /* set to the psn in the copy of the header */
+
+ u16 pmtu; /* decoded from path_mtu */
+ u8 log_pmtu; /* shift for pmtu */
+ u8 state; /* QP state */
+ u8 allowed_ops; /* high order bits of allowed opcodes */
+ u8 qp_access_flags;
+ u8 alt_timeout; /* Alternate path timeout for this QP */
+ u8 timeout; /* Timeout for this QP */
+ u8 s_srate;
+ u8 s_mig_state;
+ u8 port_num;
+ u8 s_pkey_index; /* PKEY index to use */
+ u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
+ u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
+ u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
+ u8 s_retry_cnt; /* number of times to retry */
+ u8 s_rnr_retry_cnt;
+ u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
+ u8 s_max_sge; /* size of s_wq->sg_list */
+ u8 s_draining;
+
+ /* start of read/write fields */
+ atomic_t refcount ____cacheline_aligned_in_smp;
+ wait_queue_head_t wait;
+
+ struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
+ ____cacheline_aligned_in_smp;
+ struct rvt_sge_state s_rdma_read_sge;
+
+ spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
+ u32 r_psn; /* expected rcv packet sequence number */
+ unsigned long r_aflags;
+ u64 r_wr_id; /* ID for current receive WQE */
+ u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
+ u32 r_len; /* total length of r_sge */
+ u32 r_rcv_len; /* receive data len processed */
+ u32 r_msn; /* message sequence number */
+
+ u8 r_state; /* opcode of last packet received */
+ u8 r_flags;
+ u8 r_head_ack_queue; /* index into s_ack_queue[] */
+
+ struct list_head rspwait; /* link for waiting to respond */
+
+ struct rvt_sge_state r_sge; /* current receive data */
+ struct rvt_rq r_rq; /* receive work queue */
+
+ /* post send line */
+ spinlock_t s_hlock ____cacheline_aligned_in_smp;
+ u32 s_head; /* new entries added here */
+ u32 s_next_psn; /* PSN for next request */
+ u32 s_avail; /* number of entries avail */
+ u32 s_ssn; /* SSN of tail entry */
+
+ spinlock_t s_lock ____cacheline_aligned_in_smp;
+ u32 s_flags;
+ struct rvt_sge_state *s_cur_sge;
+ struct rvt_swqe *s_wqe;
+ struct rvt_sge_state s_sge; /* current send request data */
+ struct rvt_mregion *s_rdma_mr;
+ u32 s_cur_size; /* size of send packet in bytes */
+ u32 s_len; /* total length of s_sge */
+ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
+ u32 s_last_psn; /* last response PSN processed */
+ u32 s_sending_psn; /* lowest PSN that is being sent */
+ u32 s_sending_hpsn; /* highest PSN that is being sent */
+ u32 s_psn; /* current packet sequence number */
+ u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
+ u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
+ u32 s_tail; /* next entry to process */
+ u32 s_cur; /* current work queue entry */
+ u32 s_acked; /* last un-ACK'ed entry */
+ u32 s_last; /* last completed entry */
+ u32 s_lsn; /* limit sequence number (credit) */
+ u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u16 s_rdma_ack_cnt;
+ s8 s_ahgidx;
+ u8 s_state; /* opcode of last packet sent */
+ u8 s_ack_state; /* opcode of packet to ACK */
+ u8 s_nak_state; /* non-zero if NAK is pending */
+ u8 r_nak_state; /* non-zero if NAK is pending */
+ u8 s_retry; /* requester retry counter */
+ u8 s_rnr_retry; /* requester RNR retry counter */
+ u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
+ u8 s_tail_ack_queue; /* index into s_ack_queue[] */
+
+ struct rvt_sge_state s_ack_rdma_sge;
+ struct timer_list s_timer;
+
+ /*
+ * This sge list MUST be last. Do not add anything below here.
+ */
+ struct rvt_sge r_sg_list[0] /* verified SGEs */
+ ____cacheline_aligned_in_smp;
+};
+
+struct rvt_srq {
+ struct ib_srq ibsrq;
+ struct rvt_rq rq;
+ struct rvt_mmap_info *ip;
+ /* send signal when number of RWQEs < limit */
+ u32 limit;
+};
+
+#define RVT_QPN_MAX BIT(24)
+#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
+#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
+#define RVT_QPN_MASK 0xFFFFFF
+
+/*
+ * QPN-map pages start out as NULL; they get allocated upon
+ * first use and are never deallocated. This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct rvt_qpn_map {
+ void *page;
+};
+
+struct rvt_qpn_table {
+ spinlock_t lock; /* protect changes to the qp table */
+ unsigned flags; /* flags for QP0/1 allocated for each port */
+ u32 last; /* last QP number allocated */
+ u32 nmaps; /* size of the map table */
+ u16 limit;
+ u8 incr;
+ /* bit map of free QP numbers other than 0/1 */
+ struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
+};
+
+struct rvt_qp_ibdev {
+ u32 qp_table_size;
+ u32 qp_table_bits;
+ struct rvt_qp __rcu **qp_table;
+ spinlock_t qpt_lock; /* qptable lock */
+ struct rvt_qpn_table qpn_table;
+};
+
+/*
+ * There is one struct rvt_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct rvt_mcast_qp.
+ */
+struct rvt_mcast_qp {
+ struct list_head list;
+ struct rvt_qp *qp;
+};
+
+struct rvt_mcast {
+ struct rb_node rb_node;
+ union ib_gid mgid;
+ struct list_head qp_list;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+ int n_attached;
+};
+
+/*
+ * Since struct rvt_swqe is not a fixed size, we can't simply index into
+ * struct rvt_qp.s_wq. This function does the array index computation.
+ */
+static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
+ unsigned n)
+{
+ return (struct rvt_swqe *)((char *)qp->s_wq +
+ (sizeof(struct rvt_swqe) +
+ qp->s_max_sge *
+ sizeof(struct rvt_sge)) * n);
+}
+
+/*
+ * Since struct rvt_rwqe is not a fixed size, we can't simply index into
+ * struct rvt_rwq.wq. This function does the array index computation.
+ */
+static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
+{
+ return (struct rvt_rwqe *)
+ ((char *)rq->wq->wq +
+ (sizeof(struct rvt_rwqe) +
+ rq->max_sge * sizeof(struct ib_sge)) * n);
+}
+
+extern const int ib_rvt_state_ops[];
+
+struct rvt_dev_info;
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
+
+#endif /* DEF_RDMAVT_INCQP_H */
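
Because rvt_swqe and rvt_rwqe are variable-sized, the accessor helpers above are the only safe way to walk the rings. A short sketch, assuming the usual s_tail/s_head producer-consumer convention and ignoring locking, of dumping the send WQEs still pending on a QP (the function name is invented for illustration):

static void example_dump_pending_swqes(struct rvt_qp *qp)
{
	u32 i = qp->s_tail;

	while (i != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);

		pr_info("swqe %u: psn 0x%x ssn %u len %u\n",
			i, wqe->psn, wqe->ssn, wqe->length);
		if (++i == qp->s_size)
			i = 0;
	}
}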
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 4dce116bfd80..9ebab3a8cf0a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -22,7 +22,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
* on-the-wire Rx packet header
* - all multibyte fields should be in network byte order
*/
-struct rxrpc_header {
+struct rxrpc_wire_header {
__be32 epoch; /* client boot timestamp */
__be32 cid; /* connection and channel ID */
@@ -68,10 +68,19 @@ struct rxrpc_header {
} __packed;
-#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
-
extern const char *rxrpc_pkts[];
+#define RXRPC_SUPPORTED_PACKET_TYPES ( \
+ (1 << RXRPC_PACKET_TYPE_DATA) | \
+ (1 << RXRPC_PACKET_TYPE_ACK) | \
+ (1 << RXRPC_PACKET_TYPE_BUSY) | \
+ (1 << RXRPC_PACKET_TYPE_ABORT) | \
+ (1 << RXRPC_PACKET_TYPE_ACKALL) | \
+ (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
+ (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
+ /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
+ (1 << RXRPC_PACKET_TYPE_VERSION))
+
/*****************************************************************************/
/*
* jumbo packet secondary header
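
The new RXRPC_SUPPORTED_PACKET_TYPES value is a bitmap keyed by packet type; a receive path could use it along these lines (illustrative only, not code from this patch):

static bool example_packet_type_supported(u8 type)
{
	/* Each supported RXRPC_PACKET_TYPE_* contributes one bit to the mask */
	return type < 32 && (RXRPC_SUPPORTED_PACKET_TYPES & (1 << type));
}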
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index c7fa36c335c9..33b29ead3d55 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -103,8 +103,6 @@ int cpm_muram_init(void);
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
int cpm_muram_free(unsigned long offset);
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
-unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
- void *data);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
new file mode 100644
index 000000000000..8893c5eacd07
--- /dev/null
+++ b/include/soc/mediatek/smi.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MTK_IOMMU_SMI_H
+#define MTK_IOMMU_SMI_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+
+#ifdef CONFIG_MTK_SMI
+
+#define MTK_LARB_NR_MAX 8
+
+#define MTK_SMI_MMU_EN(port) BIT(port)
+
+struct mtk_smi_larb_iommu {
+ struct device *dev;
+ unsigned int mmu;
+};
+
+struct mtk_smi_iommu {
+ unsigned int larb_nr;
+ struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
+};
+
+/*
+ * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter.
+ * It also initialize some basic setting(like iommu).
+ *                   It also initializes some basic settings (like the IOMMU).
+ * Both should be called in non-atomic context.
+ *
+ * Returns 0 if successful, negative on failure.
+ */
+int mtk_smi_larb_get(struct device *larbdev);
+void mtk_smi_larb_put(struct device *larbdev);
+
+#else
+
+static inline int mtk_smi_larb_get(struct device *larbdev)
+{
+ return 0;
+}
+
+static inline void mtk_smi_larb_put(struct device *larbdev) { }
+
+#endif
+
+#endif
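
Usage of the two entry points is a simple bracket around any work that goes through the local arbiter. The sketch below is illustrative, with the actual DMA step reduced to a comment; as the header notes, it must run in non-atomic context.

static int example_use_larb(struct device *larbdev)
{
	int ret;

	ret = mtk_smi_larb_get(larbdev);	/* power/clocks on, MMU bits set up */
	if (ret)
		return ret;

	/* ... issue DMA through the ports behind this local arbiter ... */

	mtk_smi_larb_put(larbdev);		/* power/clocks off again */
	return 0;
}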
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1b09cac06508..3e0dd86360a2 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -144,12 +144,6 @@ enum se_cmd_flags_table {
SCF_USE_CPUID = 0x00800000,
};
-/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
-enum transport_lunflags_table {
- TRANSPORT_LUNFLAGS_READ_ONLY = 0x01,
- TRANSPORT_LUNFLAGS_READ_WRITE = 0x02,
-};
-
/*
* Used by transport_send_check_condition_and_sense()
* to signal which ASC/ASCQ sense payload should be built.
@@ -633,11 +627,10 @@ struct se_lun_acl {
};
struct se_dev_entry {
- /* See transport_lunflags_table */
u64 mapped_lun;
u64 pr_res_key;
u64 creation_time;
- u32 lun_flags;
+ bool lun_access_ro;
u32 attach_count;
atomic_long_t total_cmds;
atomic_long_t read_bytes;
@@ -711,7 +704,7 @@ struct se_lun {
u64 unpacked_lun;
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
- u32 lun_access;
+ bool lun_access_ro;
u32 lun_index;
/* RELATIVE TARGET PORT IDENTIFER */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 56653408f53b..685a51aa98cc 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -108,6 +108,12 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);
+struct se_session *target_alloc_session(struct se_portal_group *,
+ unsigned int, unsigned int, enum target_prot_op prot_op,
+ const char *, void *,
+ int (*callback)(struct se_portal_group *,
+ struct se_session *, void *));
+
struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 111e5666e5eb..e215bf68f521 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -350,6 +350,61 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
);
#endif
+TRACE_EVENT(mm_compaction_kcompactd_sleep,
+
+ TP_PROTO(int nid),
+
+ TP_ARGS(nid),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ ),
+
+ TP_printk("nid=%d", __entry->nid)
+);
+
+DECLARE_EVENT_CLASS(kcompactd_wake_template,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, order)
+ __field(enum zone_type, classzone_idx)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->order = order;
+ __entry->classzone_idx = classzone_idx;
+ ),
+
+ TP_printk("nid=%d order=%d classzone_idx=%-8s",
+ __entry->nid,
+ __entry->order,
+ __print_symbolic(__entry->classzone_idx, ZONE_TYPE))
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx)
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx)
+);
+
#endif /* _TRACE_COMPACTION_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index a1b488809f06..0f565845707b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -52,6 +52,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ META_FLUSH, "META_FLUSH" }, \
{ INMEM, "INMEM" }, \
{ INMEM_DROP, "INMEM_DROP" }, \
+ { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
@@ -727,7 +728,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(block_t, blkaddr)
+ __field(block_t, old_blkaddr)
+ __field(block_t, new_blkaddr)
__field(int, rw)
__field(int, type)
),
@@ -736,16 +738,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->dev = page->mapping->host->i_sb->s_dev;
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
- __entry->blkaddr = fio->blk_addr;
+ __entry->old_blkaddr = fio->old_blkaddr;
+ __entry->new_blkaddr = fio->new_blkaddr;
__entry->rw = fio->rw;
__entry->type = fio->type;
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "blkaddr = 0x%llx, rw = %s%s, type = %s",
+		"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s%s, type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- (unsigned long long)__entry->blkaddr,
+ (unsigned long long)__entry->old_blkaddr,
+ (unsigned long long)__entry->new_blkaddr,
show_bio_type(__entry->rw),
show_block_type(__entry->type))
);
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 4cf6bac4686d..d60096cddb2a 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -37,7 +37,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->tb_id = tb_id;
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
- __entry->tos = flp->flowi6_tos;
+ __entry->tos = ip6_tclass(flp->flowlabel);
__entry->scope = flp->flowi6_scope;
__entry->flags = flp->flowi6_flags;
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ca7217389067..6b2e154fd23a 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -140,42 +140,19 @@ DEFINE_EVENT(kmem_free, kfree,
TP_ARGS(call_site, ptr)
);
-DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
+DEFINE_EVENT(kmem_free, kmem_cache_free,
TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr),
-
- /*
- * This trace can be potentially called from an offlined cpu.
- * Since trace points use RCU and RCU should not be used from
- * offline cpus, filter such calls out.
- * While this trace can be called from a preemptable section,
- * it has no impact on the condition since tasks can migrate
- * only from online cpus to other online cpus. Thus its safe
- * to use raw_smp_processor_id.
- */
- TP_CONDITION(cpu_online(raw_smp_processor_id()))
+ TP_ARGS(call_site, ptr)
);
-TRACE_EVENT_CONDITION(mm_page_free,
+TRACE_EVENT(mm_page_free,
TP_PROTO(struct page *page, unsigned int order),
TP_ARGS(page, order),
-
- /*
- * This trace can be potentially called from an offlined cpu.
- * Since trace points use RCU and RCU should not be used from
- * offline cpus, filter such calls out.
- * While this trace can be called from a preemptable section,
- * it has no impact on the condition since tasks can migrate
- * only from online cpus to other online cpus. Thus its safe
- * to use raw_smp_processor_id.
- */
- TP_CONDITION(cpu_online(raw_smp_processor_id())),
-
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
@@ -276,23 +253,12 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
TP_ARGS(page, order, migratetype)
);
-TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
+TRACE_EVENT(mm_page_pcpu_drain,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype),
- /*
- * This trace can be potentially called from an offlined cpu.
- * Since trace points use RCU and RCU should not be used from
- * offline cpus, filter such calls out.
- * While this trace can be called from a preemptable section,
- * it has no impact on the condition since tasks can migrate
- * only from online cpus to other online cpus. Thus its safe
- * to use raw_smp_processor_id.
- */
- TP_CONDITION(cpu_online(raw_smp_processor_id())),
-
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a849185c82f0..43cedbf0c759 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -111,15 +111,21 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
) : "none"
#if defined(CONFIG_X86)
-#define __VM_ARCH_SPECIFIC {VM_PAT, "pat" }
+#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
-#define __VM_ARCH_SPECIFIC {VM_SAO, "sao" }
+#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
-#define __VM_ARCH_SPECIFIC {VM_GROWSUP, "growsup" }
+#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
-#define __VM_ARCH_SPECIFIC {VM_MAPPED_COPY,"mappedcopy" }
+#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
#else
-#define __VM_ARCH_SPECIFIC {VM_ARCH_1, "arch_1" }
+#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
+#endif
+
+#if defined(CONFIG_X86)
+#define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" }
+#else
+#define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" }
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -138,19 +144,22 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
{VM_MAYEXEC, "mayexec" }, \
{VM_MAYSHARE, "mayshare" }, \
{VM_GROWSDOWN, "growsdown" }, \
+ {VM_UFFD_MISSING, "uffd_missing" }, \
{VM_PFNMAP, "pfnmap" }, \
{VM_DENYWRITE, "denywrite" }, \
- {VM_LOCKONFAULT, "lockonfault" }, \
+ {VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \
{VM_IO, "io" }, \
{VM_SEQ_READ, "seqread" }, \
{VM_RAND_READ, "randread" }, \
{VM_DONTCOPY, "dontcopy" }, \
{VM_DONTEXPAND, "dontexpand" }, \
+ {VM_LOCKONFAULT, "lockonfault" }, \
{VM_ACCOUNT, "account" }, \
{VM_NORESERVE, "noreserve" }, \
{VM_HUGETLB, "hugetlb" }, \
- __VM_ARCH_SPECIFIC , \
+ __VM_ARCH_SPECIFIC_1 , \
+ __VM_ARCH_SPECIFIC_2 , \
{VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
new file mode 100644
index 000000000000..81001f8b0db4
--- /dev/null
+++ b/include/trace/events/page_ref.h
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM page_ref
+
+#if !defined(_TRACE_PAGE_REF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_REF_H
+
+#include <linux/types.h>
+#include <linux/page_ref.h>
+#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
+
+DECLARE_EVENT_CLASS(page_ref_mod_template,
+
+ TP_PROTO(struct page *page, int v),
+
+ TP_ARGS(page, v),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(unsigned long, flags)
+ __field(int, count)
+ __field(int, mapcount)
+ __field(void *, mapping)
+ __field(int, mt)
+ __field(int, val)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = page_to_pfn(page);
+ __entry->flags = page->flags;
+ __entry->count = page_ref_count(page);
+ __entry->mapcount = page_mapcount(page);
+ __entry->mapping = page->mapping;
+ __entry->mt = get_pageblock_migratetype(page);
+ __entry->val = v;
+ ),
+
+ TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d",
+ __entry->pfn,
+ show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ __entry->count,
+ __entry->mapcount, __entry->mapping, __entry->mt,
+ __entry->val)
+);
+
+DEFINE_EVENT(page_ref_mod_template, page_ref_set,
+
+ TP_PROTO(struct page *page, int v),
+
+ TP_ARGS(page, v)
+);
+
+DEFINE_EVENT(page_ref_mod_template, page_ref_mod,
+
+ TP_PROTO(struct page *page, int v),
+
+ TP_ARGS(page, v)
+);
+
+DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
+
+ TP_PROTO(struct page *page, int v, int ret),
+
+ TP_ARGS(page, v, ret),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(unsigned long, flags)
+ __field(int, count)
+ __field(int, mapcount)
+ __field(void *, mapping)
+ __field(int, mt)
+ __field(int, val)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = page_to_pfn(page);
+ __entry->flags = page->flags;
+ __entry->count = page_ref_count(page);
+ __entry->mapcount = page_mapcount(page);
+ __entry->mapping = page->mapping;
+ __entry->mt = get_pageblock_migratetype(page);
+ __entry->val = v;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d",
+ __entry->pfn,
+ show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ __entry->count,
+ __entry->mapcount, __entry->mapping, __entry->mt,
+ __entry->val, __entry->ret)
+);
+
+DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_test,
+
+ TP_PROTO(struct page *page, int v, int ret),
+
+ TP_ARGS(page, v, ret)
+);
+
+DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_return,
+
+ TP_PROTO(struct page *page, int v, int ret),
+
+ TP_ARGS(page, v, ret)
+);
+
+DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_unless,
+
+ TP_PROTO(struct page *page, int v, int ret),
+
+ TP_ARGS(page, v, ret)
+);
+
+DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_freeze,
+
+ TP_PROTO(struct page *page, int v, int ret),
+
+ TP_ARGS(page, v, ret)
+);
+
+DEFINE_EVENT(page_ref_mod_template, page_ref_unfreeze,
+
+ TP_PROTO(struct page *page, int v),
+
+ TP_ARGS(page, v)
+);
+
+#endif /* _TRACE_PAGE_REF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
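
Each DEFINE_EVENT() above generates a trace_<event>() helper. As an illustrative sketch only, a hypothetical wrapper around an existing refcount primitive could hook one in as follows; the wrapper name is invented here and the series' real call sites are not shown in this header.

static inline int example_put_page_testzero(struct page *page)
{
	int ret = put_page_testzero(page);	/* drops one reference */

	trace_page_ref_mod_and_test(page, -1, ret);
	return ret;
}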
diff --git a/include/trace/events/sunvnet.h b/include/trace/events/sunvnet.h
new file mode 100644
index 000000000000..eb080b267e55
--- /dev/null
+++ b/include/trace/events/sunvnet.h
@@ -0,0 +1,139 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunvnet
+
+#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNVNET_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vnet_rx_one,
+
+ TP_PROTO(int lsid, int rsid, int index, int needs_ack),
+
+ TP_ARGS(lsid, rsid, index, needs_ack),
+
+ TP_STRUCT__entry(
+ __field(int, lsid)
+ __field(int, rsid)
+ __field(int, index)
+ __field(int, needs_ack)
+ ),
+
+ TP_fast_assign(
+ __entry->lsid = lsid;
+ __entry->rsid = rsid;
+ __entry->index = index;
+ __entry->needs_ack = needs_ack;
+ ),
+
+ TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d",
+ __entry->lsid, __entry->rsid,
+ __entry->index, __entry->needs_ack)
+);
+
+DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template,
+
+ TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+
+ TP_ARGS(lsid, rsid, ack_end, npkts),
+
+ TP_STRUCT__entry(
+ __field(int, lsid)
+ __field(int, rsid)
+ __field(int, ack_end)
+ __field(int, npkts)
+ ),
+
+ TP_fast_assign(
+ __entry->lsid = lsid;
+ __entry->rsid = rsid;
+ __entry->ack_end = ack_end;
+ __entry->npkts = npkts;
+ ),
+
+ TP_printk("(%x:%x) stopped ack for %d; npkts %d",
+ __entry->lsid, __entry->rsid,
+ __entry->ack_end, __entry->npkts)
+);
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack,
+ TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+ TP_ARGS(lsid, rsid, ack_end, npkts));
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack,
+ TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+ TP_ARGS(lsid, rsid, ack_end, npkts));
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack,
+ TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+ TP_ARGS(lsid, rsid, ack_end, npkts));
+
+TRACE_EVENT(vnet_rx_stopped_ack,
+
+ TP_PROTO(int lsid, int rsid, int end),
+
+ TP_ARGS(lsid, rsid, end),
+
+ TP_STRUCT__entry(
+ __field(int, lsid)
+ __field(int, rsid)
+ __field(int, end)
+ ),
+
+ TP_fast_assign(
+ __entry->lsid = lsid;
+ __entry->rsid = rsid;
+ __entry->end = end;
+ ),
+
+ TP_printk("(%x:%x) stopped ack for index %d",
+ __entry->lsid, __entry->rsid, __entry->end)
+);
+
+TRACE_EVENT(vnet_tx_trigger,
+
+ TP_PROTO(int lsid, int rsid, int start, int err),
+
+ TP_ARGS(lsid, rsid, start, err),
+
+ TP_STRUCT__entry(
+ __field(int, lsid)
+ __field(int, rsid)
+ __field(int, start)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->lsid = lsid;
+ __entry->rsid = rsid;
+ __entry->start = start;
+ __entry->err = err;
+ ),
+
+ TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s",
+ __entry->lsid, __entry->rsid, __entry->start,
+ __entry->err, __entry->err > 0 ? "(ok)" : " ")
+);
+
+TRACE_EVENT(vnet_skip_tx_trigger,
+
+ TP_PROTO(int lsid, int rsid, int last),
+
+ TP_ARGS(lsid, rsid, last),
+
+ TP_STRUCT__entry(
+ __field(int, lsid)
+ __field(int, rsid)
+ __field(int, last)
+ ),
+
+ TP_fast_assign(
+ __entry->lsid = lsid;
+ __entry->rsid = rsid;
+ __entry->last = last;
+ ),
+
+ TP_printk("(%x:%x) Skip Tx trigger. Last trigger sent was %d",
+ __entry->lsid, __entry->rsid, __entry->last)
+);
+#endif /* _TRACE_SUNVNET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 5738bb3e2343..2b4a8ff72d0d 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -8,6 +8,18 @@
#include <linux/thermal.h>
#include <linux/tracepoint.h>
+TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL);
+TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT);
+TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE);
+TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE);
+
+#define show_tzt_type(type) \
+ __print_symbolic(type, \
+ { THERMAL_TRIP_CRITICAL, "CRITICAL"}, \
+ { THERMAL_TRIP_HOT, "HOT"}, \
+ { THERMAL_TRIP_PASSIVE, "PASSIVE"}, \
+ { THERMAL_TRIP_ACTIVE, "ACTIVE"})
+
TRACE_EVENT(thermal_temperature,
TP_PROTO(struct thermal_zone_device *tz),
@@ -73,9 +85,9 @@ TRACE_EVENT(thermal_zone_trip,
__entry->trip_type = trip_type;
),
- TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
+ TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s",
__get_str(thermal_zone), __entry->id, __entry->trip,
- __entry->trip_type)
+ show_tzt_type(__entry->trip_type))
);
TRACE_EVENT(thermal_power_cpu_get_power,
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index bc8815f45f3b..9d14b1992108 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -34,13 +34,11 @@ TLB_FLUSH_REASON
#define EM(a,b) { a, b },
#define EMe(a,b) { a, b }
-TRACE_EVENT_CONDITION(tlb_flush,
+TRACE_EVENT(tlb_flush,
TP_PROTO(int reason, unsigned long pages),
TP_ARGS(reason, pages),
- TP_CONDITION(cpu_online(smp_processor_id())),
-
TP_STRUCT__entry(
__field( int, reason)
__field(unsigned long, pages)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index fff846b512e6..73614ce1d204 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -134,58 +134,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK
-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
+ return wb->memcg_css->cgroup->kn->ino;
}
-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
-{
- struct cgroup *cgrp = wb->memcg_css->cgroup;
- char *path;
-
- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
- WARN_ON_ONCE(path != buf);
-}
-
-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
-{
- if (wbc->wb)
- return __trace_wb_cgroup_size(wbc->wb);
- else
- return 2;
-}
-
-static inline void __trace_wbc_assign_cgroup(char *buf,
- struct writeback_control *wbc)
+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
if (wbc->wb)
- __trace_wb_assign_cgroup(buf, wbc->wb);
+ return __trace_wb_assign_cgroup(wbc->wb);
else
- strcpy(buf, "/");
+ return -1U;
}
-
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
-{
- return 2;
-}
-
-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
-{
- strcpy(buf, "/");
-}
-
-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return 2;
+ return -1U;
}
-static inline void __trace_wbc_assign_cgroup(char *buf,
- struct writeback_control *wbc)
+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
- strcpy(buf, "/");
+ return -1U;
}
#endif /* CONFIG_CGROUP_WRITEBACK */
@@ -201,7 +171,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
__array(char, name, 32)
__field(unsigned long, ino)
__field(int, sync_mode)
- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -209,14 +179,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
__entry->name,
__entry->ino,
__entry->sync_mode,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -246,7 +216,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
strncpy(__entry->name,
@@ -258,10 +228,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic = work->range_cyclic;
__entry->for_background = work->for_background;
__entry->reason = work->reason;
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -270,7 +240,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic,
__entry->for_background,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -300,15 +270,15 @@ DECLARE_EVENT_CLASS(writeback_class,
TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: cgroup=%s",
+ TP_printk("bdi %s: cgroup_ino=%u",
__entry->name,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
@@ -347,7 +317,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -361,12 +331,12 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup=%s",
+ "start=0x%lx end=0x%lx cgroup_ino=%u",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -377,7 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
)
@@ -398,7 +368,7 @@ TRACE_EVENT(writeback_queue_io,
__field(long, age)
__field(int, moved)
__field(int, reason)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
@@ -408,15 +378,15 @@ TRACE_EVENT(writeback_queue_io,
(jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -484,7 +454,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -496,13 +466,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
KBps(wb->balanced_dirty_ratelimit);
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu cgroup=%s",
+ "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
@@ -510,7 +480,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
__entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -548,7 +518,7 @@ TRACE_EVENT(balance_dirty_pages,
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -571,7 +541,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->period = period * 1000 / HZ;
__entry->pause = pause * 1000 / HZ;
__entry->paused = (jiffies - start_time) * 1000 / HZ;
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
@@ -580,7 +550,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -595,7 +565,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->pause, /* ms */
__entry->period, /* ms */
__entry->think, /* ms */
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -609,8 +579,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__field(unsigned long, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
- __dynamic_array(char, cgroup,
- __trace_wb_cgroup_size(inode_to_wb(inode)))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -619,16 +588,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
- __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -684,7 +653,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -696,11 +665,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
__entry->wrote = nr_to_write - wbc->nr_to_write;
- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu cgroup=%s",
+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
@@ -709,7 +678,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
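
The writeback tracepoints above now report the cgroup's kernfs inode number (cgroup_ino) instead of a full cgroup path. A trace consumer that still wants the path can walk the mounted cgroup hierarchy and match inode numbers. A minimal sketch, assuming the relevant hierarchy is mounted under /sys/fs/cgroup (the mount point is an assumption, not part of the change):

/* Sketch: resolve a cgroup_ino from the writeback tracepoints back to a
 * cgroup path by walking the cgroup mount and comparing st_ino.
 * The /sys/fs/cgroup mount point below is an assumption.
 */
#define _XOPEN_SOURCE 700
#include <ftw.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long target_ino;	/* cgroup_ino taken from the trace */

static int match_ino(const char *path, const struct stat *sb,
		     int typeflag, struct FTW *ftwbuf)
{
	if (typeflag == FTW_D && sb->st_ino == target_ino)
		printf("cgroup_ino %lu -> %s\n", target_ino, path);
	return 0;			/* keep walking */
}

int main(int argc, char **argv)
{
	if (argc != 2)
		return 1;
	target_ino = strtoul(argv[1], NULL, 0);
	return nftw("/sys/fs/cgroup", match_ino, 16, FTW_PHYS);
}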
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 1e3552037a5a..1abaf62c86fc 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -91,10 +91,15 @@ typedef struct siginfo {
int _trapno; /* TRAP # which caused the signal */
#endif
short _addr_lsb; /* LSB of the reported address */
- struct {
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
+ union {
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ void __user *_lower;
+ void __user *_upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ __u32 _pkey;
+ };
} _sigfault;
/* SIGPOLL */
@@ -137,6 +142,7 @@ typedef struct siginfo {
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
+#define si_pkey _sifields._sigfault._pkey
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#ifdef __ARCH_SIGSYS
@@ -206,7 +212,8 @@ typedef struct siginfo {
#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
#define SEGV_BNDERR (__SI_FAULT|3) /* failed address bound checks */
-#define NSIGSEGV 3
+#define SEGV_PKUERR (__SI_FAULT|4) /* failed protection key checks */
+#define NSIGSEGV 4
/*
* SIGBUS si_codes
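
The new SEGV_PKUERR code and si_pkey field let a SIGSEGV handler see which protection key denied the access. A minimal sketch of such a handler, assuming a C library new enough to expose SEGV_PKUERR and si_pkey (otherwise the field must be read from the raw _sigfault layout shown above); the handler body is purely illustrative.

/* Sketch: report protection-key faults delivered as SEGV_PKUERR.
 * Assumes the C library exposes SEGV_PKUERR and si_pkey.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	if (si->si_code == SEGV_PKUERR)
		fprintf(stderr, "pkey fault at %p, pkey=%u\n",
			si->si_addr, (unsigned int)si->si_pkey);
	_exit(1);		/* do not return to the faulting access */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);
	/* ... trigger a pkey-protected access here ... */
	return 0;
}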
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index fb8a41668382..67d632f1743d 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -90,4 +90,6 @@
#define SO_ATTACH_REUSEPORT_CBPF 51
#define SO_ATTACH_REUSEPORT_EBPF 52
+#define SO_CNX_ADVICE 53
+
#endif /* __ASM_GENERIC_SOCKET_H */
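
SO_CNX_ADVICE is a set-only socket option through which an application can hand the kernel a hint about perceived connection quality. A minimal sketch of setting it on a connected socket; the advice value and the fallback define are only illustrative.

/* Sketch: pass a connection advice hint via SO_CNX_ADVICE (write-only).
 * The value 1 below is purely illustrative.
 */
#include <sys/socket.h>
#include <stdio.h>

#ifndef SO_CNX_ADVICE
#define SO_CNX_ADVICE 53	/* value from the header change above */
#endif

int give_cnx_advice(int sockfd)
{
	int advice = 1;		/* illustrative value */

	if (setsockopt(sockfd, SOL_SOCKET, SO_CNX_ADVICE,
		       &advice, sizeof(advice)) < 0) {
		perror("setsockopt(SO_CNX_ADVICE)");
		return -1;
	}
	return 0;
}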
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b4e92eb12044..a0ebfe7c9a28 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -669,6 +669,7 @@ struct drm_set_client_cap {
__u64 value;
};
+#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
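
DRM_RDWR lets a PRIME export request a read/write dma-buf file descriptor, which a later mmap() of that fd with PROT_WRITE requires. A minimal sketch, assuming a GEM handle obtained from a driver-specific create ioctl and a libdrm-installed copy of the uapi header (both are assumptions for illustration):

/* Sketch: export a GEM handle as a writable dma-buf fd using DRM_RDWR.
 * The GEM handle is assumed to come from a prior driver-specific create
 * ioctl; the include path depends on the libdrm installation.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <libdrm/drm.h>		/* uapi drm.h; path is an assumption */

int export_writable(int drm_fd, __u32 gem_handle)
{
	struct drm_prime_handle args = {
		.handle = gem_handle,
		.flags = DRM_CLOEXEC | DRM_RDWR,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0) {
		perror("PRIME_HANDLE_TO_FD");
		return -1;
	}
	return args.fd;		/* dma-buf fd, mmap()-able with PROT_WRITE */
}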
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 50adb46204c2..c0217434d28d 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -487,6 +487,21 @@ struct drm_mode_crtc_lut {
__u64 blue;
};
+struct drm_color_ctm {
+ /* Conversion matrix in S31.32 format. */
+ __s64 matrix[9];
+};
+
+struct drm_color_lut {
+ /*
+ * Data is U0.16 fixed point format.
+ */
+ __u16 red;
+ __u16 green;
+ __u16 blue;
+ __u16 reserved;
+};
+
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
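
The new color management structures use fixed-point encodings: drm_color_lut entries are U0.16 (0x0000 is 0.0, 0xffff is 1.0) and the CTM coefficients are S31.32. A short sketch filling a gamma LUT in the U0.16 format; the LUT size, gamma value, and helper names are illustrative, not part of any kernel or libdrm API.

/* Sketch: fill a gamma LUT in the U0.16 format used by drm_color_lut.
 * lut_size and the 2.2 gamma below are illustrative values.
 */
#include <math.h>
#include <stdint.h>
#include <stdlib.h>

struct color_lut_entry {	/* mirrors the drm_color_lut layout */
	uint16_t red, green, blue, reserved;
};

static uint16_t to_u0_16(double v)
{
	if (v <= 0.0)
		return 0;
	if (v >= 1.0)
		return 0xffff;
	return (uint16_t)(v * 0xffff + 0.5);
}

int main(void)
{
	const int lut_size = 256;	/* illustrative */
	struct color_lut_entry *lut = calloc(lut_size, sizeof(*lut));

	for (int i = 0; i < lut_size; i++) {
		double v = pow((double)i / (lut_size - 1), 1.0 / 2.2);

		lut[i].red = lut[i].green = lut[i].blue = to_u0_16(v);
	}
	/* lut[] would then be handed to the driver as a GAMMA_LUT blob */
	free(lut);
	return 0;
}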
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 312c67d744ae..3947c2eb8d69 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -28,8 +28,21 @@
*/
struct drm_exynos_gem_create {
__u64 size;
- unsigned int flags;
- unsigned int handle;
+ __u32 flags;
+ __u32 handle;
+};
+
+/**
+ * A structure for getting a fake-offset that can be used with mmap.
+ *
+ * @handle: handle of gem object.
+ * @reserved: just padding to be 64-bit aligned.
+ * @offset: a fake-offset of gem object.
+ */
+struct drm_exynos_gem_map {
+ __u32 handle;
+ __u32 reserved;
+ __u64 offset;
};
/**
@@ -42,8 +55,8 @@ struct drm_exynos_gem_create {
* be set by driver.
*/
struct drm_exynos_gem_info {
- unsigned int handle;
- unsigned int flags;
+ __u32 handle;
+ __u32 flags;
__u64 size;
};
@@ -56,8 +69,8 @@ struct drm_exynos_gem_info {
* @edid: the edid data pointer from user side.
*/
struct drm_exynos_vidi_connection {
- unsigned int connection;
- unsigned int extensions;
+ __u32 connection;
+ __u32 extensions;
__u64 edid;
};
@@ -206,9 +219,9 @@ struct drm_exynos_ipp_prop_list {
* @pos: property of image position(src-cropped,dst-scaler).
*/
struct drm_exynos_ipp_config {
- enum drm_exynos_ops_id ops_id;
- enum drm_exynos_flip flip;
- enum drm_exynos_degree degree;
+ __u32 ops_id;
+ __u32 flip;
+ __u32 degree;
__u32 fmt;
struct drm_exynos_sz sz;
struct drm_exynos_pos pos;
@@ -233,7 +246,7 @@ enum drm_exynos_ipp_cmd {
*/
struct drm_exynos_ipp_property {
struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
- enum drm_exynos_ipp_cmd cmd;
+ __u32 cmd;
__u32 ipp_id;
__u32 prop_id;
__u32 refresh_rate;
@@ -255,8 +268,8 @@ enum drm_exynos_ipp_buf_type {
* @user_data: user data.
*/
struct drm_exynos_ipp_queue_buf {
- enum drm_exynos_ops_id ops_id;
- enum drm_exynos_ipp_buf_type buf_type;
+ __u32 ops_id;
+ __u32 buf_type;
__u32 prop_id;
__u32 buf_id;
__u32 handle[EXYNOS_DRM_PLANAR_MAX];
@@ -280,10 +293,11 @@ enum drm_exynos_ipp_ctrl {
*/
struct drm_exynos_ipp_cmd_ctrl {
__u32 prop_id;
- enum drm_exynos_ipp_ctrl ctrl;
+ __u32 ctrl;
};
#define DRM_EXYNOS_GEM_CREATE 0x00
+#define DRM_EXYNOS_GEM_MAP 0x01
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
@@ -301,7 +315,8 @@ struct drm_exynos_ipp_cmd_ctrl {
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
-
+#define DRM_IOCTL_EXYNOS_GEM_MAP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_MAP, struct drm_exynos_gem_map)
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
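
DRM_IOCTL_EXYNOS_GEM_MAP returns a fake offset that is then passed to mmap() on the DRM fd to map the GEM object. A minimal sketch, assuming the handle and size come from a prior DRM_IOCTL_EXYNOS_GEM_CREATE call and that the uapi header is installed under the path shown (both assumptions):

/* Sketch: map an exynos GEM object via its fake mmap offset. The handle
 * and size are assumed to come from a prior GEM_CREATE call.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/exynos_drm.h>	/* uapi header; install path is an assumption */

void *exynos_gem_mmap(int drm_fd, __u32 handle, size_t size)
{
	struct drm_exynos_gem_map req = { .handle = handle };
	void *map;

	if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_MAP, &req) < 0) {
		perror("EXYNOS_GEM_MAP");
		return NULL;
	}

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, req.offset);
	return map == MAP_FAILED ? NULL : map;
}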
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index acf21026c78a..a5524cc95ff8 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -772,10 +772,12 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_HANDLE_LUT (1<<12)
/** Used for switching BSD rings on the platforms with two BSD rings */
-#define I915_EXEC_BSD_MASK (3<<13)
-#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
-#define I915_EXEC_BSD_RING1 (1<<13)
-#define I915_EXEC_BSD_RING2 (2<<13)
+#define I915_EXEC_BSD_SHIFT (13)
+#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
/** Tell the kernel that the batchbuffer is processed by
* the resource streamer.
@@ -812,10 +814,35 @@ struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
- /** Return busy status (1 if busy, 0 if idle).
- * The high word is used to indicate on which rings the object
- * currently resides:
- * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ /** Return busy status
+ *
+ * A return of 0 implies that the object is idle (after
+ * having flushed any pending activity), and a non-zero return that
+ * the object is still in-flight on the GPU. (The GPU has not yet
+ * signaled completion for all pending requests that reference the
+ * object.)
+ *
+ * The returned dword is split into two fields to indicate both
+ * the engines on which the object is being read, and the
+ * engine on which it is currently being written (if any).
+ *
+ * The low word (bits 0:15) indicates if the object is being written
+ * to by any engine (there can only be one, as the GEM implicit
+ * synchronisation rules force writes to be serialised). Only the
+ * engine for the last write is reported.
+ *
+ * The high word (bits 16:31) is a bitmask of which engines are
+ * currently reading from the object. Multiple engines may be
+ * reading from the object simultaneously.
+ *
+ * The value of each engine is the same as specified in the
+ * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
+ * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
+ * the I915_EXEC_RENDER engine for execution, and so it is never
+ * reported as active itself. Some hardware may have parallel
+ * execution engines, e.g. multiple media engines, which are
+ * mapped to the same identifier in the EXECBUFFER2 ioctl and
+ * so are not separately reported for busyness.
*/
__u32 busy;
};
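
Per the comment above, the busy dword now packs the last writing engine into the low word and a read bitmask into the high word. A short sketch decoding the value after DRM_IOCTL_I915_GEM_BUSY; the mapping of the printed numbers onto I915_EXEC_* ring identifiers follows the description above and the printing is illustrative only.

/* Sketch: decode the busy dword from DRM_IOCTL_I915_GEM_BUSY into
 * "written by" and "read by" engine sets, as described above.
 */
#include <stdio.h>
#include <stdint.h>

static void decode_busy(uint32_t busy)
{
	uint32_t write_engine = busy & 0xffff;	/* low word: last writer */
	uint32_t read_mask = busy >> 16;	/* high word: readers */

	if (!busy) {
		printf("object is idle\n");
		return;
	}
	if (write_engine)
		printf("being written by engine %u\n", write_engine);
	for (unsigned int i = 0; i < 16; i++)
		if (read_mask & (1u << i))
			printf("being read by engine id %u\n", i);
}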
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 81e6e0d1d360..254d3e92d18e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -50,6 +50,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_GPU_ID 0x01
#define MSM_PARAM_GMEM_SIZE 0x02
#define MSM_PARAM_CHIP_ID 0x03
+#define MSM_PARAM_MAX_FREQ 0x04
+#define MSM_PARAM_TIMESTAMP 0x05
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 5c9ae6a9b7f5..b71fd0b5cbad 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -174,6 +174,7 @@ header-y += if_hippi.h
header-y += if_infiniband.h
header-y += if_link.h
header-y += if_ltalk.h
+header-y += if_macsec.h
header-y += if_packet.h
header-y += if_phonet.h
header-y += if_plip.h
@@ -353,6 +354,7 @@ header-y += reiserfs_fs.h
header-y += reiserfs_xattr.h
header-y += resource.h
header-y += rfkill.h
+header-y += rio_mport_cdev.h
header-y += romfs_fs.h
header-y += rose.h
header-y += route.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 843540c398eb..d820aa979620 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -110,6 +110,7 @@
#define AUDIT_SECCOMP 1326 /* Secure Computing event */
#define AUDIT_PROCTITLE 1327 /* Proctitle emit event */
#define AUDIT_FEATURE_CHANGE 1328 /* audit log listing feature changes */
+#define AUDIT_REPLACE 1329 /* Replace auditd if this packet is unanswered */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 5df4881dea7b..924f537183fd 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -81,6 +81,9 @@ enum bpf_map_type {
BPF_MAP_TYPE_ARRAY,
BPF_MAP_TYPE_PROG_ARRAY,
BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ BPF_MAP_TYPE_PERCPU_HASH,
+ BPF_MAP_TYPE_PERCPU_ARRAY,
+ BPF_MAP_TYPE_STACK_TRACE,
};
enum bpf_prog_type {
@@ -98,12 +101,15 @@ enum bpf_prog_type {
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
+#define BPF_F_NO_PREALLOC (1U << 0)
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
__u32 key_size; /* size of key in bytes */
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
+ __u32 map_flags; /* prealloc or not */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -270,6 +276,42 @@ enum bpf_func_id {
*/
BPF_FUNC_perf_event_output,
BPF_FUNC_skb_load_bytes,
+
+ /**
+ * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
+ * @ctx: struct pt_regs*
+ * @map: pointer to stack_trace map
+ * @flags: bits 0-7 - number of stack frames to skip
+ * bit 8 - collect user stack instead of kernel
+ * bit 9 - compare stacks by hash only
+ * bit 10 - if two different stacks hash into the same stackid
+ * discard old
+ * other bits - reserved
+ * Return: >= 0 stackid on success or negative error
+ */
+ BPF_FUNC_get_stackid,
+
+ /**
+ * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
+ * @from: raw from buffer
+ * @from_size: length of from buffer
+ * @to: raw to buffer
+ * @to_size: length of to buffer
+ * @seed: optional seed
+ * Return: csum result
+ */
+ BPF_FUNC_csum_diff,
+
+ /**
+ * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
+ * retrieve or populate tunnel options metadata
+ * @skb: pointer to skb
+ * @opt: pointer to raw tunnel option data
+ * @size: size of @opt
+ * Return: 0 on success for set, option size for get
+ */
+ BPF_FUNC_skb_get_tunnel_opt,
+ BPF_FUNC_skb_set_tunnel_opt,
__BPF_FUNC_MAX_ID,
};
@@ -277,6 +319,7 @@ enum bpf_func_id {
/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
+#define BPF_F_INVALIDATE_HASH (1ULL << 1)
/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
* First 4 bits are for passing the header field size.
@@ -285,6 +328,7 @@ enum bpf_func_id {
/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR (1ULL << 4)
+#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS (1ULL << 0)
@@ -292,8 +336,15 @@ enum bpf_func_id {
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
+/* BPF_FUNC_get_stackid flags. */
+#define BPF_F_SKIP_FIELD_MASK 0xffULL
+#define BPF_F_USER_STACK (1ULL << 8)
+#define BPF_F_FAST_STACK_CMP (1ULL << 9)
+#define BPF_F_REUSE_STACKID (1ULL << 10)
+
/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
+#define BPF_F_DONT_FRAGMENT (1ULL << 2)
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
@@ -324,6 +375,7 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
+ __u32 tunnel_label;
};
#endif /* _UAPI__LINUX_BPF_H__ */
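
map_flags is the new field in the BPF_MAP_CREATE attributes; BPF_F_NO_PREALLOC asks for on-demand element allocation instead of preallocation. A minimal sketch issuing the raw bpf(2) syscall, assuming installed headers that already carry the new field; the key/value sizes and entry count are arbitrary illustration values.

/* Sketch: create a BPF hash map with map_flags = BPF_F_NO_PREALLOC via
 * the raw bpf(2) syscall. Sizes and max_entries are illustrative.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_hash_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(int);
	attr.value_size = sizeof(long);
	attr.max_entries = 1024;
	attr.map_flags = BPF_F_NO_PREALLOC;	/* allocate elements on demand */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}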
diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h
index 672374450095..cdab17ab907c 100644
--- a/include/uapi/linux/byteorder/big_endian.h
+++ b/include/uapi/linux/byteorder/big_endian.h
@@ -40,51 +40,51 @@
#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
-static inline __le64 __cpu_to_le64p(const __u64 *p)
+static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
{
return (__force __le64)__swab64p(p);
}
-static inline __u64 __le64_to_cpup(const __le64 *p)
+static __always_inline __u64 __le64_to_cpup(const __le64 *p)
{
return __swab64p((__u64 *)p);
}
-static inline __le32 __cpu_to_le32p(const __u32 *p)
+static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
{
return (__force __le32)__swab32p(p);
}
-static inline __u32 __le32_to_cpup(const __le32 *p)
+static __always_inline __u32 __le32_to_cpup(const __le32 *p)
{
return __swab32p((__u32 *)p);
}
-static inline __le16 __cpu_to_le16p(const __u16 *p)
+static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
{
return (__force __le16)__swab16p(p);
}
-static inline __u16 __le16_to_cpup(const __le16 *p)
+static __always_inline __u16 __le16_to_cpup(const __le16 *p)
{
return __swab16p((__u16 *)p);
}
-static inline __be64 __cpu_to_be64p(const __u64 *p)
+static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
{
return (__force __be64)*p;
}
-static inline __u64 __be64_to_cpup(const __be64 *p)
+static __always_inline __u64 __be64_to_cpup(const __be64 *p)
{
return (__force __u64)*p;
}
-static inline __be32 __cpu_to_be32p(const __u32 *p)
+static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
{
return (__force __be32)*p;
}
-static inline __u32 __be32_to_cpup(const __be32 *p)
+static __always_inline __u32 __be32_to_cpup(const __be32 *p)
{
return (__force __u32)*p;
}
-static inline __be16 __cpu_to_be16p(const __u16 *p)
+static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
{
return (__force __be16)*p;
}
-static inline __u16 __be16_to_cpup(const __be16 *p)
+static __always_inline __u16 __be16_to_cpup(const __be16 *p)
{
return (__force __u16)*p;
}
diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
index d876736a0017..4b93f2b260dd 100644
--- a/include/uapi/linux/byteorder/little_endian.h
+++ b/include/uapi/linux/byteorder/little_endian.h
@@ -40,51 +40,51 @@
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
-static inline __le64 __cpu_to_le64p(const __u64 *p)
+static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
{
return (__force __le64)*p;
}
-static inline __u64 __le64_to_cpup(const __le64 *p)
+static __always_inline __u64 __le64_to_cpup(const __le64 *p)
{
return (__force __u64)*p;
}
-static inline __le32 __cpu_to_le32p(const __u32 *p)
+static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
{
return (__force __le32)*p;
}
-static inline __u32 __le32_to_cpup(const __le32 *p)
+static __always_inline __u32 __le32_to_cpup(const __le32 *p)
{
return (__force __u32)*p;
}
-static inline __le16 __cpu_to_le16p(const __u16 *p)
+static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
{
return (__force __le16)*p;
}
-static inline __u16 __le16_to_cpup(const __le16 *p)
+static __always_inline __u16 __le16_to_cpup(const __le16 *p)
{
return (__force __u16)*p;
}
-static inline __be64 __cpu_to_be64p(const __u64 *p)
+static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
{
return (__force __be64)__swab64p(p);
}
-static inline __u64 __be64_to_cpup(const __be64 *p)
+static __always_inline __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
-static inline __be32 __cpu_to_be32p(const __u32 *p)
+static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
{
return (__force __be32)__swab32p(p);
}
-static inline __u32 __be32_to_cpup(const __be32 *p)
+static __always_inline __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
-static inline __be16 __cpu_to_be16p(const __u16 *p)
+static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
{
return (__force __be16)__swab16p(p);
}
-static inline __u16 __be16_to_cpup(const __be16 *p)
+static __always_inline __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
new file mode 100644
index 000000000000..c9fee5781eb1
--- /dev/null
+++ b/include/uapi/linux/devlink.h
@@ -0,0 +1,72 @@
+/*
+ * include/uapi/linux/devlink.h - Network physical device Netlink interface
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_DEVLINK_H_
+#define _UAPI_LINUX_DEVLINK_H_
+
+#define DEVLINK_GENL_NAME "devlink"
+#define DEVLINK_GENL_VERSION 0x1
+#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config"
+
+enum devlink_command {
+ /* don't change the order or add anything between, this is ABI! */
+ DEVLINK_CMD_UNSPEC,
+
+ DEVLINK_CMD_GET, /* can dump */
+ DEVLINK_CMD_SET,
+ DEVLINK_CMD_NEW,
+ DEVLINK_CMD_DEL,
+
+ DEVLINK_CMD_PORT_GET, /* can dump */
+ DEVLINK_CMD_PORT_SET,
+ DEVLINK_CMD_PORT_NEW,
+ DEVLINK_CMD_PORT_DEL,
+
+ DEVLINK_CMD_PORT_SPLIT,
+ DEVLINK_CMD_PORT_UNSPLIT,
+
+ /* add new commands above here */
+
+ __DEVLINK_CMD_MAX,
+ DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
+};
+
+enum devlink_port_type {
+ DEVLINK_PORT_TYPE_NOTSET,
+ DEVLINK_PORT_TYPE_AUTO,
+ DEVLINK_PORT_TYPE_ETH,
+ DEVLINK_PORT_TYPE_IB,
+};
+
+enum devlink_attr {
+ /* don't change the order or add anything between, this is ABI! */
+ DEVLINK_ATTR_UNSPEC,
+
+ /* bus name + dev name together are a handle for devlink entity */
+ DEVLINK_ATTR_BUS_NAME, /* string */
+ DEVLINK_ATTR_DEV_NAME, /* string */
+
+ DEVLINK_ATTR_PORT_INDEX, /* u32 */
+ DEVLINK_ATTR_PORT_TYPE, /* u16 */
+ DEVLINK_ATTR_PORT_DESIRED_TYPE, /* u16 */
+ DEVLINK_ATTR_PORT_NETDEV_IFINDEX, /* u32 */
+ DEVLINK_ATTR_PORT_NETDEV_NAME, /* string */
+ DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */
+ DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */
+ DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */
+
+ /* add new attributes above here, update the policy in devlink.c */
+
+ __DEVLINK_ATTR_MAX,
+ DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
+};
+
+#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
new file mode 100644
index 000000000000..fb0dedb7c121
--- /dev/null
+++ b/include/uapi/linux/dma-buf.h
@@ -0,0 +1,40 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2015 Intel Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DMA_BUF_UAPI_H_
+#define _DMA_BUF_UAPI_H_
+
+#include <linux/types.h>
+
+/* begin/end dma-buf functions used for userspace mmap. */
+struct dma_buf_sync {
+ __u64 flags;
+};
+
+#define DMA_BUF_SYNC_READ (1 << 0)
+#define DMA_BUF_SYNC_WRITE (2 << 0)
+#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
+#define DMA_BUF_SYNC_START (0 << 2)
+#define DMA_BUF_SYNC_END (1 << 2)
+#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
+ (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
+
+#define DMA_BUF_BASE 'b'
+#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+#endif
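
Userspace that mmap()s a dma-buf is expected to bracket CPU access with the new sync ioctl. A minimal sketch, assuming the dma-buf fd and buffer size come from elsewhere (for example a PRIME export as sketched earlier); error handling is abbreviated.

/* Sketch: bracket CPU access to an mmap()ed dma-buf with DMA_BUF_IOCTL_SYNC.
 * The fd and size are assumed to come from elsewhere.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

int fill_dmabuf(int dmabuf_fd, size_t size)
{
	struct dma_buf_sync sync = { 0 };
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	if (map == MAP_FAILED)
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* begin CPU access */

	memset(map, 0, size);				/* the CPU access */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* end CPU access */

	munmap(map, size);
	return 0;
}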
diff --git a/include/uapi/linux/dqblk_xfs.h b/include/uapi/linux/dqblk_xfs.h
index dcd75cc26196..11b3b31faf14 100644
--- a/include/uapi/linux/dqblk_xfs.h
+++ b/include/uapi/linux/dqblk_xfs.h
@@ -39,6 +39,7 @@
#define Q_XQUOTARM XQM_CMD(6) /* free disk space used by dquots */
#define Q_XQUOTASYNC XQM_CMD(7) /* delalloc flush, updates dquots */
#define Q_XGETQSTATV XQM_CMD(8) /* newer version of get quota */
+#define Q_XGETNEXTQUOTA XQM_CMD(9) /* get disk limits and usage >= ID */
/*
* fs_disk_quota structure:
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b56dfcfe922a..c3fdfe79e5cc 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -30,7 +30,6 @@
#define EM_X86_64 62 /* AMD x86-64 */
#define EM_S390 22 /* IBM S/390 */
#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
-#define EM_V850 87 /* NEC v850 */
#define EM_M32R 88 /* Renesas M32R */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
@@ -50,8 +49,6 @@
*/
#define EM_ALPHA 0x9026
-/* Bogus old v850 magic number, used by old tools. */
-#define EM_CYGNUS_V850 0x9080
/* Bogus old m32r magic number, used by old tools. */
#define EM_CYGNUS_M32R 0x9041
/* This is the old interim value for S/390 architecture */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 57fa39005e79..9222db8ccccc 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -13,15 +13,21 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/if_ether.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MAX */
+#endif
+
/* All structures exposed to userland should be defined such that they
* have the same layout for 32-bit and 64-bit userland.
*/
/**
- * struct ethtool_cmd - link control and status
+ * struct ethtool_cmd - DEPRECATED, link control and status
+ * This structure is DEPRECATED, please use struct ethtool_link_settings.
* @cmd: Command number = %ETHTOOL_GSET or %ETHTOOL_SSET
* @supported: Bitmask of %SUPPORTED_* flags for the link modes,
* physical connectors and other link features for which the
@@ -31,7 +37,7 @@
* physical connectors and other link features that are
* advertised through autonegotiation or enabled for
* auto-detection.
- * @speed: Low bits of the speed
+ * @speed: Low bits of the speed, 1Mb units, 0 to INT_MAX or SPEED_UNKNOWN
* @duplex: Duplex mode; one of %DUPLEX_*
* @port: Physical connector type; one of %PORT_*
* @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not
@@ -47,7 +53,7 @@
* obsoleted by &struct ethtool_coalesce. Read-only; deprecated.
* @maxrxpkt: Historically used to report RX IRQ coalescing; now
* obsoleted by &struct ethtool_coalesce. Read-only; deprecated.
- * @speed_hi: High bits of the speed
+ * @speed_hi: High bits of the speed, 1Mb units, 0 to INT_MAX or SPEED_UNKNOWN
* @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of
* %ETH_TP_MDI_*. If the status is unknown or not applicable, the
* value will be %ETH_TP_MDI_INVALID. Read-only.
@@ -748,6 +754,56 @@ struct ethtool_usrip4_spec {
__u8 proto;
};
+/**
+ * struct ethtool_tcpip6_spec - flow specification for TCP/IPv6 etc.
+ * @ip6src: Source host
+ * @ip6dst: Destination host
+ * @psrc: Source port
+ * @pdst: Destination port
+ * @tclass: Traffic Class
+ *
+ * This can be used to specify a TCP/IPv6, UDP/IPv6 or SCTP/IPv6 flow.
+ */
+struct ethtool_tcpip6_spec {
+ __be32 ip6src[4];
+ __be32 ip6dst[4];
+ __be16 psrc;
+ __be16 pdst;
+ __u8 tclass;
+};
+
+/**
+ * struct ethtool_ah_espip6_spec - flow specification for IPsec/IPv6
+ * @ip6src: Source host
+ * @ip6dst: Destination host
+ * @spi: Security parameters index
+ * @tclass: Traffic Class
+ *
+ * This can be used to specify an IPsec transport or tunnel over IPv6.
+ */
+struct ethtool_ah_espip6_spec {
+ __be32 ip6src[4];
+ __be32 ip6dst[4];
+ __be32 spi;
+ __u8 tclass;
+};
+
+/**
+ * struct ethtool_usrip6_spec - general flow specification for IPv6
+ * @ip6src: Source host
+ * @ip6dst: Destination host
+ * @l4_4_bytes: First 4 bytes of transport (layer 4) header
+ * @tclass: Traffic Class
+ * @l4_proto: Transport protocol number (nexthdr after any Extension Headers)
+ */
+struct ethtool_usrip6_spec {
+ __be32 ip6src[4];
+ __be32 ip6dst[4];
+ __be32 l4_4_bytes;
+ __u8 tclass;
+ __u8 l4_proto;
+};
+
union ethtool_flow_union {
struct ethtool_tcpip4_spec tcp_ip4_spec;
struct ethtool_tcpip4_spec udp_ip4_spec;
@@ -755,6 +811,12 @@ union ethtool_flow_union {
struct ethtool_ah_espip4_spec ah_ip4_spec;
struct ethtool_ah_espip4_spec esp_ip4_spec;
struct ethtool_usrip4_spec usr_ip4_spec;
+ struct ethtool_tcpip6_spec tcp_ip6_spec;
+ struct ethtool_tcpip6_spec udp_ip6_spec;
+ struct ethtool_tcpip6_spec sctp_ip6_spec;
+ struct ethtool_ah_espip6_spec ah_ip6_spec;
+ struct ethtool_ah_espip6_spec esp_ip6_spec;
+ struct ethtool_usrip6_spec usr_ip6_spec;
struct ethhdr ether_spec;
__u8 hdata[52];
};
@@ -1146,10 +1208,29 @@ enum ethtool_sfeatures_retval_bits {
#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT)
#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT)
+#define MAX_NUM_QUEUE 4096
+
+/**
+ * struct ethtool_per_queue_op - apply sub command to the queues in mask.
+ * @cmd: ETHTOOL_PERQUEUE
+ * @sub_command: the sub command which applies to each queue
+ * @queue_mask: Bitmap of the queues to which the sub command applies
+ * @data: A complete command structure follows for each of the queues addressed
+ */
+struct ethtool_per_queue_op {
+ __u32 cmd;
+ __u32 sub_command;
+ __u32 queue_mask[__KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32)];
+ char data[];
+};
/* CMDs currently supported */
-#define ETHTOOL_GSET 0x00000001 /* Get settings. */
-#define ETHTOOL_SSET 0x00000002 /* Set settings. */
+#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings.
+ * Please use ETHTOOL_GLINKSETTINGS
+ */
+#define ETHTOOL_SSET 0x00000002 /* DEPRECATED, Set settings.
+ * Please use ETHTOOL_SLINKSETTINGS
+ */
#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */
#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */
#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */
@@ -1229,73 +1310,141 @@ enum ethtool_sfeatures_retval_bits {
#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */
#define ETHTOOL_GPHYSTATS 0x0000004a /* get PHY-specific statistics */
+#define ETHTOOL_PERQUEUE 0x0000004b /* Set per queue options */
+
+#define ETHTOOL_GLINKSETTINGS 0x0000004c /* Get ethtool_link_settings */
+#define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */
+
+
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
-#define SUPPORTED_10baseT_Half (1 << 0)
-#define SUPPORTED_10baseT_Full (1 << 1)
-#define SUPPORTED_100baseT_Half (1 << 2)
-#define SUPPORTED_100baseT_Full (1 << 3)
-#define SUPPORTED_1000baseT_Half (1 << 4)
-#define SUPPORTED_1000baseT_Full (1 << 5)
-#define SUPPORTED_Autoneg (1 << 6)
-#define SUPPORTED_TP (1 << 7)
-#define SUPPORTED_AUI (1 << 8)
-#define SUPPORTED_MII (1 << 9)
-#define SUPPORTED_FIBRE (1 << 10)
-#define SUPPORTED_BNC (1 << 11)
-#define SUPPORTED_10000baseT_Full (1 << 12)
-#define SUPPORTED_Pause (1 << 13)
-#define SUPPORTED_Asym_Pause (1 << 14)
-#define SUPPORTED_2500baseX_Full (1 << 15)
-#define SUPPORTED_Backplane (1 << 16)
-#define SUPPORTED_1000baseKX_Full (1 << 17)
-#define SUPPORTED_10000baseKX4_Full (1 << 18)
-#define SUPPORTED_10000baseKR_Full (1 << 19)
-#define SUPPORTED_10000baseR_FEC (1 << 20)
-#define SUPPORTED_20000baseMLD2_Full (1 << 21)
-#define SUPPORTED_20000baseKR2_Full (1 << 22)
-#define SUPPORTED_40000baseKR4_Full (1 << 23)
-#define SUPPORTED_40000baseCR4_Full (1 << 24)
-#define SUPPORTED_40000baseSR4_Full (1 << 25)
-#define SUPPORTED_40000baseLR4_Full (1 << 26)
-#define SUPPORTED_56000baseKR4_Full (1 << 27)
-#define SUPPORTED_56000baseCR4_Full (1 << 28)
-#define SUPPORTED_56000baseSR4_Full (1 << 29)
-#define SUPPORTED_56000baseLR4_Full (1 << 30)
-
-#define ADVERTISED_10baseT_Half (1 << 0)
-#define ADVERTISED_10baseT_Full (1 << 1)
-#define ADVERTISED_100baseT_Half (1 << 2)
-#define ADVERTISED_100baseT_Full (1 << 3)
-#define ADVERTISED_1000baseT_Half (1 << 4)
-#define ADVERTISED_1000baseT_Full (1 << 5)
-#define ADVERTISED_Autoneg (1 << 6)
-#define ADVERTISED_TP (1 << 7)
-#define ADVERTISED_AUI (1 << 8)
-#define ADVERTISED_MII (1 << 9)
-#define ADVERTISED_FIBRE (1 << 10)
-#define ADVERTISED_BNC (1 << 11)
-#define ADVERTISED_10000baseT_Full (1 << 12)
-#define ADVERTISED_Pause (1 << 13)
-#define ADVERTISED_Asym_Pause (1 << 14)
-#define ADVERTISED_2500baseX_Full (1 << 15)
-#define ADVERTISED_Backplane (1 << 16)
-#define ADVERTISED_1000baseKX_Full (1 << 17)
-#define ADVERTISED_10000baseKX4_Full (1 << 18)
-#define ADVERTISED_10000baseKR_Full (1 << 19)
-#define ADVERTISED_10000baseR_FEC (1 << 20)
-#define ADVERTISED_20000baseMLD2_Full (1 << 21)
-#define ADVERTISED_20000baseKR2_Full (1 << 22)
-#define ADVERTISED_40000baseKR4_Full (1 << 23)
-#define ADVERTISED_40000baseCR4_Full (1 << 24)
-#define ADVERTISED_40000baseSR4_Full (1 << 25)
-#define ADVERTISED_40000baseLR4_Full (1 << 26)
-#define ADVERTISED_56000baseKR4_Full (1 << 27)
-#define ADVERTISED_56000baseCR4_Full (1 << 28)
-#define ADVERTISED_56000baseSR4_Full (1 << 29)
-#define ADVERTISED_56000baseLR4_Full (1 << 30)
+/* Link mode bit indices */
+enum ethtool_link_mode_bit_indices {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5,
+ ETHTOOL_LINK_MODE_Autoneg_BIT = 6,
+ ETHTOOL_LINK_MODE_TP_BIT = 7,
+ ETHTOOL_LINK_MODE_AUI_BIT = 8,
+ ETHTOOL_LINK_MODE_MII_BIT = 9,
+ ETHTOOL_LINK_MODE_FIBRE_BIT = 10,
+ ETHTOOL_LINK_MODE_BNC_BIT = 11,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12,
+ ETHTOOL_LINK_MODE_Pause_BIT = 13,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15,
+ ETHTOOL_LINK_MODE_Backplane_BIT = 16,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20,
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27,
+ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
+ ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
+
+ /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
+ * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
+ * macro for bits > 31. The only way to use indices > 31 is to
+ * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API.
+ */
+
+ __ETHTOOL_LINK_MODE_LAST
+ = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+};
+
+#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
+ (1UL << (ETHTOOL_LINK_MODE_ ## base_name ## _BIT))
+
+/* DEPRECATED macros. Please migrate to
+ * ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. Please do NOT
+ * define any new SUPPORTED_* macro for bits > 31.
+ */
+#define SUPPORTED_10baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Half)
+#define SUPPORTED_10baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Full)
+#define SUPPORTED_100baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Half)
+#define SUPPORTED_100baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Full)
+#define SUPPORTED_1000baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Half)
+#define SUPPORTED_1000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Full)
+#define SUPPORTED_Autoneg __ETHTOOL_LINK_MODE_LEGACY_MASK(Autoneg)
+#define SUPPORTED_TP __ETHTOOL_LINK_MODE_LEGACY_MASK(TP)
+#define SUPPORTED_AUI __ETHTOOL_LINK_MODE_LEGACY_MASK(AUI)
+#define SUPPORTED_MII __ETHTOOL_LINK_MODE_LEGACY_MASK(MII)
+#define SUPPORTED_FIBRE __ETHTOOL_LINK_MODE_LEGACY_MASK(FIBRE)
+#define SUPPORTED_BNC __ETHTOOL_LINK_MODE_LEGACY_MASK(BNC)
+#define SUPPORTED_10000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseT_Full)
+#define SUPPORTED_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Pause)
+#define SUPPORTED_Asym_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Asym_Pause)
+#define SUPPORTED_2500baseX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(2500baseX_Full)
+#define SUPPORTED_Backplane __ETHTOOL_LINK_MODE_LEGACY_MASK(Backplane)
+#define SUPPORTED_1000baseKX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseKX_Full)
+#define SUPPORTED_10000baseKX4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKX4_Full)
+#define SUPPORTED_10000baseKR_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKR_Full)
+#define SUPPORTED_10000baseR_FEC __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseR_FEC)
+#define SUPPORTED_20000baseMLD2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseMLD2_Full)
+#define SUPPORTED_20000baseKR2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseKR2_Full)
+#define SUPPORTED_40000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseKR4_Full)
+#define SUPPORTED_40000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseCR4_Full)
+#define SUPPORTED_40000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseSR4_Full)
+#define SUPPORTED_40000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseLR4_Full)
+#define SUPPORTED_56000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseKR4_Full)
+#define SUPPORTED_56000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseCR4_Full)
+#define SUPPORTED_56000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseSR4_Full)
+#define SUPPORTED_56000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseLR4_Full)
+/* Please do not define any new SUPPORTED_* macro for bits > 31, see
+ * notice above.
+ */
+
+/*
+ * DEPRECATED macros. Please migrate to
+ * ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. Please do NOT
+ * define any new ADVERTISED_* macro for bits > 31.
+ */
+#define ADVERTISED_10baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Half)
+#define ADVERTISED_10baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Full)
+#define ADVERTISED_100baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Half)
+#define ADVERTISED_100baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Full)
+#define ADVERTISED_1000baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Half)
+#define ADVERTISED_1000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Full)
+#define ADVERTISED_Autoneg __ETHTOOL_LINK_MODE_LEGACY_MASK(Autoneg)
+#define ADVERTISED_TP __ETHTOOL_LINK_MODE_LEGACY_MASK(TP)
+#define ADVERTISED_AUI __ETHTOOL_LINK_MODE_LEGACY_MASK(AUI)
+#define ADVERTISED_MII __ETHTOOL_LINK_MODE_LEGACY_MASK(MII)
+#define ADVERTISED_FIBRE __ETHTOOL_LINK_MODE_LEGACY_MASK(FIBRE)
+#define ADVERTISED_BNC __ETHTOOL_LINK_MODE_LEGACY_MASK(BNC)
+#define ADVERTISED_10000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseT_Full)
+#define ADVERTISED_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Pause)
+#define ADVERTISED_Asym_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Asym_Pause)
+#define ADVERTISED_2500baseX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(2500baseX_Full)
+#define ADVERTISED_Backplane __ETHTOOL_LINK_MODE_LEGACY_MASK(Backplane)
+#define ADVERTISED_1000baseKX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseKX_Full)
+#define ADVERTISED_10000baseKX4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKX4_Full)
+#define ADVERTISED_10000baseKR_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKR_Full)
+#define ADVERTISED_10000baseR_FEC __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseR_FEC)
+#define ADVERTISED_20000baseMLD2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseMLD2_Full)
+#define ADVERTISED_20000baseKR2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseKR2_Full)
+#define ADVERTISED_40000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseKR4_Full)
+#define ADVERTISED_40000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseCR4_Full)
+#define ADVERTISED_40000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseSR4_Full)
+#define ADVERTISED_40000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseLR4_Full)
+#define ADVERTISED_56000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseKR4_Full)
+#define ADVERTISED_56000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseCR4_Full)
+#define ADVERTISED_56000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseSR4_Full)
+#define ADVERTISED_56000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseLR4_Full)
+/* Please do not define any new ADVERTISED_* macro for bits > 31, see
+ * notice above.
+ */
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
@@ -1303,7 +1452,7 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
+/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
@@ -1319,11 +1468,28 @@ enum ethtool_sfeatures_retval_bits {
#define SPEED_UNKNOWN -1
+static inline int ethtool_validate_speed(__u32 speed)
+{
+ return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+}
+
/* Duplex, half or full. */
#define DUPLEX_HALF 0x00
#define DUPLEX_FULL 0x01
#define DUPLEX_UNKNOWN 0xff
+static inline int ethtool_validate_duplex(__u8 duplex)
+{
+ switch (duplex) {
+ case DUPLEX_HALF:
+ case DUPLEX_FULL:
+ case DUPLEX_UNKNOWN:
+ return 1;
+ }
+
+ return 0;
+}
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -1367,15 +1533,17 @@ enum ethtool_sfeatures_retval_bits {
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */
#define AH_ESP_V4_FLOW 0x04 /* hash only */
-#define TCP_V6_FLOW 0x05 /* hash only */
-#define UDP_V6_FLOW 0x06 /* hash only */
-#define SCTP_V6_FLOW 0x07 /* hash only */
+#define TCP_V6_FLOW 0x05 /* hash or spec (tcp_ip6_spec; nfc only) */
+#define UDP_V6_FLOW 0x06 /* hash or spec (udp_ip6_spec; nfc only) */
+#define SCTP_V6_FLOW 0x07 /* hash or spec (sctp_ip6_spec; nfc only) */
#define AH_ESP_V6_FLOW 0x08 /* hash only */
#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */
#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */
-#define AH_V6_FLOW 0x0b /* hash only */
-#define ESP_V6_FLOW 0x0c /* hash only */
-#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
+#define AH_V6_FLOW 0x0b /* hash or spec (ah_ip6_spec; nfc only) */
+#define ESP_V6_FLOW 0x0c /* hash or spec (esp_ip6_spec; nfc only) */
+#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
+#define IP_USER_FLOW IPV4_USER_FLOW
+#define IPV6_USER_FLOW 0x0e /* spec only (usr_ip6_spec; nfc only) */
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
@@ -1441,4 +1609,123 @@ enum ethtool_reset_flags {
};
#define ETH_RESET_SHARED_SHIFT 16
+
+/**
+ * struct ethtool_link_settings - link control and status
+ *
+ * IMPORTANT, Backward compatibility notice: When implementing new
+ * user-space tools, please first try %ETHTOOL_GLINKSETTINGS, and
+ * if it succeeds use %ETHTOOL_SLINKSETTINGS to change link
+ * settings; do not use %ETHTOOL_SSET if %ETHTOOL_GLINKSETTINGS
+ * succeeded: stick to %ETHTOOL_GLINKSETTINGS/%SLINKSETTINGS in
+ * that case. Conversely, if %ETHTOOL_GLINKSETTINGS fails, use
+ * %ETHTOOL_GSET to query and %ETHTOOL_SSET to change link
+ * settings; do not use %ETHTOOL_SLINKSETTINGS if
+ * %ETHTOOL_GLINKSETTINGS failed: stick to
+ * %ETHTOOL_GSET/%ETHTOOL_SSET in that case.
+ *
+ * @cmd: Command number = %ETHTOOL_GLINKSETTINGS or %ETHTOOL_SLINKSETTINGS
+ * @speed: Link speed (Mbps)
+ * @duplex: Duplex mode; one of %DUPLEX_*
+ * @port: Physical connector type; one of %PORT_*
+ * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not
+ * applicable. For clause 45 PHYs this is the PRTAD.
+ * @autoneg: Enable/disable autonegotiation and auto-detection;
+ * either %AUTONEG_DISABLE or %AUTONEG_ENABLE
+ * @mdio_support: Bitmask of %ETH_MDIO_SUPPORTS_* flags for the MDIO
+ * protocols supported by the interface; 0 if unknown.
+ * Read-only.
+ * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of
+ * %ETH_TP_MDI_*. If the status is unknown or not applicable, the
+ * value will be %ETH_TP_MDI_INVALID. Read-only.
+ * @eth_tp_mdix_ctrl: Ethernet twisted pair MDI(-X) control; one of
+ * %ETH_TP_MDI_*. If MDI(-X) control is not implemented, reads
+ * yield %ETH_TP_MDI_INVALID and writes may be ignored or rejected.
+ * When written successfully, the link should be renegotiated if
+ * necessary.
+ * @link_mode_masks_nwords: Number of 32-bit words for each of the
+ * supported, advertising, lp_advertising link mode bitmaps. For
+ * %ETHTOOL_GLINKSETTINGS: on entry, number of words passed by user
+ * (>= 0); on return, if handshake in progress, negative if
+ * request size unsupported by kernel: absolute value indicates
+ * kernel expected size and all the other fields but cmd
+ * are 0; otherwise (handshake completed), strictly positive
+ * to indicate size used by kernel and cmd field stays
+ * %ETHTOOL_GLINKSETTINGS, all other fields populated by driver. For
+ * %ETHTOOL_SLINKSETTINGS: must be valid on entry, ie. a positive
+ * value returned previously by %ETHTOOL_GLINKSETTINGS, otherwise
+ * refused. For drivers: ignore this field (use kernel's
+ * __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
+ * be overwritten by kernel.
+ * @supported: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features for which the interface
+ * supports autonegotiation or auto-detection. Read-only.
+ * @advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features that are advertised through
+ * autonegotiation or enabled for auto-detection.
+ * @lp_advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, and other
+ * link features that the link partner advertised through
+ * autonegotiation; 0 if unknown or not applicable. Read-only.
+ *
+ * If autonegotiation is disabled, the speed and @duplex represent the
+ * fixed link mode and are writable if the driver supports multiple
+ * link modes. If it is enabled then they are read-only; if the link
+ * is up they represent the negotiated link mode; if the link is down,
+ * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and
+ * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode.
+ *
+ * Some hardware interfaces may have multiple PHYs and/or physical
+ * connectors fitted or do not allow the driver to detect which are
+ * fitted. For these interfaces @port and/or @phy_address may be
+ * writable, possibly dependent on @autoneg being %AUTONEG_DISABLE.
+ * Otherwise, attempts to write different values may be ignored or
+ * rejected.
+ *
+ * Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt
+ * are not available in %ethtool_link_settings. Until all drivers are
+ * converted to ignore them or to the new %ethtool_link_settings API,
+ * for both queries and changes, users should always try
+ * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick
+ * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it
+ * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and
+ * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing
+ * either %ethtool_cmd or %ethtool_link_settings).
+ *
+ * Users should assume that all fields not marked read-only are
+ * writable and subject to validation by the driver. They should use
+ * %ETHTOOL_GLINKSETTINGS to get the current values before making specific
+ * changes and then applying them with %ETHTOOL_SLINKSETTINGS.
+ *
+ * Drivers that implement %get_link_ksettings and/or
+ * %set_link_ksettings should ignore the @cmd
+ * and @link_mode_masks_nwords fields (any change to them overwritten
+ * by kernel), and rely only on kernel's internal
+ * %__ETHTOOL_LINK_MODE_MASK_NBITS and
+ * %ethtool_link_mode_mask_t. Drivers that implement
+ * %set_link_ksettings() should validate all fields other than @cmd
+ * and @link_mode_masks_nwords that are not described as read-only or
+ * deprecated, and must ignore all fields described as read-only.
+ */
+struct ethtool_link_settings {
+ __u32 cmd;
+ __u32 speed;
+ __u8 duplex;
+ __u8 port;
+ __u8 phy_address;
+ __u8 autoneg;
+ __u8 mdio_support;
+ __u8 eth_tp_mdix;
+ __u8 eth_tp_mdix_ctrl;
+ __s8 link_mode_masks_nwords;
+ __u32 reserved[8];
+ __u32 link_mode_masks[0];
+ /* layout of link_mode_masks fields:
+ * __u32 map_supported[link_mode_masks_nwords];
+ * __u32 map_advertising[link_mode_masks_nwords];
+ * __u32 map_lp_advertising[link_mode_masks_nwords];
+ */
+};
#endif /* _UAPI_LINUX_ETHTOOL_H */
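
ETHTOOL_GLINKSETTINGS uses a small handshake, as documented above: a first call with link_mode_masks_nwords = 0 makes the kernel answer with the negated word count it expects, and a second call with that count returns the actual settings and the three link mode bitmaps. A sketch of that sequence over SIOCETHTOOL; "eth0" is a placeholder interface name and error handling is abbreviated.

/* Sketch: the ETHTOOL_GLINKSETTINGS nwords handshake over SIOCETHTOOL. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_link_settings *req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int nwords;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */

	/* Pass 1: nwords = 0, kernel replies with -(expected nwords). */
	req = calloc(1, sizeof(*req));
	req->cmd = ETHTOOL_GLINKSETTINGS;
	ifr.ifr_data = (void *)req;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;	/* old kernel: fall back to ETHTOOL_GSET */
	nwords = -req->link_mode_masks_nwords;
	free(req);

	/* Pass 2: repeat with the kernel's count, room for 3 bitmaps. */
	req = calloc(1, sizeof(*req) + 3 * nwords * sizeof(__u32));
	req->cmd = ETHTOOL_GLINKSETTINGS;
	req->link_mode_masks_nwords = nwords;
	ifr.ifr_data = (void *)req;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	printf("speed=%u Mb/s duplex=%u\n", req->speed, req->duplex);
	free(req);
	return 0;
}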
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 149bec83a907..a079d50376e1 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -247,6 +247,24 @@ struct fsxattr {
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
/*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE 8
+
+struct fscrypt_policy {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+} __packed;
+
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+
+/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*
* Note: for historical reasons, these flags were originally used and
@@ -304,4 +322,7 @@ struct fsxattr {
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
+/* flags for preadv2/pwritev2: */
+#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
+
#endif /* _UAPI_LINUX_FS_H */
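
FS_IOC_SET_ENCRYPTION_POLICY is issued on an (empty) directory to bind it to a master key descriptor. A minimal sketch, assuming installed headers that already carry the new definitions; the encryption mode numbers and the key descriptor contents are placeholders, since the mode constants are defined outside this hunk.

/* Sketch: set an encryption policy on a directory. The mode values below
 * are placeholders; the real mode constants live outside this hunk.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int set_policy(const char *dir, const __u8 descriptor[FS_KEY_DESCRIPTOR_SIZE])
{
	struct fscrypt_policy policy;
	int fd = open(dir, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;

	memset(&policy, 0, sizeof(policy));
	policy.version = 0;
	policy.contents_encryption_mode = 1;	/* placeholder mode number */
	policy.filenames_encryption_mode = 4;	/* placeholder mode number */
	memcpy(policy.master_key_descriptor, descriptor,
	       FS_KEY_DESCRIPTOR_SIZE);

	if (ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy) < 0) {
		perror("FS_IOC_SET_ENCRYPTION_POLICY");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}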
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index c3363ba1ae05..5512c90af7e3 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -21,6 +21,7 @@ struct genlmsghdr {
#define GENL_CMD_CAP_DO 0x02
#define GENL_CMD_CAP_DUMP 0x04
#define GENL_CMD_CAP_HASPOL 0x08
+#define GENL_UNS_ADMIN_PERM 0x10
/*
* List of reserved static generic netlink identifiers:
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 9cf2394f0bcf..f80277569f24 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -37,7 +37,7 @@
* are shared for all types of net_devices. The sysfs entries are available
* via /sys/class/net/<dev>/flags. Flags which can be toggled through sysfs
* are annotated below, note that only a few flags can be toggled and some
- * other flags are always always preserved from the original net_device flags
+ * other flags are always preserved from the original net_device flags
* even if you try to set them via sysfs. Flags which are always preserved
* are kept under the flag grouping @IFF_VOLATILE. Flags which are volatile
* are annotated below as such.
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 18db14477bdd..0536eefff9bf 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -137,11 +137,17 @@ struct bridge_vlan_info {
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
- * [MDBA_MDB_ENTRY_INFO]
+ * [MDBA_MDB_ENTRY_INFO] {
+ * struct br_mdb_entry
+ * [MDBA_MDB_EATTR attributes]
+ * }
* }
* }
* [MDBA_ROUTER] = {
- * [MDBA_ROUTER_PORT]
+ * [MDBA_ROUTER_PORT] = {
+ * u32 ifindex
+ * [MDBA_ROUTER_PATTR attributes]
+ * }
* }
*/
enum {
@@ -166,6 +172,22 @@ enum {
};
#define MDBA_MDB_ENTRY_MAX (__MDBA_MDB_ENTRY_MAX - 1)
+/* per mdb entry additional attributes */
+enum {
+ MDBA_MDB_EATTR_UNSPEC,
+ MDBA_MDB_EATTR_TIMER,
+ __MDBA_MDB_EATTR_MAX
+};
+#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
+
+/* multicast router types */
+enum {
+ MDB_RTR_TYPE_DISABLED,
+ MDB_RTR_TYPE_TEMP_QUERY,
+ MDB_RTR_TYPE_PERM,
+ MDB_RTR_TYPE_TEMP
+};
+
enum {
MDBA_ROUTER_UNSPEC,
MDBA_ROUTER_PORT,
@@ -173,6 +195,15 @@ enum {
};
#define MDBA_ROUTER_MAX (__MDBA_ROUTER_MAX - 1)
+/* router port attributes */
+enum {
+ MDBA_ROUTER_PATTR_UNSPEC,
+ MDBA_ROUTER_PATTR_TIMER,
+ MDBA_ROUTER_PATTR_TYPE,
+ __MDBA_ROUTER_PATTR_MAX
+};
+#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1)
+
struct br_port_msg {
__u8 family;
__u32 ifindex;
@@ -183,6 +214,8 @@ struct br_mdb_entry {
#define MDB_TEMPORARY 0
#define MDB_PERMANENT 1
__u8 state;
+#define MDB_FLAGS_OFFLOAD (1 << 0)
+ __u8 flags;
__u16 vid;
struct {
union {
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index ea9221b0331a..4a93051c578c 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -83,6 +83,7 @@
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
#define ETH_P_TIPC 0x88CA /* TIPC */
+#define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */
#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
#define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a30b78090594..c488066fb53a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -35,6 +35,8 @@ struct rtnl_link_stats {
/* for cslip etc */
__u32 rx_compressed;
__u32 tx_compressed;
+
+ __u32 rx_nohandler; /* dropped, no handler found */
};
/* The main device statistics structure */
@@ -68,6 +70,8 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
+
+ __u64 rx_nohandler; /* dropped, no handler found */
};
/* The struct should be in sync with struct ifmap */
@@ -149,6 +153,8 @@ enum {
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
IFLA_PROTO_DOWN,
+ IFLA_GSO_MAX_SEGS,
+ IFLA_GSO_MAX_SIZE,
__IFLA_MAX
};
@@ -401,6 +407,43 @@ enum {
#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+enum {
+ IFLA_VRF_PORT_UNSPEC,
+ IFLA_VRF_PORT_TABLE,
+ __IFLA_VRF_PORT_MAX
+};
+
+#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1)
+
+/* MACSEC section */
+enum {
+ IFLA_MACSEC_UNSPEC,
+ IFLA_MACSEC_SCI,
+ IFLA_MACSEC_PORT,
+ IFLA_MACSEC_ICV_LEN,
+ IFLA_MACSEC_CIPHER_SUITE,
+ IFLA_MACSEC_WINDOW,
+ IFLA_MACSEC_ENCODING_SA,
+ IFLA_MACSEC_ENCRYPT,
+ IFLA_MACSEC_PROTECT,
+ IFLA_MACSEC_INC_SCI,
+ IFLA_MACSEC_ES,
+ IFLA_MACSEC_SCB,
+ IFLA_MACSEC_REPLAY_PROTECT,
+ IFLA_MACSEC_VALIDATION,
+ __IFLA_MACSEC_MAX,
+};
+
+#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
+
+enum macsec_validation_type {
+ MACSEC_VALIDATE_DISABLED = 0,
+ MACSEC_VALIDATE_CHECK = 1,
+ MACSEC_VALIDATE_STRICT = 2,
+ __MACSEC_VALIDATE_END,
+ MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
+};
+
/* IPVLAN section */
enum {
IFLA_IPVLAN_UNSPEC,
@@ -444,6 +487,7 @@ enum {
IFLA_VXLAN_GBP,
IFLA_VXLAN_REMCSUM_NOPARTIAL,
IFLA_VXLAN_COLLECT_METADATA,
+ IFLA_VXLAN_LABEL,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -466,6 +510,7 @@ enum {
IFLA_GENEVE_UDP_CSUM,
IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
+ IFLA_GENEVE_LABEL,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -556,6 +601,8 @@ enum {
*/
IFLA_VF_STATS, /* network device statistics */
IFLA_VF_TRUST, /* Trust VF */
+ IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
+ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
__IFLA_VF_MAX,
};
@@ -588,6 +635,11 @@ struct ifla_vf_spoofchk {
__u32 setting;
};
+struct ifla_vf_guid {
+ __u32 vf;
+ __u64 guid;
+};
+
enum {
IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
IFLA_VF_LINK_STATE_ENABLE, /* link always up */
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
new file mode 100644
index 000000000000..26b0d1e3e3e7
--- /dev/null
+++ b/include/uapi/linux/if_macsec.h
@@ -0,0 +1,161 @@
+/*
+ * include/uapi/linux/if_macsec.h - MACsec device
+ *
+ * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI_MACSEC_H
+#define _UAPI_MACSEC_H
+
+#include <linux/types.h>
+
+#define MACSEC_GENL_NAME "macsec"
+#define MACSEC_GENL_VERSION 1
+
+#define MACSEC_MAX_KEY_LEN 128
+
+#define DEFAULT_CIPHER_ID 0x0080020001000001ULL
+#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
+
+#define MACSEC_MIN_ICV_LEN 8
+#define MACSEC_MAX_ICV_LEN 32
+
+enum macsec_attrs {
+ MACSEC_ATTR_UNSPEC,
+ MACSEC_ATTR_IFINDEX, /* u32, ifindex of the MACsec netdevice */
+ MACSEC_ATTR_RXSC_CONFIG, /* config, nested macsec_rxsc_attrs */
+ MACSEC_ATTR_SA_CONFIG, /* config, nested macsec_sa_attrs */
+ MACSEC_ATTR_SECY, /* dump, nested macsec_secy_attrs */
+ MACSEC_ATTR_TXSA_LIST, /* dump, nested, macsec_sa_attrs for each TXSA */
+ MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */
+ MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */
+ MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */
+ __MACSEC_ATTR_END,
+ NUM_MACSEC_ATTR = __MACSEC_ATTR_END,
+ MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1,
+};
+
+enum macsec_secy_attrs {
+ MACSEC_SECY_ATTR_UNSPEC,
+ MACSEC_SECY_ATTR_SCI,
+ MACSEC_SECY_ATTR_ENCODING_SA,
+ MACSEC_SECY_ATTR_WINDOW,
+ MACSEC_SECY_ATTR_CIPHER_SUITE,
+ MACSEC_SECY_ATTR_ICV_LEN,
+ MACSEC_SECY_ATTR_PROTECT,
+ MACSEC_SECY_ATTR_REPLAY,
+ MACSEC_SECY_ATTR_OPER,
+ MACSEC_SECY_ATTR_VALIDATE,
+ MACSEC_SECY_ATTR_ENCRYPT,
+ MACSEC_SECY_ATTR_INC_SCI,
+ MACSEC_SECY_ATTR_ES,
+ MACSEC_SECY_ATTR_SCB,
+ __MACSEC_SECY_ATTR_END,
+ NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END,
+ MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1,
+};
+
+enum macsec_rxsc_attrs {
+ MACSEC_RXSC_ATTR_UNSPEC,
+ MACSEC_RXSC_ATTR_SCI, /* config/dump, u64 */
+ MACSEC_RXSC_ATTR_ACTIVE, /* config/dump, u8 0..1 */
+ MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */
+ MACSEC_RXSC_ATTR_STATS, /* dump, nested, macsec_rxsc_stats_attr */
+ __MACSEC_RXSC_ATTR_END,
+ NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END,
+ MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1,
+};
+
+enum macsec_sa_attrs {
+ MACSEC_SA_ATTR_UNSPEC,
+ MACSEC_SA_ATTR_AN, /* config/dump, u8 0..3 */
+ MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
+ MACSEC_SA_ATTR_PN, /* config/dump, u32 */
+ MACSEC_SA_ATTR_KEY, /* config, data */
+ MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */
+ MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */
+ __MACSEC_SA_ATTR_END,
+ NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
+ MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
+};
+
+enum macsec_nl_commands {
+ MACSEC_CMD_GET_TXSC,
+ MACSEC_CMD_ADD_RXSC,
+ MACSEC_CMD_DEL_RXSC,
+ MACSEC_CMD_UPD_RXSC,
+ MACSEC_CMD_ADD_TXSA,
+ MACSEC_CMD_DEL_TXSA,
+ MACSEC_CMD_UPD_TXSA,
+ MACSEC_CMD_ADD_RXSA,
+ MACSEC_CMD_DEL_RXSA,
+ MACSEC_CMD_UPD_RXSA,
+};
+
+/* u64 per-RXSC stats */
+enum macsec_rxsc_stats_attr {
+ MACSEC_RXSC_STATS_ATTR_UNSPEC,
+ MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
+ MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+ MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ __MACSEC_RXSC_STATS_ATTR_END,
+ NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END,
+ MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1,
+};
+
+/* u32 per-{RX,TX}SA stats */
+enum macsec_sa_stats_attr {
+ MACSEC_SA_STATS_ATTR_UNSPEC,
+ MACSEC_SA_STATS_ATTR_IN_PKTS_OK,
+ MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
+ MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
+ MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+ MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
+ MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+ __MACSEC_SA_STATS_ATTR_END,
+ NUM_MACSEC_SA_STATS_ATTR = __MACSEC_SA_STATS_ATTR_END,
+ MACSEC_SA_STATS_ATTR_MAX = __MACSEC_SA_STATS_ATTR_END - 1,
+};
+
+/* u64 per-TXSC stats */
+enum macsec_txsc_stats_attr {
+ MACSEC_TXSC_STATS_ATTR_UNSPEC,
+ MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
+ MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+ MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
+ MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+ __MACSEC_TXSC_STATS_ATTR_END,
+ NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END,
+ MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1,
+};
+
+/* u64 per-SecY stats */
+enum macsec_secy_stats_attr {
+ MACSEC_SECY_STATS_ATTR_UNSPEC,
+ MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
+ MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
+ MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
+ MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
+ MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
+ MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
+ MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
+ MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+ __MACSEC_SECY_STATS_ATTR_END,
+ NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END,
+ MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1,
+};
+
+#endif /* _UAPI_MACSEC_H */
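Not part of the header: the genetlink family is driven with ordinary netlink messages, for example via libnl-3. The sketch below only resolves the "macsec" family and requests a dump of transmit secure channels; the use of libnl-3 and the omission of a reply parser are assumptions made for brevity.

/* Sketch: dump MACsec TX secure channels over the "macsec" genl family. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/if_macsec.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;

	/* Resolve the numeric id registered for MACSEC_GENL_NAME. */
	family = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, MACSEC_CMD_GET_TXSC, MACSEC_GENL_VERSION);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);	/* replies carry MACSEC_ATTR_* attributes */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}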
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 08f894d2ddbd..f291569768dd 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -165,6 +165,8 @@ enum
IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
+ IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
+ IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 38b4fef20219..395876060f50 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -174,6 +174,9 @@ enum {
DEVCONF_USE_OIF_ADDRS_ONLY,
DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
+ DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
+ DEVCONF_DROP_UNSOLICITED_NA,
+ DEVCONF_KEEP_ADDR_ON_DOWN,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kcm.h b/include/uapi/linux/kcm.h
new file mode 100644
index 000000000000..a5a530940b99
--- /dev/null
+++ b/include/uapi/linux/kcm.h
@@ -0,0 +1,40 @@
+/*
+ * Kernel Connection Multiplexor
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * User API to clone KCM sockets and attach transport socket to a KCM
+ * multiplexor.
+ */
+
+#ifndef KCM_KERNEL_H
+#define KCM_KERNEL_H
+
+struct kcm_attach {
+ int fd;
+ int bpf_fd;
+};
+
+struct kcm_unattach {
+ int fd;
+};
+
+struct kcm_clone {
+ int fd;
+};
+
+#define SIOCKCMATTACH (SIOCPROTOPRIVATE + 0)
+#define SIOCKCMUNATTACH (SIOCPROTOPRIVATE + 1)
+#define SIOCKCMCLONE (SIOCPROTOPRIVATE + 2)
+
+#define KCMPROTO_CONNECTED 0
+
+/* Socket options */
+#define KCM_RECV_DISABLE 1
+
+#endif
+
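Outside the header, typical usage is to create a KCM socket and attach an established TCP socket together with a BPF program that returns each message's length. In this sketch the AF_KCM constant (41 in this kernel), the connected tcp_fd and the already-loaded bpf_prog_fd are assumptions; loading the framing program is out of scope.

/* Sketch: attach a connected TCP socket to a fresh KCM multiplexor. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/kcm.h>
#include <linux/sockios.h>	/* SIOCPROTOPRIVATE */

#ifndef AF_KCM
#define AF_KCM 41		/* address family added alongside this header */
#endif

int kcm_attach_tcp(int tcp_fd, int bpf_prog_fd)
{
	struct kcm_attach attach = {
		.fd = tcp_fd,		/* established TCP transport socket */
		.bpf_fd = bpf_prog_fd,	/* BPF program that sizes each message */
	};
	int kcm_fd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);

	if (kcm_fd < 0)
		return -1;
	if (ioctl(kcm_fd, SIOCKCMATTACH, &attach) < 0) {
		perror("SIOCKCMATTACH");
		return -1;
	}
	return kcm_fd;	/* send()/recv() now operate on whole messages */
}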
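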
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
new file mode 100644
index 000000000000..574e22ec640d
--- /dev/null
+++ b/include/uapi/linux/kcov.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_KCOV_IOCTLS_H
+#define _LINUX_KCOV_IOCTLS_H
+
+#include <linux/types.h>
+
+#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+#define KCOV_ENABLE _IO('c', 100)
+#define KCOV_DISABLE _IO('c', 101)
+
+#endif /* _LINUX_KCOV_IOCTLS_H */
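The intended ioctl sequence (as documented with the kcov patch) is: open the debugfs node, size the trace buffer with KCOV_INIT_TRACE, mmap it, toggle collection with KCOV_ENABLE/KCOV_DISABLE, and read the number of recorded PCs from word 0. A sketch follows; the /sys/kernel/debug/kcov path and the buffer size are the usual choices but still assumptions here.

/* Sketch: collect kernel coverage for one syscall in the current task. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>

#define COVER_SIZE (64 << 10)	/* trace buffer size in unsigned longs */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover, i, n;

	if (fd < 0)
		return 1;
	/* KCOV_INIT_TRACE takes the buffer size in words, not bytes. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		return 1;
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;

	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	if (ioctl(fd, KCOV_ENABLE, 0))
		return 1;

	read(-1, NULL, 0);	/* the syscall whose coverage we want */

	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}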
diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 321e399457f5..466073f0ce46 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -9,5 +9,6 @@
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif /* _UAPI_LINUX_KERNEL_H */
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index ce91215cf7e6..5062fb5751e1 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -1,6 +1,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sockios.h>
@@ -46,14 +47,8 @@ typedef unsigned short mifi_t;
typedef __u32 if_mask;
#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */
-#if !defined(__KERNEL__)
-#if !defined(DIV_ROUND_UP)
-#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y))
-#endif
-#endif
-
typedef struct if_set {
- if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
+ if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
} if_set;
#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 23cbd34e4ac7..45dfad509c4d 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -19,6 +19,7 @@ enum {
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
+#define NETCONFA_ALL -1
#define NETCONFA_IFINDEX_ALL -1
#define NETCONFA_IFINDEX_DEFAULT -2
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 319f47128db8..6d074d14ee27 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -20,9 +20,15 @@ enum ip_conntrack_info {
IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
- IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
- /* Number of distinct IP_CT types (no NEW in reply dirn). */
- IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
+ /* No NEW in reply direction. */
+
+ /* Number of distinct IP_CT types. */
+ IP_CT_NUMBER,
+
+ /* only for userspace compatibility */
+#ifndef __KERNEL__
+ IP_CT_NEW_REPLY = IP_CT_NUMBER,
+#endif
};
#define NF_CT_STATE_INVALID_BIT (1 << 0)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index be41ffc128b8..eeffde196f80 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -681,6 +681,7 @@ enum nft_exthdr_attributes {
* @NFT_META_IIFGROUP: packet input interface group
* @NFT_META_OIFGROUP: packet output interface group
* @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
+ * @NFT_META_PRANDOM: a 32bit pseudo-random number
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -707,6 +708,7 @@ enum nft_meta_keys {
NFT_META_IIFGROUP,
NFT_META_OIFGROUP,
NFT_META_CGROUP,
+ NFT_META_PRANDOM,
};
/**
@@ -949,10 +951,14 @@ enum nft_nat_attributes {
* enum nft_masq_attributes - nf_tables masquerade expression attributes
*
* @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
+ * @NFTA_MASQ_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_MASQ_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
*/
enum nft_masq_attributes {
NFTA_MASQ_UNSPEC,
NFTA_MASQ_FLAGS,
+ NFTA_MASQ_REG_PROTO_MIN,
+ NFTA_MASQ_REG_PROTO_MAX,
__NFTA_MASQ_MAX
};
#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1)
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index f095155d8749..0dba4e4ed2be 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -107,8 +107,10 @@ struct nlmsgerr {
#define NETLINK_PKTINFO 3
#define NETLINK_BROADCAST_ERROR 4
#define NETLINK_NO_ENOBUFS 5
+#ifndef __KERNEL__
#define NETLINK_RX_RING 6
#define NETLINK_TX_RING 7
+#endif
#define NETLINK_LISTEN_ALL_NSID 8
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
@@ -134,6 +136,7 @@ struct nl_mmap_hdr {
__u32 nm_gid;
};
+#ifndef __KERNEL__
enum nl_mmap_status {
NL_MMAP_STATUS_UNUSED,
NL_MMAP_STATUS_RESERVED,
@@ -145,6 +148,7 @@ enum nl_mmap_status {
#define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO
#define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT)
#define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr))
+#endif
#define NET_MAJOR 36 /* Major 36 is reserved for networking */
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index f2159d30d1f5..d79399394b46 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -48,6 +48,8 @@ enum {
#define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */
#define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */
+#ifndef __KERNEL__
#define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */
+#endif
#endif
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5b7b5ebe7ca8..5a30a7563633 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1727,6 +1727,8 @@ enum nl80211_commands {
* underlying device supports these minimal RRM features:
* %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES,
* %NL80211_FEATURE_QUIET,
+ * Or, if global RRM is supported, see:
+ * %NL80211_EXT_FEATURE_RRM
* If this flag is used, driver must add the Power Capabilities IE to the
* association request. In addition, it must also set the RRM capability
* flag in the association request's Capability Info field.
@@ -1789,6 +1791,10 @@ enum nl80211_commands {
* thus it must not specify the number of iterations, only the interval
* between scans. The scan plans are executed sequentially.
* Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ * @NL80211_ATTR_PBSS: flag attribute. If set, operation takes place
+ * in a PBSS. Specified in %NL80211_CMD_CONNECT to request
+ * connecting to a PCP, and in %NL80211_CMD_START_AP to start
+ * a PCP instead of AP. Relevant for DMG networks only.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2164,6 +2170,8 @@ enum nl80211_attrs {
NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
NL80211_ATTR_SCHED_SCAN_PLANS,
+ NL80211_ATTR_PBSS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4396,12 +4404,18 @@ enum nl80211_feature_flags {
/**
* enum nl80211_ext_feature_index - bit index of extended features.
* @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
+ * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user
+ * can request to use RRM (see %NL80211_ATTR_USE_RRM) with
+ * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
+ * the ASSOC_REQ_USE_RRM flag in the association request even if
+ * NL80211_FEATURE_QUIET is not advertised.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_VHT_IBSS,
+ NL80211_EXT_FEATURE_RRM,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a27222d5b413..616d04761730 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -454,6 +454,14 @@ struct ovs_key_ct_labels {
#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */
#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */
#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */
+#define OVS_CS_F_SRC_NAT 0x40 /* Packet's source address/port was
+ * mangled by NAT.
+ */
+#define OVS_CS_F_DST_NAT 0x80 /* Packet's destination address/port
+ * was mangled by NAT.
+ */
+
+#define OVS_CS_F_NAT_MASK (OVS_CS_F_SRC_NAT | OVS_CS_F_DST_NAT)
/**
* enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@ -632,6 +640,8 @@ struct ovs_action_hash {
* mask. For each bit set in the mask, the corresponding bit in the value is
* copied to the connection tracking label field in the connection.
* @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
+ * @OVS_CT_ATTR_NAT: Nested OVS_NAT_ATTR_* for performing L3 network address
+ * translation (NAT) on the packet.
*/
enum ovs_ct_attr {
OVS_CT_ATTR_UNSPEC,
@@ -641,12 +651,51 @@ enum ovs_ct_attr {
OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */
OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of
related connections. */
+ OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */
__OVS_CT_ATTR_MAX
};
#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
/**
+ * enum ovs_nat_attr - Attributes for %OVS_CT_ATTR_NAT.
+ *
+ * @OVS_NAT_ATTR_SRC: Flag for Source NAT (mangle source address/port).
+ * @OVS_NAT_ATTR_DST: Flag for Destination NAT (mangle destination
+ * address/port). Only one of (@OVS_NAT_ATTR_SRC, @OVS_NAT_ATTR_DST) may be
+ * specified. Effective only for packets for ct_state NEW connections.
+ * Packets of committed connections are mangled by the NAT action according to
+ * the committed NAT type regardless of the flags specified. As a corollary, a
+ * NAT action without a NAT type flag will only mangle packets of committed
+ * connections. The following NAT attributes only apply for NEW
+ * (non-committed) connections, and they may be included only when the CT
+ * action has the @OVS_CT_ATTR_COMMIT flag and either @OVS_NAT_ATTR_SRC or
+ * @OVS_NAT_ATTR_DST is also included.
+ * @OVS_NAT_ATTR_IP_MIN: struct in_addr or struct in6_addr
+ * @OVS_NAT_ATTR_IP_MAX: struct in_addr or struct in6_addr
+ * @OVS_NAT_ATTR_PROTO_MIN: u16 L4 protocol specific lower boundary (port)
+ * @OVS_NAT_ATTR_PROTO_MAX: u16 L4 protocol specific upper boundary (port)
+ * @OVS_NAT_ATTR_PERSISTENT: Flag for persistent IP mapping across reboots
+ * @OVS_NAT_ATTR_PROTO_HASH: Flag for pseudo random L4 port mapping (MD5)
+ * @OVS_NAT_ATTR_PROTO_RANDOM: Flag for fully randomized L4 port mapping
+ */
+enum ovs_nat_attr {
+ OVS_NAT_ATTR_UNSPEC,
+ OVS_NAT_ATTR_SRC,
+ OVS_NAT_ATTR_DST,
+ OVS_NAT_ATTR_IP_MIN,
+ OVS_NAT_ATTR_IP_MAX,
+ OVS_NAT_ATTR_PROTO_MIN,
+ OVS_NAT_ATTR_PROTO_MAX,
+ OVS_NAT_ATTR_PERSISTENT,
+ OVS_NAT_ATTR_PROTO_HASH,
+ OVS_NAT_ATTR_PROTO_RANDOM,
+ __OVS_NAT_ATTR_MAX,
+};
+
+#define OVS_NAT_ATTR_MAX (__OVS_NAT_ATTR_MAX - 1)
+
+/**
* enum ovs_action_attr - Action types.
*
* @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 439873775d49..c43c5f78b9c4 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -172,6 +172,7 @@ enum {
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
+ TCA_U32_FLAGS,
__TCA_U32_MAX
};
@@ -416,6 +417,8 @@ enum {
TCA_FLOWER_KEY_TCP_DST, /* be16 */
TCA_FLOWER_KEY_UDP_SRC, /* be16 */
TCA_FLOWER_KEY_UDP_DST, /* be16 */
+
+ TCA_FLOWER_FLAGS,
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 9c95b2c1c88a..38baddb807f5 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -71,6 +71,7 @@
#define Q_SETINFO 0x800006 /* set information about quota files */
#define Q_GETQUOTA 0x800007 /* get user quota structure */
#define Q_SETQUOTA 0x800008 /* set user quota structure */
+#define Q_GETNEXTQUOTA 0x800009 /* get disk limits and usage >= ID */
/* Quota format type IDs */
#define QFMT_VFS_OLD 1
@@ -119,6 +120,19 @@ struct if_dqblk {
__u32 dqb_valid;
};
+struct if_nextdqblk {
+ __u64 dqb_bhardlimit;
+ __u64 dqb_bsoftlimit;
+ __u64 dqb_curspace;
+ __u64 dqb_ihardlimit;
+ __u64 dqb_isoftlimit;
+ __u64 dqb_curinodes;
+ __u64 dqb_btime;
+ __u64 dqb_itime;
+ __u32 dqb_valid;
+ __u32 dqb_id;
+};
+
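Q_GETNEXTQUOTA lets a caller walk all set quotas without knowing the ids in advance: pass a starting id and the reply's @dqb_id reports the id actually found. A rough userspace loop follows (a raw syscall is used to sidestep libc header differences; the /dev/sda1 device and user quotas are assumptions):

/* Sketch: iterate user quotas on one filesystem with Q_GETNEXTQUOTA. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/quota.h>

int main(void)
{
	struct if_nextdqblk dq;
	unsigned int id = 0;

	/* Walk every user quota on the filesystem backed by /dev/sda1 */
	while (syscall(SYS_quotactl, QCMD(Q_GETNEXTQUOTA, USRQUOTA),
		       "/dev/sda1", id, &dq) == 0) {
		printf("uid %u: %llu bytes used\n", dq.dqb_id,
		       (unsigned long long)dq.dqb_curspace);
		id = dq.dqb_id + 1;	/* continue after the id just returned */
	}
	return 0;
}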
/*
* Structure used for setting quota information about file via quotactl
* Following flags are used to specify which fields are valid
diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
index 058757f7a733..2e00dcebebd0 100644
--- a/include/uapi/linux/rfkill.h
+++ b/include/uapi/linux/rfkill.h
@@ -59,6 +59,8 @@ enum rfkill_type {
* @RFKILL_OP_DEL: a device was removed
* @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device
* @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all)
+ * into a state, also updating the default state used for devices that
+ * are hot-plugged later.
*/
enum rfkill_operation {
RFKILL_OP_ADD = 0,
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index cc89ddefa926..5f0fe019a720 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -21,8 +21,7 @@
#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
-/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
- and is now available for re-use. */
+#define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */
#define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
#define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
#define CLONE_NEWUSER 0x10000000 /* New user namespace */
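CLONE_NEWCGROUP reuses the bit freed by CLONE_STOPPED and is passed to clone(2) or unshare(2) like the other namespace flags. A minimal sketch (the flag value comes from this header; CAP_SYS_ADMIN in the current user namespace is assumed):

/* Sketch: move the calling process into a new cgroup namespace. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#ifndef CLONE_NEWCGROUP
#define CLONE_NEWCGROUP 0x02000000	/* value introduced by this patch */
#endif

int main(void)
{
	/* Children created after this see their own cgroup root in
	 * /proc/self/cgroup.
	 */
	if (unshare(CLONE_NEWCGROUP) < 0) {
		perror("unshare(CLONE_NEWCGROUP)");
		return 1;
	}
	printf("now in a new cgroup namespace\n");
	return 0;
}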
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 0e011eb91b5d..3f10e5317b46 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -151,7 +151,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
-static inline __u16 __swab16p(const __u16 *p)
+static __always_inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
@@ -164,7 +164,7 @@ static inline __u16 __swab16p(const __u16 *p)
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
-static inline __u32 __swab32p(const __u32 *p)
+static __always_inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
@@ -177,7 +177,7 @@ static inline __u32 __swab32p(const __u32 *p)
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
-static inline __u64 __swab64p(const __u64 *p)
+static __always_inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
@@ -232,7 +232,7 @@ static inline void __swab16s(__u16 *p)
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
-static inline void __swab32s(__u32 *p)
+static __always_inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
@@ -245,7 +245,7 @@ static inline void __swab32s(__u32 *p)
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
-static inline void __swab64s(__u64 *p)
+static __always_inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 95c6521d8a95..c506cddb8165 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -41,6 +41,7 @@
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
struct tcmu_mailbox {
__u16 version;
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
new file mode 100644
index 000000000000..d648ff66586f
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -0,0 +1,38 @@
+#ifndef __UAPI_TC_IFE_H
+#define __UAPI_TC_IFE_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_IFE 25
+/* Flag bits for now just encoding/decoding; mutually exclusive */
+#define IFE_ENCODE 1
+#define IFE_DECODE 0
+
+struct tc_ife {
+ tc_gen;
+ __u16 flags;
+};
+
+/*XXX: We need to encode the total number of bytes consumed */
+enum {
+ TCA_IFE_UNSPEC,
+ TCA_IFE_PARMS,
+ TCA_IFE_TM,
+ TCA_IFE_DMAC,
+ TCA_IFE_SMAC,
+ TCA_IFE_TYPE,
+ TCA_IFE_METALST,
+ __TCA_IFE_MAX
+};
+#define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
+
+#define IFE_META_SKBMARK 1
+#define IFE_META_HASHID 2
+#define IFE_META_PRIO 3
+#define IFE_META_QMAP 4
+/*Can be overridden at runtime by module option*/
+#define __IFE_META_MAX 5
+#define IFE_META_MAX (__IFE_META_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 65a77b071e22..53e8e3fe6b1b 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -196,6 +196,11 @@ struct tcp_info {
__u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
__u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */
__u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */
+
+ __u32 tcpi_notsent_bytes;
+ __u32 tcpi_min_rtt;
+ __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
+ __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
};
/* for TCP_MD5SIG socket option */
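The new counters are reported through the usual TCP_INFO getsockopt; existing callers are unaffected because the structure only grows at the end. A hedged sketch reading them (the connected tcp_fd is an assumption):

/* Sketch: print the counters added above for a connected TCP socket. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

void print_tcp_counters(int tcp_fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(tcp_fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0) {
		perror("TCP_INFO");
		return;
	}
	/* Older kernels return a shorter struct; the tail stays zeroed. */
	printf("notsent %u bytes, min_rtt %u us, data segs in/out %u/%u\n",
	       info.tcpi_notsent_bytes, info.tcpi_min_rtt,
	       info.tcpi_data_segs_in, info.tcpi_data_segs_out);
}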
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index ab3731917bac..61a8777178c6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -126,6 +126,12 @@ struct vhost_memory {
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
/* Set eventfd to signal an error */
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+/* Set busy loop timeout (in us) */
+#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \
+ struct vhost_vring_state)
+/* Get busy loop timeout (in us) */
+#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
+ struct vhost_vring_state)
/* VHOST_NET specific defines */
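These two ioctls take the usual struct vhost_vring_state, with @num carrying the timeout in microseconds. A hedged sketch (the /dev/vhost-net path, ring index 0 and a 50us budget are assumptions; a real user also sets up the memory table and rings):

/* Sketch: set a busy-poll budget on one vhost-net virtqueue. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	struct vhost_vring_state busy = {
		.index = 0,	/* first ring of this vhost-net instance */
		.num = 50,	/* poll up to 50us before sleeping */
	};
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VHOST_SET_OWNER) < 0)
		return 1;
	if (ioctl(fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &busy) < 0) {
		perror("VHOST_SET_VRING_BUSYLOOP_TIMEOUT");
		return 1;
	}
	close(fd);
	return 0;
}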
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index d7f1cbc3766c..343d7ddefe04 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -51,7 +51,8 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
-#define VIRTIO_BALLOON_S_NR 6
+#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
+#define VIRTIO_BALLOON_S_NR 7
/*
* Memory statistics structure.
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 19c66fcbab8a..9ebe4d968dd5 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -43,11 +43,11 @@
#ifndef VIRTIO_BLK_NO_LEGACY
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */
#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
#ifndef __KERNEL__
-/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
-#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
+/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
+#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
#endif
#endif /* !VIRTIO_BLK_NO_LEGACY */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 1e889aa8a36e..8cd334f99ddc 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -55,11 +55,35 @@ struct cxl_afu_id {
__u64 reserved6;
};
+/* base adapter image header is included in the image */
+#define CXL_AI_NEED_HEADER 0x0000000000000001ULL
+#define CXL_AI_ALL CXL_AI_NEED_HEADER
+
+#define CXL_AI_HEADER_SIZE 128
+#define CXL_AI_BUFFER_SIZE 4096
+#define CXL_AI_MAX_ENTRIES 256
+#define CXL_AI_MAX_CHUNK_SIZE (CXL_AI_BUFFER_SIZE * CXL_AI_MAX_ENTRIES)
+
+struct cxl_adapter_image {
+ __u64 flags;
+ __u64 data;
+ __u64 len_data;
+ __u64 len_image;
+ __u64 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+};
+
/* ioctl numbers */
#define CXL_MAGIC 0xCA
+/* AFU devices */
#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
+/* adapter devices */
+#define CXL_IOCTL_DOWNLOAD_IMAGE _IOW(CXL_MAGIC, 0x0A, struct cxl_adapter_image)
+#define CXL_IOCTL_VALIDATE_IMAGE _IOW(CXL_MAGIC, 0x0B, struct cxl_adapter_image)
#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 288694e422fb..a533cecab14f 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -66,7 +66,7 @@
* The major version changes when data structures change in an incompatible
* way. The driver must be the same for initialization to succeed.
*/
-#define HFI1_USER_SWMAJOR 4
+#define HFI1_USER_SWMAJOR 5
/*
* Minor version differences are always compatible
@@ -93,7 +93,7 @@
#define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/
#define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */
#define HFI1_CAP_NODROP_EGR_FULL (1UL << 9) /* Don't drop on EGR buffs full */
-#define HFI1_CAP_TID_UNMAP (1UL << 10) /* Enable Expected TID caching */
+#define HFI1_CAP_TID_UNMAP (1UL << 10) /* Disable Expected TID caching */
#define HFI1_CAP_PRINT_UNIMPL (1UL << 11) /* Show for unimplemented feats */
#define HFI1_CAP_ALLOW_PERM_JKEY (1UL << 12) /* Allow use of permissive JKEY */
#define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */
@@ -134,6 +134,7 @@
#define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */
#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
+#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */
/* separate EPROM commands from normal PSM commands */
#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
@@ -147,13 +148,15 @@
#define _HFI1_EVENT_LID_CHANGE_BIT 2
#define _HFI1_EVENT_LMC_CHANGE_BIT 3
#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4
-#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT
+#define _HFI1_EVENT_TID_MMU_NOTIFY_BIT 5
+#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_TID_MMU_NOTIFY_BIT
#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT)
#define HFI1_EVENT_LINKDOWN (1UL << _HFI1_EVENT_LINKDOWN_BIT)
#define HFI1_EVENT_LID_CHANGE (1UL << _HFI1_EVENT_LID_CHANGE_BIT)
#define HFI1_EVENT_LMC_CHANGE (1UL << _HFI1_EVENT_LMC_CHANGE_BIT)
#define HFI1_EVENT_SL2VL_CHANGE (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT)
+#define HFI1_EVENT_TID_MMU_NOTIFY (1UL << _HFI1_EVENT_TID_MMU_NOTIFY_BIT)
/*
* These are the status bits readable (in ASCII form, 64bit value)
@@ -238,11 +241,6 @@ struct hfi1_tid_info {
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
__u32 length;
- /*
- * pointer to bitmap of TIDs used for this call;
- * checked for being large enough at open
- */
- __u64 tidmap;
};
struct hfi1_cmd {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index f7d7b6fec935..6e373d151cad 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -8,6 +8,7 @@ enum {
RDMA_NL_IWCM,
RDMA_NL_RSVD,
RDMA_NL_LS, /* RDMA Local Services */
+ RDMA_NL_I40IW,
RDMA_NL_NUM_CLIENTS
};
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index 69279c013ac4..56830d1dc762 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -45,21 +45,6 @@ struct encoder_tfp410_platform_data {
int data_lines;
};
-/**
- * encoder_tpd12s015 platform data
- * @name: name for this display entity
- * @ct_cp_hpd_gpio: CT_CP_HPD gpio number
- * @ls_oe_gpio: LS_OE gpio number
- * @hpd_gpio: HPD gpio number
- */
-struct encoder_tpd12s015_platform_data {
- const char *name;
- const char *source;
-
- int ct_cp_hpd_gpio;
- int ls_oe_gpio;
- int hpd_gpio;
-};
/**
* connector_dvi platform data
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 295b41e20d8e..8e14ad7327c9 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -51,6 +51,7 @@
#define DISPC_IRQ_FRAMEDONEWB (1 << 23)
#define DISPC_IRQ_FRAMEDONETV (1 << 24)
#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26)
#define DISPC_IRQ_SYNC_LOST3 (1 << 27)
#define DISPC_IRQ_VSYNC3 (1 << 28)
#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
@@ -331,8 +332,6 @@ struct omap_dss_board_info {
/* Init with the board info */
extern int omap_display_init(struct omap_dss_board_info *board_data);
-/* HDMI mux init*/
-extern int omap_hdmi_init(enum omap_hdmi_flags flags);
struct omap_video_timings {
/* Unit: pixels */
@@ -366,6 +365,8 @@ struct omap_video_timings {
enum omap_dss_signal_level de_level;
/* Pixel clock edges to drive HSYNC and VSYNC signals */
enum omap_dss_signal_edge sync_pclk_edge;
+
+ bool double_pixel;
};
/* Hardcoded timings for tv modes. Venc only uses these to
@@ -769,6 +770,7 @@ struct omap_dss_device {
/* DISPC channel for this output */
enum omap_channel dispc_channel;
+ bool dispc_channel_connected;
/* output instance */
enum omap_dss_output_id id;
@@ -782,13 +784,6 @@ struct omap_dss_device {
struct omap_dss_device *dst;
};
-struct omap_dss_hdmi_data
-{
- int ct_cp_hpd_gpio;
- int ls_oe_gpio;
- int hpd_gpio;
-};
-
struct omap_dss_driver {
int (*probe)(struct omap_dss_device *);
void (*remove)(struct omap_dss_device *);
@@ -897,85 +892,9 @@ typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
-u32 dispc_read_irqstatus(void);
-void dispc_clear_irqstatus(u32 mask);
-u32 dispc_read_irqenable(void);
-void dispc_write_irqenable(u32 mask);
-
-int dispc_request_irq(irq_handler_t handler, void *dev_id);
-void dispc_free_irq(void *dev_id);
-
-int dispc_runtime_get(void);
-void dispc_runtime_put(void);
-
-void dispc_mgr_enable(enum omap_channel channel, bool enable);
-bool dispc_mgr_is_enabled(enum omap_channel channel);
-u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
-u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
-u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
-bool dispc_mgr_go_busy(enum omap_channel channel);
-void dispc_mgr_go(enum omap_channel channel);
-void dispc_mgr_set_lcd_config(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
-void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
-void dispc_mgr_setup(enum omap_channel channel,
- const struct omap_overlay_manager_info *info);
-
-int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
- const struct omap_overlay_info *oi,
- const struct omap_video_timings *timings,
- int *x_predecim, int *y_predecim);
-
-int dispc_ovl_enable(enum omap_plane plane, bool enable);
-bool dispc_ovl_enabled(enum omap_plane plane);
-void dispc_ovl_set_channel_out(enum omap_plane plane,
- enum omap_channel channel);
-int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
-
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-struct dss_mgr_ops {
- int (*connect)(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dst);
-
- void (*start_update)(struct omap_overlay_manager *mgr);
- int (*enable)(struct omap_overlay_manager *mgr);
- void (*disable)(struct omap_overlay_manager *mgr);
- void (*set_timings)(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings);
- void (*set_lcd_config)(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config);
- int (*register_framedone_handler)(struct omap_overlay_manager *mgr,
- void (*handler)(void *), void *data);
- void (*unregister_framedone_handler)(struct omap_overlay_manager *mgr,
- void (*handler)(void *), void *data);
-};
-
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
-void dss_uninstall_mgr_ops(void);
-
-int dss_mgr_connect(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dst);
-void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dst);
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings);
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config);
-int dss_mgr_enable(struct omap_overlay_manager *mgr);
-void dss_mgr_disable(struct omap_overlay_manager *mgr);
-void dss_mgr_start_update(struct omap_overlay_manager *mgr);
-int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
- void (*handler)(void *), void *data);
-void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
- void (*handler)(void *), void *data);
-
static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
{
return dssdev->src;
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 252ffd4801ef..4f20dbc42910 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -1,16 +1,34 @@
/******************************************************************************
- * netif.h
+ * xen_netif.h
*
* Unified network-device I/O interface for Xen guest OSes.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2003-2004, Keir Fraser
*/
-#ifndef __XEN_PUBLIC_IO_NETIF_H__
-#define __XEN_PUBLIC_IO_NETIF_H__
+#ifndef __XEN_PUBLIC_IO_XEN_NETIF_H__
+#define __XEN_PUBLIC_IO_XEN_NETIF_H__
-#include <xen/interface/io/ring.h>
-#include <xen/interface/grant_table.h>
+#include "ring.h"
+#include "../grant_table.h"
/*
* Older implementation of Xen network frontend / backend has an
@@ -38,10 +56,10 @@
* that it cannot safely queue packets (as it may not be kicked to send them).
*/
- /*
+/*
* "feature-split-event-channels" is introduced to separate guest TX
- * and RX notificaion. Backend either doesn't support this feature or
- * advertise it via xenstore as 0 (disabled) or 1 (enabled).
+ * and RX notification. Backend either doesn't support this feature or
+ * advertises it via xenstore as 0 (disabled) or 1 (enabled).
*
* To make use of this feature, frontend should allocate two event
* channels for TX and RX, advertise them to backend as
@@ -118,151 +136,804 @@
*/
/*
- * This is the 'wire' format for packets:
- * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
- * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info)
- * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE)
- * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data
- * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data
+ * "feature-multicast-control" and "feature-dynamic-multicast-control"
+ * advertise the capability to filter ethernet multicast packets in the
+ * backend. If the frontend wishes to take advantage of this feature then
+ * it may set "request-multicast-control". If the backend only advertises
+ * "feature-multicast-control" then "request-multicast-control" must be set
+ * before the frontend moves into the connected state. The backend will
+ * sample the value on this state transition and any subsequent change in
+ * value will have no effect. However, if the backend also advertises
+ * "feature-dynamic-multicast-control" then "request-multicast-control"
+ * may be set by the frontend at any time. In this case, the backend will
+ * watch the value and re-sample on watch events.
+ *
+ * If the sampled value of "request-multicast-control" is set then the
+ * backend transmit side should no longer flood multicast packets to the
+ * frontend, it should instead drop any multicast packet that does not
+ * match in a filter list.
+ * The list is amended by the frontend by sending dummy transmit requests
+ * containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as
+ * specified below.
+ * Note that the filter list may be amended even if the sampled value of
+ * "request-multicast-control" is not set, however the filter should only
+ * be applied if it is set.
+ */
+
+/*
+ * Control ring
+ * ============
+ *
+ * Some features, such as hashing (detailed below), require a
+ * significant amount of out-of-band data to be passed from frontend to
+ * backend. Use of xenstore is not suitable for large quantities of data
+ * because of quota limitations and so a dedicated 'control ring' is used.
+ * The ability of the backend to use a control ring is advertised by
+ * setting:
+ *
+ * /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1"
+ *
+ * The frontend provides a control ring to the backend by setting:
+ *
+ * /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref>
+ * /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port>
+ *
+ * where <gref> is the grant reference of the shared page used to
+ * implement the control ring and <port> is an event channel to be used
+ * as a mailbox interrupt. These keys must be set before the frontend
+ * moves into the connected state.
+ *
+ * The control ring uses a fixed request/response message size and is
+ * balanced (i.e. one request to one response), so operationally it is much
+ * the same as a transmit or receive ring.
+ * Note that there is no requirement that responses are issued in the same
+ * order as requests.
+ */
+
+/*
+ * Hash types
+ * ==========
+ *
+ * For the purposes of the definitions below, 'Packet[]' is an array of
+ * octets containing an IP packet without options, 'Array[X..Y]' means a
+ * sub-array of 'Array' containing bytes X through Y inclusive, and '+' is
+ * used to indicate concatenation of arrays.
+ */
+
+/*
+ * A hash calculated over an IP version 4 header as follows:
+ *
+ * Buffer[0..7] = Packet[12..15] (source address) +
+ * Packet[16..19] (destination address)
+ *
+ * Result = Hash(Buffer, 8)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+
+/*
+ * A hash calculated over an IP version 4 header and TCP header as
+ * follows:
+ *
+ * Buffer[0..11] = Packet[12..15] (source address) +
+ * Packet[16..19] (destination address) +
+ * Packet[20..21] (source port) +
+ * Packet[22..23] (destination port)
+ *
+ * Result = Hash(Buffer, 12)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+
+/*
+ * A hash calculated over an IP version 6 header as follows:
+ *
+ * Buffer[0..31] = Packet[8..23] (source address) +
+ * Packet[24..39] (destination address)
+ *
+ * Result = Hash(Buffer, 32)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+
+/*
+ * A hash calculated over an IP version 6 header and TCP header as
+ * follows:
+ *
+ * Buffer[0..35] = Packet[8..23] (source address) +
+ * Packet[24..39] (destination address) +
+ * Packet[40..41] (source port) +
+ * Packet[42..43] (destination port)
+ *
+ * Result = Hash(Buffer, 36)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
+
+/*
+ * Hash algorithms
+ * ===============
+ */
+
+#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0
+
+/*
+ * Toeplitz hash:
+ */
+
+#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
+
+/*
+ * This algorithm uses a 'key' as well as the data buffer itself.
+ * (Buffer[] and Key[] are treated as shift-registers where the MSB of
+ * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
+ * is the 'right-most').
+ *
+ * Value = 0
+ * For number of bits in Buffer[]
+ * If (left-most bit of Buffer[] is 1)
+ * Value ^= left-most 32 bits of Key[]
+ * Key[] << 1
+ * Buffer[] << 1
+ *
+ * The code below is provided for convenience where an operating system
+ * does not already provide an implementation.
+ */
+#ifdef XEN_NETIF_DEFINE_TOEPLITZ
+static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
+ unsigned int keylen,
+ const uint8_t *buf, unsigned int buflen)
+{
+ unsigned int keyi, bufi;
+ uint64_t prefix = 0;
+ uint64_t hash = 0;
+
+ /* Pre-load prefix with the first 8 bytes of the key */
+ for (keyi = 0; keyi < 8; keyi++) {
+ prefix <<= 8;
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ }
+
+ for (bufi = 0; bufi < buflen; bufi++) {
+ uint8_t byte = buf[bufi];
+ unsigned int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ prefix <<= 1;
+ byte <<= 1;
+ }
+
+ /*
+ * 'prefix' has now been left-shifted by 8, so
+ * OR in the next byte.
+ */
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ keyi++;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return hash >> 32;
+}
+#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
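To make the IPV4_TCP hash type concrete, the sketch below (not part of the interface) builds the 12-octet buffer described earlier from a sample flow and feeds it to the helper above. It assumes the header (and the ring.h/grant_table.h it pulls in) is reachable as "xen_netif.h", and the 40-byte key is an arbitrary placeholder for whatever the frontend programmed via XEN_NETIF_CTRL_TYPE_SET_HASH_KEY.

/* Sketch: compute the IPV4_TCP hash for one flow using the helper above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XEN_NETIF_DEFINE_TOEPLITZ	/* pull in xen_netif_toeplitz_hash() */
#include "xen_netif.h"			/* assumed install path */

int main(void)
{
	/* Placeholder key; a real frontend uses the key it set via
	 * XEN_NETIF_CTRL_TYPE_SET_HASH_KEY.
	 */
	uint8_t key[40] = { 0x6d, 0x5a, 0x00, 0x01 };
	/* Sample flow 192.168.0.1:80 -> 192.168.0.2:1234 */
	uint8_t saddr[4] = { 192, 168, 0, 1 };
	uint8_t daddr[4] = { 192, 168, 0, 2 };
	uint8_t ports[4] = { 0x00, 0x50, 0x04, 0xd2 };
	uint8_t buf[12];
	uint32_t hash;

	/* Buffer = source address + destination address + ports, exactly
	 * the layout given for XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP.
	 */
	memcpy(buf, saddr, 4);
	memcpy(buf + 4, daddr, 4);
	memcpy(buf + 8, ports, 4);

	hash = xen_netif_toeplitz_hash(key, sizeof(key), buf, sizeof(buf));
	/* The backend would steer to queue mapping[hash % table_size]. */
	printf("toeplitz hash = 0x%08x\n", hash);
	return 0;
}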
+
+/*
+ * Control requests (struct xen_netif_ctrl_request)
+ * ================================================
+ *
+ * All requests have the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | type | data[0] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | data[1] | data[2] |
+ * +-----+-----+-----+-----+-----------------------+
+ *
+ * id: the request identifier, echoed in response.
+ * type: the type of request (see below)
+ * data[]: any data associated with the request (determined by type)
+ */
+
+struct xen_netif_ctrl_request {
+ uint16_t id;
+ uint16_t type;
+
+#define XEN_NETIF_CTRL_TYPE_INVALID 0
+#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS 1
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS 2
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY 3
+#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING 6
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM 7
+
+ uint32_t data[3];
+};
+
+/*
+ * Control responses (struct xen_netif_ctrl_response)
+ * ==================================================
+ *
+ * All responses have the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | type | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | data |
+ * +-----+-----+-----+-----+
+ *
+ * id: the corresponding request identifier
+ * type: the type of the corresponding request
+ * status: the status of request processing
+ * data: any data associated with the response (determined by type and
+ * status)
+ */
+
+struct xen_netif_ctrl_response {
+ uint16_t id;
+ uint16_t type;
+ uint32_t status;
+
+#define XEN_NETIF_CTRL_STATUS_SUCCESS 0
+#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED 1
+#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2
+#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW 3
+
+ uint32_t data;
+};
+
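To make the request/response layout concrete: a frontend enabling IPv4 and IPv4+TCP hashing would queue an 8-byte XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS request and later match the response by id, since responses may be reordered. The sketch below (not part of the interface; header path assumed as before) only builds and checks the messages; placing them on the shared ring and kicking the event channel is toolstack specific and elided.

/* Sketch: build a SET_HASH_FLAGS request and validate a matching response. */
#include <stdint.h>
#include <stdio.h>

#include "xen_netif.h"	/* assumed install path for the header above */

static struct xen_netif_ctrl_request make_set_hash_flags(uint16_t id)
{
	struct xen_netif_ctrl_request req = {
		.id = id,
		.type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS,
		.data = { XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
			  XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP, 0, 0 },
	};
	return req;
}

/* Responses need not arrive in request order, so match on the echoed id. */
static int response_ok(const struct xen_netif_ctrl_request *req,
		       const struct xen_netif_ctrl_response *rsp)
{
	return rsp->id == req->id && rsp->type == req->type &&
	       rsp->status == XEN_NETIF_CTRL_STATUS_SUCCESS;
}

int main(void)
{
	struct xen_netif_ctrl_request req = make_set_hash_flags(1);
	/* Stand-in for what the backend would place on the control ring. */
	struct xen_netif_ctrl_response rsp = {
		.id = 1,
		.type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS,
		.status = XEN_NETIF_CTRL_STATUS_SUCCESS,
	};

	printf("hash flags accepted: %s\n",
	       response_ok(&req, &rsp) ? "yes" : "no");
	return 0;
}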
+/*
+ * Control messages
+ * ================
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
+ * --------------------------------------
+ *
+ * This is sent by the frontend to set the desired hash algorithm.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
+ * data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ *
+ * NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables
+ * hashing and the backend is free to choose how it steers packets
+ * to queues (which is the default behaviour).
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
+ * ----------------------------------
+ *
+ * This is sent by the frontend to query the types of hash supported by
+ * the backend.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
+ * data[0] = 0
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = supported hash types (if operation was successful)
+ *
+ * NOTE: A valid hash algorithm must be selected before this operation can
+ * succeed.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
+ * ----------------------------------
+ *
+ * This is sent by the frontend to set the types of hash that the backend
+ * should calculate. (See above for hash type definitions).
+ * Note that the 'maximal' type of hash should always be chosen. For
+ * example, if the frontend sets both IPV4 and IPV4_TCP hash types then
+ * the latter hash type should be calculated for any TCP packet and the
+ * former only calculated for non-TCP packets.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
+ * data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag
+ * values are invalid or
+ * unsupported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: A valid hash algorithm must be selected before this operation can
+ * succeed.
+ * Also, setting data[0] to zero disables hashing and the backend
+ * is free to choose how it steers packets to queues.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
+ * --------------------------------
+ *
+ * This is sent by the frontend to set the key of the hash if the algorithm
+ * requires it. (See hash algorithms above).
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
+ * data[0] = grant reference of page containing the key (assumed to
+ * start at beginning of grant)
+ * data[1] = size of key in octets
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid
+ * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger
+ * than the backend
+ * supports
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: Any key octets not specified are assumed to be zero (the key
+ * is assumed to be empty by default) and specifying a new key
+ * invalidates any previous key, hence specifying a key size of
+ * zero will clear the key (which ensures that the calculated hash
+ * will always be zero).
+ * The maximum key size is algorithm- and backend-specific, but
+ * is also limited by the size of a single grant reference.
+ * The grant reference may be read-only and must remain valid until
+ * the response has been processed.
+ *
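A minimal sketch, not part of the interface: the frontend passes a grant of the page holding the key together with the key length. How the grant reference is obtained is outside this header.

    /* Sketch: install a hash key that starts at offset 0 of a granted page.
     * 'gref' may be a read-only grant and must stay valid until the
     * response arrives; 'key_len' of 0 clears the key.
     */
    static void build_set_hash_key(struct xen_netif_ctrl_request *req,
                                   uint16_t id, grant_ref_t gref,
                                   uint32_t key_len)
    {
        req->id = id;
        req->type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY;
        req->data[0] = gref;
        req->data[1] = key_len;
        req->data[2] = 0;
    }
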
+ * XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to query the maximum size of mapping
+ * table supported by the backend. The size is specified in terms of
+ * table entries.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
+ * data[0] = 0
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = maximum number of entries allowed in the mapping table
+ * (if operation was successful) or zero if a mapping table is
+ * not supported (i.e. hash mapping is done only by modular
+ * arithmetic).
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to set the actual size of the mapping
+ * table to be used by the backend. The size is specified in terms of
+ * table entries.
+ * Any previous table is invalidated by this message and any new table
+ * is assumed to be zero filled.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * data[0] = number of entries in mapping table
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: Setting data[0] to 0 means that hash mapping should be done
+ * using modular arithmetic.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
+ * ------------------------------------
+ *
+ * This is sent by the frontend to set the content of the table mapping
+ * hash value to queue number. The backend should calculate the hash from
+ * the packet header, use it as an index into the table (modulo the size
+ * of the table) and then steer the packet to the queue number found at
+ * that index.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
+ * data[0] = grant reference of page containing the mapping (sub-)table
+ * (assumed to start at beginning of grant)
+ * data[1] = size of (sub-)table in entries
+ * data[2] = offset, in entries, of sub-table within overall table
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content
+ * is invalid
+ * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger
+ * than the backend
+ * supports
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: The overall table has the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | mapping[0] | mapping[1] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | . |
+ * | . |
+ * | . |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | mapping[N-2] | mapping[N-1] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * message and each mapping must specify a queue between 0 and
+ * "multi-queue-num-queues" (see above).
+ * The backend may support a mapping table larger than can be
+ * mapped by a single grant reference. Thus sub-tables within a
+ * larger table can be individually set by sending multiple messages
+ * with differing offset values. Specifying a new sub-table does not
+ * invalidate any table data outside that range.
+ * The grant reference may be read-only and must remain valid until
+ * the response has been processed.
+ */
+
+DEFINE_RING_TYPES(xen_netif_ctrl,
+ struct xen_netif_ctrl_request,
+ struct xen_netif_ctrl_response);
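To make the steering rules above concrete, here is a rough, non-normative sketch of how a backend could pick a queue once it has computed the hash; 'mapping', 'mapping_size' and 'num_queues' are hypothetical backend-side state mirroring the messages described above.

    /* Sketch: steer a packet with the given hash value to a queue. */
    static unsigned int steer_packet(const uint32_t *mapping,
                                     uint32_t mapping_size,
                                     unsigned int num_queues,
                                     uint32_t hash)
    {
        unsigned int queue;

        if (mapping_size == 0)      /* no table: modular arithmetic */
            queue = hash % num_queues;
        else                        /* table lookup, modulo its size */
            queue = mapping[hash % mapping_size];

        /* Entries must name a queue below "multi-queue-num-queues";
         * treat anything else as a malformed table.
         */
        return queue < num_queues ? queue : 0;
    }
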
+
+/*
+ * Guest transmit
+ * ==============
+ *
+ * This is the 'wire' format for transmit (frontend -> backend) packets:
+ *
+ * Fragment 1: xen_netif_tx_request_t - flags = XEN_NETTXF_*
+ * size = total packet size
+ * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include
+ * XEN_NETTXF_extra_info)
+ * ...
+ * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include
+ * XEN_NETIF_EXTRA_FLAG_MORE)
* ...
- * Request N: xen_netif_tx_request -- 0
+ * Fragment N: xen_netif_tx_request_t - (only if fragment N-1 flags include
+ * XEN_NETTXF_more_data - flags on preceding
+ * extras are not relevant here)
+ * flags = 0
+ * size = fragment size
+ *
+ * NOTE:
+ *
+ * This format is slightly different from that used for receive
+ * (backend -> frontend) packets. Specifically, in a multi-fragment
+ * packet the actual size of fragment 1 can only be determined by
+ * subtracting the sizes of fragments 2..N from the total packet size.
+ *
+ * Ring slot size is 12 octets; however, not all request/response
+ * structs use the full size.
+ *
+ * tx request data (xen_netif_tx_request_t)
+ * ----------------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | grant ref | offset | flags |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | size |
+ * +-----+-----+-----+-----+
+ *
+ * grant ref: Reference to buffer page.
+ * offset: Offset within buffer page.
+ * flags: XEN_NETTXF_*.
+ * id: request identifier, echoed in response.
+ * size: packet size in bytes.
+ *
+ * tx response (xen_netif_tx_response_t)
+ * -------------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | status | unused |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | unused |
+ * +-----+-----+-----+-----+
+ *
+ * id: reflects id in transmit request
+ * status: XEN_NETIF_RSP_*
+ *
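As a non-normative illustration of the size rule in the NOTE above, a backend that has gathered all N tx requests of one packet (using the xen_netif_tx_request layout defined later in this header) could recover the size of fragment 1 like this:

    /* Sketch: fragment 1 carries the total packet size, so its own size is
     * the total minus the sizes of fragments 2..N.
     */
    static uint32_t tx_first_frag_size(const struct xen_netif_tx_request *frags,
                                       unsigned int nr_frags)
    {
        uint32_t size = frags[0].size;
        unsigned int i;

        for (i = 1; i < nr_frags; i++)
            size -= frags[i].size;

        return size;
    }
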
+ * Guest receive
+ * =============
+ *
+ * This is the 'wire' format for receive (backend -> frontend) packets:
+ *
+ * Fragment 1: xen_netif_rx_request_t - flags = XEN_NETRXF_*
+ * size = fragment size
+ * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include
+ * XEN_NETRXF_extra_info)
+ * ...
+ * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include
+ * XEN_NETIF_EXTRA_FLAG_MORE)
+ * ...
+ * Fragment N: xen_netif_rx_request_t - (only if fragment N-1 flags include
+ * XEN_NETRXF_more_data - flags on preceding
+ * extras are not relevant here)
+ * flags = 0
+ * size = fragment size
+ *
+ * NOTE:
+ *
+ * This format is slightly different from that used for transmit
+ * (frontend -> backend) packets. Specifically, in a multi-fragment
+ * packet the size of the packet can only be determined by summing the
+ * sizes of fragments 1..N.
+ *
+ * Ring slot size is 8 octets.
+ *
+ * rx request (xen_netif_rx_request_t)
+ * -----------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | pad | gref |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: request identifier, echoed in response.
+ * gref: reference to incoming granted frame.
+ *
+ * rx response (xen_netif_rx_response_t)
+ * -------------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | offset | flags | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: reflects id in receive request
+ * offset: offset in page of start of received packet
+ * flags: XEN_NETRXF_*
+ * status: -ve: XEN_NETIF_RSP_*; +ve: Rx'ed pkt size.
+ *
+ * NOTE: Historically, to support GSO on the frontend receive side, Linux
+ * netfront does not make use of the rx response id (because, as
+ * described below, extra info structures overlay the id field).
+ * Instead it assumes that responses always appear in the same ring
+ * slot as their corresponding request. Thus, to maintain
+ * compatibility, backends must make sure this is the case.
+ *
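Correspondingly, here is a non-normative sketch of a frontend summing the fragments of one received packet and checking for errors, using the xen_netif_rx_response layout defined later in this header:

    /* Sketch: a negative status is an error (one of XEN_NETIF_RSP_*); a
     * positive status is that fragment's size, so the packet size is the sum.
     */
    static int rx_packet_size(const struct xen_netif_rx_response *rsps,
                              unsigned int nr_frags, uint32_t *total)
    {
        uint32_t size = 0;
        unsigned int i;

        for (i = 0; i < nr_frags; i++) {
            if (rsps[i].status < 0)
                return rsps[i].status;
            size += rsps[i].status;
        }

        *total = size;
        return 0;
    }
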
+ * Extra Info
+ * ==========
+ *
+ * Can be present if the initial request or response has
+ * XEN_NET{T,R}XF_extra_info, or if the previous extra request has
+ * XEN_NETIF_EXTRA_FLAG_MORE set.
+ *
+ * The struct therefore needs to fit into either a tx or rx slot and
+ * is thus limited to 8 octets.
+ *
+ * NOTE: Because extra info data overlays the usual request/response
+ * structures, there is no id information in the opposite direction.
+ * So, if an extra info overlays an rx response the frontend can
+ * assume that it is in the same ring slot as the request that was
+ * consumed to make the slot available, and the backend must ensure
+ * this assumption is true.
+ *
+ * extra info (xen_netif_extra_info_t)
+ * -----------------------------------
+ *
+ * General format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| type specific data |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | padding for tx |
+ * +-----+-----+-----+-----+
+ *
+ * type: XEN_NETIF_EXTRA_TYPE_*
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * padding for tx: present only in the tx case, padding the 8 octet
+ * struct (sized for the rx case) out to the 12 octet
+ * tx slot. Not shown in the type specific entries below.
+ *
+ * XEN_NETIF_EXTRA_TYPE_GSO:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| size |type | pad | features |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_GSO
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * size: Maximum payload size of each segment. For example,
+ * for TCP this is just the path MSS.
+ * type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of
+ * the packet and any extra features required to segment the
+ * packet properly.
+ * features: XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO
+ * features required to process this packet, such as ECN
+ * support for TCPv4.
+ *
+ * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| addr |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * addr: address to add/remove
+ *
+ * XEN_NETIF_EXTRA_TYPE_HASH:
+ *
+ * A backend that supports Toeplitz hashing is assumed to accept
+ * this type of extra info in transmit packets.
+ * A frontend that enables hashing is assumed to accept
+ * this type of extra info in receive packets.
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags|htype| alg |LSB ---- value ---- MSB|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_HASH
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above)
+ * alg: The algorithm used to calculate the hash (one of
+ * XEN_NETIF_CTRL_HASH_ALGORITHM_* - see above)
+ * value: Hash value
*/
/* Protocol checksum field is blank in the packet (hardware offload)? */
-#define _XEN_NETTXF_csum_blank (0)
-#define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank)
+#define _XEN_NETTXF_csum_blank (0)
+#define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank)
/* Packet data has been validated against protocol checksum. */
-#define _XEN_NETTXF_data_validated (1)
-#define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated)
+#define _XEN_NETTXF_data_validated (1)
+#define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated)
/* Packet continues in the next request descriptor. */
-#define _XEN_NETTXF_more_data (2)
-#define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data)
+#define _XEN_NETTXF_more_data (2)
+#define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data)
/* Packet to be followed by extra descriptor(s). */
-#define _XEN_NETTXF_extra_info (3)
-#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
+#define _XEN_NETTXF_extra_info (3)
+#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
struct xen_netif_tx_request {
- grant_ref_t gref; /* Reference to buffer page */
- uint16_t offset; /* Offset within buffer page */
- uint16_t flags; /* XEN_NETTXF_* */
- uint16_t id; /* Echoed in response message. */
- uint16_t size; /* Packet size in bytes. */
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t flags;
+ uint16_t id;
+ uint16_t size;
};
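A minimal, non-normative sketch of a frontend describing a packet split across two granted buffers, following the transmit wire format documented above. The caller is assumed to allocate the two ids and to keep len1 + len2 within XEN_NETIF_MAX_TX_SIZE.

    /* Sketch: fragment 1 carries the total size plus XEN_NETTXF_more_data;
     * the final fragment carries only its own size with flags = 0.
     */
    static void fill_two_frag_tx(struct xen_netif_tx_request *req1,
                                 struct xen_netif_tx_request *req2,
                                 grant_ref_t gref1, uint16_t len1, uint16_t id1,
                                 grant_ref_t gref2, uint16_t len2, uint16_t id2)
    {
        req1->gref = gref1;
        req1->offset = 0;
        req1->flags = XEN_NETTXF_more_data;
        req1->id = id1;
        req1->size = len1 + len2;   /* total packet size */

        req2->gref = gref2;
        req2->offset = 0;
        req2->flags = 0;
        req2->id = id2;
        req2->size = len2;          /* fragment size */
    }
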
/* Types of xen_netif_extra_info descriptors. */
-#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
-#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
-#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
-#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
-#define XEN_NETIF_EXTRA_TYPE_MAX (4)
+#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
+#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
+#define XEN_NETIF_EXTRA_TYPE_MAX (5)
-/* xen_netif_extra_info flags. */
-#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
-#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
+/* xen_netif_extra_info_t flags. */
+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
+#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
/* GSO types */
-#define XEN_NETIF_GSO_TYPE_NONE (0)
-#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
-#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
+#define XEN_NETIF_GSO_TYPE_NONE (0)
+#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
+#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
/*
- * This structure needs to fit within both netif_tx_request and
- * netif_rx_response for compatibility.
+ * This structure needs to fit within both xen_netif_tx_request_t and
+ * xen_netif_rx_response_t for compatibility.
*/
struct xen_netif_extra_info {
- uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
- uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
-
+ uint8_t type;
+ uint8_t flags;
union {
struct {
- /*
- * Maximum payload size of each segment. For
- * example, for TCP this is just the path MSS.
- */
uint16_t size;
-
- /*
- * GSO type. This determines the protocol of
- * the packet and any extra features required
- * to segment the packet properly.
- */
- uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
-
- /* Future expansion. */
+ uint8_t type;
uint8_t pad;
-
- /*
- * GSO features. This specifies any extra GSO
- * features required to process this packet,
- * such as ECN support for TCPv4.
- */
- uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
+ uint16_t features;
} gso;
-
struct {
- uint8_t addr[6]; /* Address to add/remove. */
+ uint8_t addr[6];
} mcast;
-
+ struct {
+ uint8_t type;
+ uint8_t algorithm;
+ uint8_t value[4];
+ } hash;
uint16_t pad[3];
} u;
};
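For illustration only, a frontend might describe a TCPv4 GSO packet with an extra-info slot as below; the first tx request would also need XEN_NETTXF_extra_info set, as described above.

    /* Sketch: fill a GSO extra-info segment for a TCPv4 packet. */
    static void fill_gso_extra(struct xen_netif_extra_info *extra, uint16_t mss)
    {
        extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
        extra->flags = 0;                 /* or XEN_NETIF_EXTRA_FLAG_MORE */
        extra->u.gso.size = mss;          /* max payload per segment */
        extra->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
        extra->u.gso.pad = 0;
        extra->u.gso.features = 0;        /* no extra GSO features */
    }
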
struct xen_netif_tx_response {
uint16_t id;
- int16_t status; /* XEN_NETIF_RSP_* */
+ int16_t status;
};
struct xen_netif_rx_request {
- uint16_t id; /* Echoed in response message. */
- grant_ref_t gref; /* Reference to incoming granted frame */
+ uint16_t id; /* Echoed in response message. */
+ uint16_t pad;
+ grant_ref_t gref;
};
/* Packet data has been validated against protocol checksum. */
-#define _XEN_NETRXF_data_validated (0)
-#define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated)
+#define _XEN_NETRXF_data_validated (0)
+#define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated)
/* Protocol checksum field is blank in the packet (hardware offload)? */
-#define _XEN_NETRXF_csum_blank (1)
-#define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank)
+#define _XEN_NETRXF_csum_blank (1)
+#define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank)
/* Packet continues in the next request descriptor. */
-#define _XEN_NETRXF_more_data (2)
-#define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data)
+#define _XEN_NETRXF_more_data (2)
+#define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data)
/* Packet to be followed by extra descriptor(s). */
-#define _XEN_NETRXF_extra_info (3)
-#define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info)
+#define _XEN_NETRXF_extra_info (3)
+#define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info)
-/* GSO Prefix descriptor. */
-#define _XEN_NETRXF_gso_prefix (4)
-#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix)
+/* Packet has GSO prefix. Deprecated but included for compatibility */
+#define _XEN_NETRXF_gso_prefix (4)
+#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix)
struct xen_netif_rx_response {
- uint16_t id;
- uint16_t offset; /* Offset in page of start of received packet */
- uint16_t flags; /* XEN_NETRXF_* */
- int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+ uint16_t id;
+ uint16_t offset;
+ uint16_t flags;
+ int16_t status;
};
/*
- * Generate netif ring structures and types.
+ * Generate xen_netif ring structures and types.
*/
-DEFINE_RING_TYPES(xen_netif_tx,
- struct xen_netif_tx_request,
+DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request,
struct xen_netif_tx_response);
-DEFINE_RING_TYPES(xen_netif_rx,
- struct xen_netif_rx_request,
+DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request,
struct xen_netif_rx_response);
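The DEFINE_RING_TYPES() invocations above generate xen_netif_{tx,rx}_sring and the matching front/back ring types. As a rough, non-normative sketch, a frontend might queue a single-fragment packet using the generic ring macros from xen/interface/io/ring.h; initialising and granting the shared page, and kicking the event channel, are outside this header.

    /* Sketch: place one single-fragment tx request on an initialised ring. */
    static void queue_tx_packet(struct xen_netif_tx_front_ring *tx,
                                grant_ref_t gref, uint16_t offset,
                                uint16_t size, uint16_t id)
    {
        struct xen_netif_tx_request *req;
        int notify;

        req = RING_GET_REQUEST(tx, tx->req_prod_pvt++);
        req->gref = gref;
        req->offset = offset;
        req->flags = 0;              /* single fragment, no extras */
        req->id = id;
        req->size = size;            /* equals total packet size here */

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(tx, notify);
        if (notify) {
            /* Kick the backend's event channel here (not shown). */
        }
    }
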
-#define XEN_NETIF_RSP_DROPPED -2
-#define XEN_NETIF_RSP_ERROR -1
-#define XEN_NETIF_RSP_OKAY 0
-/* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */
-#define XEN_NETIF_RSP_NULL 1
+#define XEN_NETIF_RSP_DROPPED -2
+#define XEN_NETIF_RSP_ERROR -1
+#define XEN_NETIF_RSP_OKAY 0
+/* No response: used for auxiliary requests (e.g., xen_netif_extra_info_t). */
+#define XEN_NETIF_RSP_NULL 1
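The XEN_NETIF_RSP_* values above are the status codes used in tx responses and in negative rx statuses. A trivial, purely illustrative decoder:

    /* Sketch: human-readable name for a tx/rx response status code. */
    static const char *xen_netif_rsp_name(int16_t status)
    {
        switch (status) {
        case XEN_NETIF_RSP_OKAY:    return "okay";
        case XEN_NETIF_RSP_NULL:    return "null (auxiliary slot)";
        case XEN_NETIF_RSP_ERROR:   return "error";
        case XEN_NETIF_RSP_DROPPED: return "dropped";
        default:                    return "unknown";
        }
    }
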
#endif