Diffstat (limited to 'include')
-rw-r--r--include/acpi/acnames.h7
-rw-r--r--include/acpi/acoutput.h4
-rw-r--r--include/acpi/acpiosxf.h21
-rw-r--r--include/acpi/acpixf.h3
-rw-r--r--include/acpi/actbl2.h25
-rw-r--r--include/acpi/actypes.h5
-rw-r--r--include/acpi/cppc_acpi.h14
-rw-r--r--include/acpi/ghes.h7
-rw-r--r--include/acpi/platform/aclinux.h5
-rw-r--r--include/acpi/platform/aclinuxex.h30
-rw-r--r--include/asm-generic/atomic-long.h19
-rw-r--r--include/asm-generic/barrier.h27
-rw-r--r--include/asm-generic/compat.h3
-rw-r--r--include/asm-generic/dma-mapping.h9
-rw-r--r--include/asm-generic/export.h34
-rw-r--r--include/asm-generic/int-ll64.h19
-rw-r--r--include/asm-generic/pci.h8
-rw-r--r--include/asm-generic/qspinlock.h4
-rw-r--r--include/asm-generic/qspinlock_types.h32
-rw-r--r--include/asm-generic/vmlinux.lds.h336
-rw-r--r--include/crypto/algapi.h8
-rw-r--r--include/crypto/if_alg.h3
-rw-r--r--include/crypto/morus1280_glue.h137
-rw-r--r--include/crypto/morus640_glue.h137
-rw-r--r--include/crypto/morus_common.h23
-rw-r--r--include/crypto/salsa20.h27
-rw-r--r--include/crypto/sm4.h3
-rw-r--r--include/drm/amd_asic_type.h2
-rw-r--r--include/drm/bridge/analogix_dp.h3
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/drm/drmP.h28
-rw-r--r--include/drm/drm_blend.h3
-rw-r--r--include/drm/drm_device.h10
-rw-r--r--include/drm/drm_dp_helper.h24
-rw-r--r--include/drm/drm_drv.h15
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_file.h23
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h3
-rw-r--r--include/drm/drm_hdcp.h2
-rw-r--r--include/drm/drm_ioctl.h7
-rw-r--r--include/drm/drm_legacy.h4
-rw-r--r--include/drm/drm_mode_config.h8
-rw-r--r--include/drm/drm_modes.h22
-rw-r--r--include/drm/drm_modeset_helper_vtables.h5
-rw-r--r--include/drm/drm_plane.h21
-rw-r--r--include/drm/drm_property.h28
-rw-r--r--include/drm/drm_rect.h3
-rw-r--r--include/drm/drm_simple_kms_helper.h6
-rw-r--r--include/drm/gpu_scheduler.h56
-rw-r--r--include/drm/gpu_scheduler_trace.h82
-rw-r--r--include/drm/i915_pciids.h1
-rw-r--r--include/drm/tinydrm/mipi-dbi.h4
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h5
-rw-r--r--include/drm/tinydrm/tinydrm.h8
-rw-r--r--include/dt-bindings/clock/actions,s900-cmu.h129
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h4
-rw-r--r--include/dt-bindings/clock/axg-aoclkc.h26
-rw-r--r--include/dt-bindings/clock/bcm-sr.h24
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h2
-rw-r--r--include/dt-bindings/clock/histb-clock.h8
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h6
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h33
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h4
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h1
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h20
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm7xx-clock.h44
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h208
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h239
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h22
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sdm845.h35
-rw-r--r--include/dt-bindings/clock/r8a77470-cpg-mssr.h36
-rw-r--r--include/dt-bindings/clock/r8a77990-cpg-mssr.h62
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h4
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h24
-rw-r--r--include/dt-bindings/dma/jz4780-dma.h49
-rw-r--r--include/dt-bindings/memory/tegra114-mc.h17
-rw-r--r--include/dt-bindings/memory/tegra124-mc.h25
-rw-r--r--include/dt-bindings/memory/tegra20-mc.h21
-rw-r--r--include/dt-bindings/memory/tegra210-mc.h31
-rw-r--r--include/dt-bindings/memory/tegra30-mc.h19
-rw-r--r--include/dt-bindings/net/microchip-lan78xx.h21
-rw-r--r--include/dt-bindings/phy/phy-qcom-qusb2.h37
-rw-r--r--include/dt-bindings/pinctrl/mt7623-pinfunc.h90
-rw-r--r--include/dt-bindings/power/px30-power.h27
-rw-r--r--include/dt-bindings/power/r8a77470-sysc.h22
-rw-r--r--include/dt-bindings/power/r8a77990-sysc.h26
-rw-r--r--include/dt-bindings/power/rk3036-power.h13
-rw-r--r--include/dt-bindings/power/rk3128-power.h14
-rw-r--r--include/dt-bindings/power/rk3228-power.h21
-rw-r--r--include/dt-bindings/reset/axg-aoclkc.h20
-rw-r--r--include/dt-bindings/reset/mt2701-resets.h3
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h17
-rw-r--r--include/dt-bindings/soc/qcom,apr.h28
-rw-r--r--include/dt-bindings/sound/fsl-imx-audmux.h7
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h111
-rw-r--r--include/dt-bindings/sound/qcom,q6asm.h22
-rw-r--r--include/dt-bindings/sound/rt5640.h25
-rw-r--r--include/keys/asymmetric-subtype.h2
-rw-r--r--include/keys/asymmetric-type.h2
-rw-r--r--include/kvm/arm_psci.h16
-rw-r--r--include/kvm/arm_vgic.h18
-rw-r--r--include/linux/acpi.h25
-rw-r--r--include/linux/aer.h1
-rw-r--r--include/linux/aio.h2
-rw-r--r--include/linux/arm-smccc.h10
-rw-r--r--include/linux/assoc_array.h2
-rw-r--r--include/linux/assoc_array_priv.h2
-rw-r--r--include/linux/atalk.h7
-rw-r--r--include/linux/atmdev.h15
-rw-r--r--include/linux/atomic.h2
-rw-r--r--include/linux/audit.h39
-rw-r--r--include/linux/avf/virtchnl.h4
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/backing-dev.h30
-rw-r--r--include/linux/backlight.h4
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/bio.h52
-rw-r--r--include/linux/blk-mq.h8
-rw-r--r--include/linux/blk_types.h51
-rw-r--r--include/linux/blkdev.h120
-rw-r--r--include/linux/bpf-cgroup.h24
-rw-r--r--include/linux/bpf.h106
-rw-r--r--include/linux/bpf_lirc.h29
-rw-r--r--include/linux/bpf_trace.h1
-rw-r--r--include/linux/bpf_types.h12
-rw-r--r--include/linux/bpf_verifier.h16
-rw-r--r--include/linux/bpfilter.h15
-rw-r--r--include/linux/brcmphy.h5
-rw-r--r--include/linux/bsg-lib.h3
-rw-r--r--include/linux/bsg.h6
-rw-r--r--include/linux/btf.h50
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/cacheinfo.h25
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/ceph/osd_client.h20
-rw-r--r--include/linux/ceph/osdmap.h8
-rw-r--r--include/linux/cfag12864b.h16
-rw-r--r--include/linux/cgroup-defs.h52
-rw-r--r--include/linux/cgroup.h12
-rw-r--r--include/linux/circ_buf.h2
-rw-r--r--include/linux/clk-provider.h26
-rw-r--r--include/linux/clk/davinci.h40
-rw-r--r--include/linux/compat.h19
-rw-r--r--include/linux/compat_time.h23
-rw-r--r--include/linux/compiler-clang.h17
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/compiler-intel.h4
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/coresight-pmu.h13
-rw-r--r--include/linux/coresight.h13
-rw-r--r--include/linux/cper.h4
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/crash_dump.h18
-rw-r--r--include/linux/dax.h27
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/delayacct.h2
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/device.h27
-rw-r--r--include/linux/dma-contiguous.h2
-rw-r--r--include/linux/dma-debug.h6
-rw-r--r--include/linux/dma-direct.h7
-rw-r--r--include/linux/dma-fence.h236
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/dma-mapping.h19
-rw-r--r--include/linux/dma-noncoherent.h47
-rw-r--r--include/linux/dma/sprd-dma.h61
-rw-r--r--include/linux/efi.h14
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/ethtool.h7
-rw-r--r--include/linux/export.h16
-rw-r--r--include/linux/filter.h160
-rw-r--r--include/linux/firmware.h10
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h13
-rw-r--r--include/linux/fpga/fpga-bridge.h9
-rw-r--r--include/linux/fpga/fpga-mgr.h23
-rw-r--r--include/linux/fpga/fpga-region.h13
-rw-r--r--include/linux/fs.h61
-rw-r--r--include/linux/fscrypt_notsupp.h23
-rw-r--r--include/linux/fscrypt_supp.h21
-rw-r--r--include/linux/fsl/ptp_qoriq.h141
-rw-r--r--include/linux/fsnotify_backend.h85
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/genhd.h4
-rw-r--r--include/linux/gfp.h16
-rw-r--r--include/linux/gpio/consumer.h10
-rw-r--r--include/linux/gpio/machine.h31
-rw-r--r--include/linux/hid.h28
-rw-r--r--include/linux/hmm.h10
-rw-r--r--include/linux/host1x.h24
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/hwspinlock.h68
-rw-r--r--include/linux/hyperv.h38
-rw-r--r--include/linux/i2c-pnx.h38
-rw-r--r--include/linux/i2c.h12
-rw-r--r--include/linux/ide.h8
-rw-r--r--include/linux/if_bridge.h29
-rw-r--r--include/linux/if_macvlan.h29
-rw-r--r--include/linux/if_tun.h4
-rw-r--r--include/linux/if_vlan.h11
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h24
-rw-r--r--include/linux/iio/adc/stm32-dfsdm-adc.h2
-rw-r--r--include/linux/iio/buffer_impl.h6
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h180
-rw-r--r--include/linux/iio/iio.h24
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/interrupt.h13
-rw-r--r--include/linux/iomap.h27
-rw-r--r--include/linux/iommu-common.h53
-rw-r--r--include/linux/iommu-helper.h13
-rw-r--r--include/linux/ioprio.h9
-rw-r--r--include/linux/ipmi.h153
-rw-r--r--include/linux/ipmi_smi.h129
-rw-r--r--include/linux/irq.h8
-rw-r--r--include/linux/irq_cpustat.h10
-rw-r--r--include/linux/irq_sim.h13
-rw-r--r--include/linux/irqchip/arm-gic-v3.h1
-rw-r--r--include/linux/irqdomain.h8
-rw-r--r--include/linux/isdn/capilli.h2
-rw-r--r--include/linux/kcore.h6
-rw-r--r--include/linux/kcov.h14
-rw-r--r--include/linux/kernel.h18
-rw-r--r--include/linux/ks0108.h16
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/kvm_host.h27
-rw-r--r--include/linux/libata.h26
-rw-r--r--include/linux/lightnvm.h2
-rw-r--r--include/linux/linkage.h12
-rw-r--r--include/linux/livepatch.h19
-rw-r--r--include/linux/log2.h35
-rw-r--r--include/linux/lsm_hooks.h7
-rw-r--r--include/linux/mdio-bitbang.h2
-rw-r--r--include/linux/mdio-gpio.h9
-rw-r--r--include/linux/mdio.h1
-rw-r--r--include/linux/memcontrol.h77
-rw-r--r--include/linux/memfd.h16
-rw-r--r--include/linux/memory_hotplug.h4
-rw-r--r--include/linux/mempool.h34
-rw-r--r--include/linux/memremap.h40
-rw-r--r--include/linux/mfd/abx500.h1
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h2
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h4
-rw-r--r--include/linux/mfd/arizona/pdata.h3
-rw-r--r--include/linux/mfd/as3711.h4
-rw-r--r--include/linux/mfd/axp20x.h10
-rw-r--r--include/linux/mfd/bd9571mwv.h5
-rw-r--r--include/linux/mfd/cros_ec.h22
-rw-r--r--include/linux/mfd/lp8788.h16
-rw-r--r--include/linux/mfd/mc13xxx.h2
-rw-r--r--include/linux/mfd/rave-sp.h1
-rw-r--r--include/linux/mfd/samsung/core.h4
-rw-r--r--include/linux/mfd/stm32-timers.h70
-rw-r--r--include/linux/mfd/syscon/exynos4-pmu.h21
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h19
-rw-r--r--include/linux/mfd/tps65090.h8
-rw-r--r--include/linux/mfd/tps65218.h4
-rw-r--r--include/linux/mfd/tps6586x.h1
-rw-r--r--include/linux/mfd/tps68470.h17
-rw-r--r--include/linux/mfd/wm8350/audio.h3
-rw-r--r--include/linux/microchipphy.h11
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h38
-rw-r--r--include/linux/mlx5/mlx5_ifc.h70
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h93
-rw-r--r--include/linux/mm.h118
-rw-r--r--include/linux/mm_types.h239
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/host.h6
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mod_devicetable.h12
-rw-r--r--include/linux/mpi.h61
-rw-r--r--include/linux/mroute_base.h10
-rw-r--r--include/linux/msi.h2
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/mtd/map.h2
-rw-r--r--include/linux/mtd/nand.h3
-rw-r--r--include/linux/mtd/rawnand.h51
-rw-r--r--include/linux/mtd/spi-nor.h2
-rw-r--r--include/linux/mutex.h3
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/net_dim.h69
-rw-r--r--include/linux/netdev_features.h6
-rw-r--r--include/linux/netdevice.h96
-rw-r--r--include/linux/netfilter.h34
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h20
-rw-r--r--include/linux/netfilter/nf_osf.h33
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter_bridge/ebtables.h4
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/nfs_xdr.h15
-rw-r--r--include/linux/node.h8
-rw-r--r--include/linux/nospec.h10
-rw-r--r--include/linux/notifier.h34
-rw-r--r--include/linux/nubus.h2
-rw-r--r--include/linux/nvme.h16
-rw-r--r--include/linux/nvmem-provider.h11
-rw-r--r--include/linux/of_clk.h30
-rw-r--r--include/linux/of_device.h8
-rw-r--r--include/linux/of_pci.h34
-rw-r--r--include/linux/omap-iommu.h5
-rw-r--r--include/linux/oom.h2
-rw-r--r--include/linux/overflow.h278
-rw-r--r--include/linux/page-flags.h51
-rw-r--r--include/linux/page_counter.h26
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-epc.h8
-rw-r--r--include/linux/pci-epf.h4
-rw-r--r--include/linux/pci.h23
-rw-r--r--include/linux/pci_hotplug.h18
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/linux/percpu-rwsem.h6
-rw-r--r--include/linux/perf/arm_pmu.h2
-rw-r--r--include/linux/perf_event.h15
-rw-r--r--include/linux/pfn_t.h4
-rw-r--r--include/linux/phy.h69
-rw-r--r--include/linux/phy/phy.h1
-rw-r--r--include/linux/pkeys.h13
-rw-r--r--include/linux/pktcdvd.h2
-rw-r--r--include/linux/platform_data/b53.h4
-rw-r--r--include/linux/platform_data/clk-st.h17
-rw-r--r--include/linux/platform_data/gpio-dwapb.h3
-rw-r--r--include/linux/platform_data/i2c-gpio.h (renamed from include/linux/i2c-gpio.h)0
-rw-r--r--include/linux/platform_data/i2c-mux-gpio.h (renamed from include/linux/i2c-mux-gpio.h)0
-rw-r--r--include/linux/platform_data/i2c-ocores.h (renamed from include/linux/i2c-ocores.h)0
-rw-r--r--include/linux/platform_data/i2c-omap.h (renamed from include/linux/i2c-omap.h)0
-rw-r--r--include/linux/platform_data/i2c-pca-platform.h (renamed from include/linux/i2c-pca-platform.h)0
-rw-r--r--include/linux/platform_data/i2c-xiic.h (renamed from include/linux/i2c-xiic.h)0
-rw-r--r--include/linux/platform_data/mdio-gpio.h33
-rw-r--r--include/linux/platform_data/media/ir-rx51.h9
-rw-r--r--include/linux/platform_data/media/mmp-camera.h19
-rw-r--r--include/linux/platform_data/mlxreg.h3
-rw-r--r--include/linux/platform_data/mtd-davinci.h10
-rw-r--r--include/linux/platform_data/mv88e6xxx.h18
-rw-r--r--include/linux/platform_data/sc18is602.h2
-rw-r--r--include/linux/platform_data/shmob_drm.h4
-rw-r--r--include/linux/platform_data/spi-imx.h29
-rw-r--r--include/linux/platform_data/tda9950.h16
-rw-r--r--include/linux/platform_data/ti-aemif.h25
-rw-r--r--include/linux/platform_data/ti-sysc.h1
-rw-r--r--include/linux/platform_data/tsl2772.h101
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm_domain.h94
-rw-r--r--include/linux/pm_opp.h27
-rw-r--r--include/linux/poll.h14
-rw-r--r--include/linux/power/bq27xxx_battery.h3
-rw-r--r--include/linux/power_supply.h18
-rw-r--r--include/linux/proc_fs.h51
-rw-r--r--include/linux/property.h52
-rw-r--r--include/linux/psp-sev.h23
-rw-r--r--include/linux/pstore.h2
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/pwm_backlight.h2
-rw-r--r--include/linux/qcom-geni-se.h425
-rw-r--r--include/linux/qed/common_hsi.h4
-rw-r--r--include/linux/qed/iscsi_common.h8
-rw-r--r--include/linux/qed/qed_eth_if.h5
-rw-r--r--include/linux/qed/qed_if.h284
-rw-r--r--include/linux/qed/qed_ll2_if.h10
-rw-r--r--include/linux/qed/qed_rdma_if.h16
-rw-r--r--include/linux/qed/roce_common.h1
-rw-r--r--include/linux/rbtree_augmented.h1
-rw-r--r--include/linux/rbtree_latch.h1
-rw-r--r--include/linux/rculist.h13
-rw-r--r--include/linux/rculist_nulls.h2
-rw-r--r--include/linux/rcupdate.h9
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h19
-rw-r--r--include/linux/regulator/ab8500.h157
-rw-r--r--include/linux/regulator/arizona-ldo1.h3
-rw-r--r--include/linux/regulator/consumer.h1
-rw-r--r--include/linux/regulator/driver.h27
-rw-r--r--include/linux/regulator/machine.h4
-rw-r--r--include/linux/regulator/max8952.h1
-rw-r--r--include/linux/remoteproc.h2
-rw-r--r--include/linux/restart_block.h7
-rw-r--r--include/linux/rhashtable.h38
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rpmsg.h27
-rw-r--r--include/linux/rpmsg/qcom_glink.h2
-rw-r--r--include/linux/rslib.h74
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/rwsem.h6
-rw-r--r--include/linux/sbitmap.h36
-rw-r--r--include/linux/sched.h213
-rw-r--r--include/linux/sched/mm.h46
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/scmi_protocol.h18
-rw-r--r--include/linux/seccomp.h5
-rw-r--r--include/linux/security.h21
-rw-r--r--include/linux/seq_file_net.h19
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/serial_core.h25
-rw-r--r--include/linux/shmem_fs.h13
-rw-r--r--include/linux/shrinker.h7
-rw-r--r--include/linux/signal.h3
-rw-r--r--include/linux/skb_array.h5
-rw-r--r--include/linux/skbuff.h31
-rw-r--r--include/linux/slab.h18
-rw-r--r--include/linux/slab_def.h7
-rw-r--r--include/linux/slub_def.h1
-rw-r--r--include/linux/soc/qcom/apr.h128
-rw-r--r--include/linux/soc/qcom/smem.h2
-rw-r--r--include/linux/soc/ti/knav_dma.h12
-rw-r--r--include/linux/soc/ti/knav_qmss.h1
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h10
-rw-r--r--include/linux/socket.h5
-rw-r--r--include/linux/sony-laptop.h4
-rw-r--r--include/linux/soundwire/sdw.h332
-rw-r--r--include/linux/soundwire/sdw_intel.h14
-rw-r--r--include/linux/spi/spi-mem.h249
-rw-r--r--include/linux/spi/spi.h60
-rw-r--r--include/linux/spinlock.h18
-rw-r--r--include/linux/srcu.h36
-rw-r--r--include/linux/srcutiny.h6
-rw-r--r--include/linux/srcutree.h6
-rw-r--r--include/linux/stackprotector.h2
-rw-r--r--include/linux/stat.h8
-rw-r--r--include/linux/ste_modem_shm.h56
-rw-r--r--include/linux/string.h4
-rw-r--r--include/linux/string_helpers.h1
-rw-r--r--include/linux/stringhash.h4
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h2
-rw-r--r--include/linux/sunrpc/rpc_rdma.h1
-rw-r--r--include/linux/sunrpc/svc_rdma.h95
-rw-r--r--include/linux/sunrpc/xprt.h6
-rw-r--r--include/linux/sunrpc/xprtrdma.h1
-rw-r--r--include/linux/swait.h15
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/syscalls.h29
-rw-r--r--include/linux/tcp.h5
-rw-r--r--include/linux/textsearch.h4
-rw-r--r--include/linux/thermal.h17
-rw-r--r--include/linux/thread_info.h17
-rw-r--r--include/linux/ti-emif-sram.h75
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/time32.h18
-rw-r--r--include/linux/time64.h17
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h124
-rw-r--r--include/linux/timekeeping32.h73
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/tnum.h4
-rw-r--r--include/linux/trace_events.h20
-rw-r--r--include/linux/tracehook.h1
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/tty.h7
-rw-r--r--include/linux/tty_driver.h2
-rw-r--r--include/linux/types.h34
-rw-r--r--include/linux/u64_stats_sync.h14
-rw-r--r--include/linux/udp.h3
-rw-r--r--include/linux/uio.h15
-rw-r--r--include/linux/uio_driver.h4
-rw-r--r--include/linux/umh.h12
-rw-r--r--include/linux/usb.h14
-rw-r--r--include/linux/usb/atmel_usba_udc.h24
-rw-r--r--include/linux/usb/audio-v2.h9
-rw-r--r--include/linux/usb/audio-v3.h40
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/musb.h15
-rw-r--r--include/linux/usb/pd.h4
-rw-r--r--include/linux/usb/phy.h36
-rw-r--r--include/linux/usb/tcpm.h10
-rw-r--r--include/linux/usb/tegra_usb_phy.h2
-rw-r--r--include/linux/userfaultfd_k.h6
-rw-r--r--include/linux/vbox_utils.h23
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/linux/virtio_net.h11
-rw-r--r--include/linux/virtio_ring.h4
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/wait_bit.h17
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/linux/xattr.h1
-rw-r--r--include/media/dvb-usb-ids.h1
-rw-r--r--include/media/dvbdev.h2
-rw-r--r--include/media/i2c/tvp7002.h2
-rw-r--r--include/media/media-entity.h2
-rw-r--r--include/media/rc-core.h1
-rw-r--r--include/media/v4l2-dev.h25
-rw-r--r--include/media/v4l2-device.h4
-rw-r--r--include/media/v4l2-fwnode.h2
-rw-r--r--include/media/v4l2-ioctl.h12
-rw-r--r--include/media/videobuf-core.h4
-rw-r--r--include/media/videobuf-dma-sg.h4
-rw-r--r--include/media/videobuf-dvb.h59
-rw-r--r--include/media/videobuf-vmalloc.h2
-rw-r--r--include/media/vsp1.h45
-rw-r--r--include/misc/ocxl.h9
-rw-r--r--include/net/addrconf.h43
-rw-r--r--include/net/ax25.h5
-rw-r--r--include/net/ax88796.h14
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bonding.h12
-rw-r--r--include/net/busy_poll.h15
-rw-r--r--include/net/cfg80211.h131
-rw-r--r--include/net/dcbnl.h4
-rw-r--r--include/net/devlink.h39
-rw-r--r--include/net/dsa.h37
-rw-r--r--include/net/erspan.h28
-rw-r--r--include/net/failover.h36
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow_dissector.h9
-rw-r--r--include/net/if_inet6.h6
-rw-r--r--include/net/ife.h3
-rw-r--r--include/net/inet_connection_sock.h24
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/inet_timewait_sock.h2
-rw-r--r--include/net/ip.h9
-rw-r--r--include/net/ip6_fib.h222
-rw-r--r--include/net/ip6_route.h85
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ip_tunnels.h15
-rw-r--r--include/net/ip_vs.h53
-rw-r--r--include/net/ipv6.h11
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/net/mac80211.h20
-rw-r--r--include/net/neighbour.h19
-rw-r--r--include/net/net_failover.h40
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/nf_conntrack_count.h12
-rw-r--r--include/net/netfilter/nf_flow_table.h24
-rw-r--r--include/net/netfilter/nf_nat.h6
-rw-r--r--include/net/netfilter/nf_nat_core.h11
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h64
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h8
-rw-r--r--include/net/netfilter/nf_nat_redirect.h2
-rw-r--r--include/net/netfilter/nf_socket.h17
-rw-r--r--include/net/netfilter/nf_tables.h93
-rw-r--r--include/net/netfilter/nf_tables_core.h11
-rw-r--r--include/net/netfilter/nf_tproxy.h113
-rw-r--r--include/net/netfilter/nfnetlink_log.h17
-rw-r--r--include/net/netfilter/nft_dup.h10
-rw-r--r--include/net/netfilter/nft_meta.h44
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/ipv6.h4
-rw-r--r--include/net/netns/nftables.h3
-rw-r--r--include/net/netrom.h5
-rw-r--r--include/net/page_pool.h144
-rw-r--r--include/net/phonet/pn_dev.h4
-rw-r--r--include/net/ping.h11
-rw-r--r--include/net/pkt_cls.h20
-rw-r--r--include/net/raw.h4
-rw-r--r--include/net/rose.h6
-rw-r--r--include/net/route.h1
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h22
-rw-r--r--include/net/sctp/constants.h5
-rw-r--r--include/net/sctp/sctp.h57
-rw-r--r--include/net/sctp/sm.h4
-rw-r--r--include/net/sctp/structs.h11
-rw-r--r--include/net/seg6.h7
-rw-r--r--include/net/seg6_local.h32
-rw-r--r--include/net/sock.h27
-rw-r--r--include/net/strparser.h2
-rw-r--r--include/net/switchdev.h1
-rw-r--r--include/net/tcp.h44
-rw-r--r--include/net/tipc.h4
-rw-r--r--include/net/tls.h127
-rw-r--r--include/net/transp_v6.h11
-rw-r--r--include/net/udp.h31
-rw-r--r--include/net/vxlan.h1
-rw-r--r--include/net/xdp.h99
-rw-r--r--include/net/xdp_sock.h99
-rw-r--r--include/net/xfrm.h5
-rw-r--r--include/ras/ras_event.h22
-rw-r--r--include/rdma/ib_verbs.h5
-rw-r--r--include/scsi/osd_initiator.h6
-rw-r--r--include/scsi/scsi_dbg.h2
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_devinfo.h75
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h4
-rw-r--r--include/soc/qcom/cmd-db.h45
-rw-r--r--include/soc/tegra/cpuidle.h2
-rw-r--r--include/soc/tegra/mc.h37
-rw-r--r--include/sound/control.h7
-rw-r--r--include/sound/core.h2
-rw-r--r--include/sound/emu10k1.h4
-rw-r--r--include/sound/hdaudio.h5
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/omap-pcm.h30
-rw-r--r--include/sound/rt5640.h27
-rw-r--r--include/sound/rt5668.h40
-rw-r--r--include/sound/soc-dai.h28
-rw-r--r--include/sound/soc.h397
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/afs.h42
-rw-r--r--include/trace/events/asoc.h1
-rw-r--r--include/trace/events/bpf.h355
-rw-r--r--include/trace/events/btrfs.h323
-rw-r--r--include/trace/events/fib.h107
-rw-r--r--include/trace/events/fib6.h43
-rw-r--r--include/trace/events/host1x.h16
-rw-r--r--include/trace/events/initcall.h14
-rw-r--r--include/trace/events/rcu.h13
-rw-r--r--include/trace/events/rpcrdma.h660
-rw-r--r--include/trace/events/rseq.h57
-rw-r--r--include/trace/events/rxrpc.h117
-rw-r--r--include/trace/events/sched.h4
-rw-r--r--include/trace/events/sunrpc.h16
-rw-r--r--include/trace/events/tcp.h78
-rw-r--r--include/trace/events/ufs.h27
-rw-r--r--include/trace/events/workqueue.h2
-rw-r--r--include/trace/events/xdp.h50
-rw-r--r--include/trace/events/xen.h16
-rw-r--r--include/trace/trace_events.h1
-rw-r--r--include/uapi/asm-generic/msgbuf.h27
-rw-r--r--include/uapi/asm-generic/posix_types.h1
-rw-r--r--include/uapi/asm-generic/sembuf.h26
-rw-r--r--include/uapi/asm-generic/shmbuf.h41
-rw-r--r--include/uapi/asm-generic/siginfo.h3
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h20
-rw-r--r--include/uapi/drm/drm.h7
-rw-r--r--include/uapi/drm/drm_mode.h6
-rw-r--r--include/uapi/drm/exynos_drm.h240
-rw-r--r--include/uapi/drm/tegra_drm.h492
-rw-r--r--include/uapi/drm/v3d_drm.h194
-rw-r--r--include/uapi/drm/vc4_drm.h13
-rw-r--r--include/uapi/linux/aio_abi.h13
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/auto_fs.h169
-rw-r--r--include/uapi/linux/auto_fs4.h153
-rw-r--r--include/uapi/linux/bpf.h2220
-rw-r--r--include/uapi/linux/bpfilter.h21
-rw-r--r--include/uapi/linux/btf.h113
-rw-r--r--include/uapi/linux/btrfs.h97
-rw-r--r--include/uapi/linux/cn_proc.h4
-rw-r--r--include/uapi/linux/dcbnl.h11
-rw-r--r--include/uapi/linux/devlink.h14
-rw-r--r--include/uapi/linux/elf.h1
-rw-r--r--include/uapi/linux/fs.h10
-rw-r--r--include/uapi/linux/fuse.h7
-rw-r--r--include/uapi/linux/if_addr.h1
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/if_xdp.h78
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/kvm.h8
-rw-r--r--include/uapi/linux/lirc.h6
-rw-r--r--include/uapi/linux/ncp.h202
-rw-r--r--include/uapi/linux/ncp_fs.h147
-rw-r--r--include/uapi/linux/ncp_mount.h72
-rw-r--r--include/uapi/linux/ncp_no.h20
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tcp.h3
-rw-r--r--include/uapi/linux/netfilter/nf_nat.h12
-rw-r--r--include/uapi/linux/netfilter/nf_osf.h86
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h91
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h106
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h6
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h43
-rw-r--r--include/uapi/linux/nl80211.h129
-rw-r--r--include/uapi/linux/omap3isp.h22
-rw-r--r--include/uapi/linux/openvswitch.h28
-rw-r--r--include/uapi/linux/pci_regs.h8
-rw-r--r--include/uapi/linux/perf_event.h18
-rw-r--r--include/uapi/linux/pkt_cls.h1
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/prctl.h14
-rw-r--r--include/uapi/linux/psp-sev.h12
-rw-r--r--include/uapi/linux/random.h3
-rw-r--r--include/uapi/linux/rpmsg.h9
-rw-r--r--include/uapi/linux/rseq.h133
-rw-r--r--include/uapi/linux/rtnetlink.h8
-rw-r--r--include/uapi/linux/seccomp.h5
-rw-r--r--include/uapi/linux/seg6_local.h12
-rw-r--r--include/uapi/linux/signalfd.h6
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--include/uapi/linux/sysctl.h18
-rw-r--r--include/uapi/linux/target_core_user.h11
-rw-r--r--include/uapi/linux/tcp.h16
-rw-r--r--include/uapi/linux/time.h8
-rw-r--r--include/uapi/linux/tipc.h12
-rw-r--r--include/uapi/linux/tipc_config.h5
-rw-r--r--include/uapi/linux/tipc_netlink.h1
-rw-r--r--include/uapi/linux/tty_flags.h2
-rw-r--r--include/uapi/linux/types.h4
-rw-r--r--include/uapi/linux/types_32_64.h50
-rw-r--r--include/uapi/linux/udp.h1
-rw-r--r--include/uapi/linux/usb/audio.h27
-rw-r--r--include/uapi/linux/usb/ch11.h5
-rw-r--r--include/uapi/linux/virtio_balloon.h15
-rw-r--r--include/uapi/linux/virtio_config.h16
-rw-r--r--include/uapi/linux/virtio_gpu.h1
-rw-r--r--include/uapi/linux/virtio_net.h3
-rw-r--r--include/uapi/linux/vmcore.h18
-rw-r--r--include/uapi/misc/ocxl.h14
-rw-r--r--include/uapi/sound/asoc.h86
-rw-r--r--include/uapi/sound/skl-tplg-interface.h237
-rw-r--r--include/uapi/sound/tlv.h16
-rw-r--r--include/uapi/xen/privcmd.h11
-rw-r--r--include/video/auo_k190xfb.h107
-rw-r--r--include/video/omapfb_dss.h54
-rw-r--r--include/video/sh_mobile_lcdc.h3
-rw-r--r--include/video/sh_mobile_meram.h95
-rw-r--r--include/xen/interface/io/kbdif.h2
-rw-r--r--include/xen/interface/io/sndif.h322
-rw-r--r--include/xen/interface/memory.h66
-rw-r--r--include/xen/interface/xen.h7
-rw-r--r--include/xen/xen-ops.h24
714 files changed, 17400 insertions, 6409 deletions
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 7b289dd00a30..6f69a4f638f8 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -49,11 +49,14 @@
/* Definitions of the predefined namespace names */
#define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */
-#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */
-
#define ACPI_PREFIX_MIXED (u32) 0x69706341 /* "Acpi" */
#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */
+/* Root name stuff */
+
+#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */
+#define ACPI_ROOT_PATHNAME "\\___"
+#define ACPI_NAMESPACE_ROOT "Namespace Root"
#define ACPI_NS_ROOT_PATH "\\"
#endif /* __ACNAMES_H__ */
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 0a6c5bd92256..3a26aa7ead23 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -80,7 +80,8 @@
#define ACPI_LV_ALLOCATIONS 0x00100000
#define ACPI_LV_FUNCTIONS 0x00200000
#define ACPI_LV_OPTIMIZATIONS 0x00400000
-#define ACPI_LV_VERBOSITY2 0x00700000 | ACPI_LV_VERBOSITY1
+#define ACPI_LV_PARSE_TREES 0x00800000
+#define ACPI_LV_VERBOSITY2 0x00F00000 | ACPI_LV_VERBOSITY1
#define ACPI_LV_ALL ACPI_LV_VERBOSITY2
/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
@@ -131,6 +132,7 @@
#define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES)
#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS)
#define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS)
+#define ACPI_DB_PARSE_TREES ACPI_DEBUG_LEVEL (ACPI_LV_PARSE_TREES)
#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES)
#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS)
#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS)
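
As a sanity check on the widened mask above: the new ACPI_LV_VERBOSITY2 value 0x00F00000 is simply the four level-2 bits OR'd together, now including the freshly added ACPI_LV_PARSE_TREES bit (the old 0x00700000 covered only the first three). A tiny illustrative check using the constants from this hunk (standalone C, not kernel code):

/* Illustrative only: mirrors the level-2 debug bits from the hunk above. */
#include <assert.h>

#define ACPI_LV_ALLOCATIONS    0x00100000
#define ACPI_LV_FUNCTIONS      0x00200000
#define ACPI_LV_OPTIMIZATIONS  0x00400000
#define ACPI_LV_PARSE_TREES    0x00800000

static_assert((ACPI_LV_ALLOCATIONS | ACPI_LV_FUNCTIONS |
               ACPI_LV_OPTIMIZATIONS | ACPI_LV_PARSE_TREES) == 0x00F00000,
              "VERBOSITY2 mask covers all four level-2 bits");
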
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 540d35f06ad6..eb1f21af7556 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -98,6 +98,27 @@ void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags);
#endif
/*
+ * RAW spinlock primitives. If the OS does not provide them, fallback to
+ * spinlock primitives
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock
+# define acpi_os_create_raw_lock(out_handle) acpi_os_create_lock(out_handle)
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock
+# define acpi_os_delete_raw_lock(handle) acpi_os_delete_lock(handle)
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock
+# define acpi_os_acquire_raw_lock(handle) acpi_os_acquire_lock(handle)
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock
+# define acpi_os_release_raw_lock(handle, flags) \
+ acpi_os_release_lock(handle, flags)
+#endif
+
+/*
* Semaphore primitives
*/
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_semaphore
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index da0215ea9f44..48d84f0d9547 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20180313
+#define ACPI_CA_VERSION 0x20180531
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -753,6 +753,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
u32 gpe_number,
acpi_event_status
*event_status))
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 876012da8e6e..c50ef7e6b942 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -67,7 +67,7 @@
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049C, May 2017
+ * Document number: ARM DEN 0049D, March 2018
*
******************************************************************************/
@@ -98,7 +98,8 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
ACPI_IORT_NODE_SMMU = 0x03,
- ACPI_IORT_NODE_SMMU_V3 = 0x04
+ ACPI_IORT_NODE_SMMU_V3 = 0x04,
+ ACPI_IORT_NODE_PMCG = 0x05
};
struct acpi_iort_id_mapping {
@@ -152,10 +153,17 @@ struct acpi_iort_named_component {
char device_name[1]; /* Path of namespace object */
};
+/* Masks for Flags field above */
+
+#define ACPI_IORT_NC_STALL_SUPPORTED (1)
+#define ACPI_IORT_NC_PASID_BITS (31<<1)
+
struct acpi_iort_root_complex {
u64 memory_properties; /* Memory access properties */
u32 ats_attribute;
u32 pci_segment_number;
+ u8 memory_address_limit; /* Memory address size limit */
+ u8 reserved[3]; /* Reserved, must be zero */
};
/* Values for ats_attribute field above */
@@ -209,9 +217,7 @@ struct acpi_iort_smmu_v3 {
u32 pri_gsiv;
u32 gerr_gsiv;
u32 sync_gsiv;
- u8 pxm;
- u8 reserved1;
- u16 reserved2;
+ u32 pxm;
u32 id_mapping_index;
};
@@ -224,9 +230,16 @@ struct acpi_iort_smmu_v3 {
/* Masks for Flags field above */
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
-#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1)
+#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1)
#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3)
+struct acpi_iort_pmcg {
+ u64 page0_base_address;
+ u32 overflow_gsiv;
+ u32 node_reference;
+ u64 page1_base_address;
+};
+
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 1c530f95dc34..66ceb12ebc63 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -245,6 +245,10 @@ typedef u64 acpi_physical_address;
#define acpi_spinlock void *
#endif
+#ifndef acpi_raw_spinlock
+#define acpi_raw_spinlock acpi_spinlock
+#endif
+
#ifndef acpi_semaphore
#define acpi_semaphore void *
#endif
@@ -1268,6 +1272,7 @@ typedef enum {
#define ACPI_OSI_WIN_10 0x0D
#define ACPI_OSI_WIN_10_RS1 0x0E
#define ACPI_OSI_WIN_10_RS2 0x0F
+#define ACPI_OSI_WIN_10_RS3 0x10
/* Definitions of getopt */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 2010c0516f27..8e0b8250a139 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -20,14 +20,16 @@
#include <acpi/pcc.h>
#include <acpi/processor.h>
-/* Only support CPPCv2 for now. */
-#define CPPC_NUM_ENT 21
-#define CPPC_REV 2
+/* Support CPPCv2 and CPPCv3 */
+#define CPPC_V2_REV 2
+#define CPPC_V3_REV 3
+#define CPPC_V2_NUM_ENT 21
+#define CPPC_V3_NUM_ENT 23
#define PCC_CMD_COMPLETE_MASK (1 << 0)
#define PCC_ERROR_MASK (1 << 2)
-#define MAX_CPC_REG_ENT 19
+#define MAX_CPC_REG_ENT 21
/* CPPC specific PCC commands. */
#define CMD_READ 0
@@ -91,6 +93,8 @@ enum cppc_regs {
AUTO_ACT_WINDOW,
ENERGY_PERF,
REFERENCE_PERF,
+ LOWEST_FREQ,
+ NOMINAL_FREQ,
};
/*
@@ -104,6 +108,8 @@ struct cppc_perf_caps {
u32 nominal_perf;
u32 lowest_perf;
u32 lowest_nonlinear_perf;
+ u32 lowest_freq;
+ u32 nominal_freq;
};
struct cppc_perf_ctrls {
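
For orientation, the constant bumps above are internally consistent: CPPCv3 appends exactly two registers to enum cppc_regs (LOWEST_FREQ and NOMINAL_FREQ), which is why the table grows from 21 to 23 entries and the register-entry limit from 19 to 21. A minimal illustrative check of that arithmetic (standalone C, mirroring the constants rather than including the kernel header):

/* Illustrative only: restates the constants from the hunk above. */
#include <assert.h>

#define CPPC_V2_NUM_ENT  21
#define CPPC_V3_NUM_ENT  23
#define MAX_CPC_REG_ENT  21   /* was 19 for CPPCv2 */

static_assert(CPPC_V3_NUM_ENT == CPPC_V2_NUM_ENT + 2,
              "CPPCv3 appends LOWEST_FREQ and NOMINAL_FREQ");
static_assert(MAX_CPC_REG_ENT == 19 + 2,
              "register-entry limit grows by the same two registers");
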
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 8feb0c866ee0..1624e2be485c 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -55,22 +55,21 @@ enum {
/* From drivers/edac/ghes_edac.c */
#ifdef CONFIG_EDAC_GHES
-void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
- struct cper_sec_mem_err *mem_err);
+void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
int ghes_edac_register(struct ghes *ghes, struct device *dev);
void ghes_edac_unregister(struct ghes *ghes);
#else
-static inline void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+static inline void ghes_edac_report_mem_error(int sev,
struct cper_sec_mem_err *mem_err)
{
}
static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
- return 0;
+ return -ENODEV;
}
static inline void ghes_edac_unregister(struct ghes *ghes)
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index a0b232703302..7451b3bca83a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -102,6 +102,7 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
+#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
/* Use native linux version of acpi_os_allocate_zeroed */
@@ -119,6 +120,10 @@
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock
/*
* OSL interfaces used by debugger/disassembler
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 7e81475fe034..d754a1b12721 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -90,6 +90,36 @@ static inline acpi_thread_id acpi_os_get_thread_id(void)
lock ? AE_OK : AE_NO_MEMORY; \
})
+
+#define acpi_os_create_raw_lock(__handle) \
+ ({ \
+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+ if (lock) { \
+ *(__handle) = lock; \
+ raw_spin_lock_init(*(__handle)); \
+ } \
+ lock ? AE_OK : AE_NO_MEMORY; \
+ })
+
+static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
+{
+ acpi_cpu_flags flags;
+
+ raw_spin_lock_irqsave(lockp, flags);
+ return flags;
+}
+
+static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
+ acpi_cpu_flags flags)
+{
+ raw_spin_unlock_irqrestore(lockp, flags);
+}
+
+static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
+{
+ ACPI_FREE(handle);
+}
+
static inline u8 acpi_os_readable(void *pointer, acpi_size length)
{
return TRUE;
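
A usage sketch for the raw-lock OSL interface defined above, taken together with the acpiosxf.h fallbacks earlier in this diff; the function and lock names are invented for illustration and are not part of this patch:

/* Sketch of how ACPICA-style code would drive the new raw-lock OSL calls. */
#include <linux/acpi.h>

static acpi_raw_spinlock demo_raw_lock;   /* hypothetical lock */

static acpi_status demo_touch_hw(void)
{
        acpi_cpu_flags flags;

        if (ACPI_FAILURE(acpi_os_create_raw_lock(&demo_raw_lock)))
                return AE_NO_MEMORY;

        flags = acpi_os_acquire_raw_lock(demo_raw_lock);
        /* hardware-facing critical section; stays a true non-sleeping
         * spinning section even on PREEMPT_RT because the backing lock
         * is a raw_spinlock_t */
        acpi_os_release_raw_lock(demo_raw_lock, flags);

        acpi_os_delete_raw_lock(demo_raw_lock);
        return AE_OK;
}
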
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 34a028a7bcc5..87d14476edc2 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -25,6 +25,7 @@ typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic64 ## x
+#define ATOMIC_LONG_TYPE s64
#else
@@ -32,6 +33,7 @@ typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic ## x
+#define ATOMIC_LONG_TYPE int
#endif
@@ -90,6 +92,21 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release)
#define atomic_long_cmpxchg(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+
+
#define atomic_long_xchg_relaxed(v, new) \
(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg_acquire(v, new) \
@@ -244,6 +261,8 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+#define atomic_long_cond_read_relaxed(v, c) \
+ ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
#define atomic_long_cond_read_acquire(v, c) \
ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
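
The new atomic_long_try_cmpxchg*() wrappers follow the usual try_cmpxchg convention: the expected value is passed by pointer and refreshed on failure, so a CAS loop does not need an explicit re-read. A hedged sketch of the calling pattern (the counter/limit function is invented for illustration):

/* Sketch: bounded increment using the new wrapper; 'counter' and 'limit'
 * are illustrative names, not part of this patch. */
#include <linux/atomic.h>

static bool inc_below(atomic_long_t *counter, long limit)
{
        long old = atomic_long_read(counter);

        do {
                if (old >= limit)
                        return false;
                /* on failure, 'old' is refreshed with the current value */
        } while (!atomic_long_try_cmpxchg(counter, &old, old + 1));

        return true;
}
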
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 29458bbb2fa0..2cafdbb9ae4c 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -221,18 +221,17 @@ do { \
#endif
/**
- * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
* @ptr: pointer to the variable to wait on
* @cond: boolean expression to wait for
*
- * Equivalent to using smp_load_acquire() on the condition variable but employs
- * the control dependency of the wait to reduce the barrier on many platforms.
+ * Equivalent to using READ_ONCE() on the condition variable.
*
* Due to C lacking lambda expressions we load the value of *ptr into a
* pre-named variable @VAL to be used in @cond.
*/
-#ifndef smp_cond_load_acquire
-#define smp_cond_load_acquire(ptr, cond_expr) ({ \
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
typeof(*ptr) VAL; \
for (;;) { \
@@ -241,10 +240,26 @@ do { \
break; \
cpu_relax(); \
} \
- smp_acquire__after_ctrl_dep(); \
VAL; \
})
#endif
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({ \
+ typeof(*ptr) _val; \
+ _val = smp_cond_load_relaxed(ptr, cond_expr); \
+ smp_acquire__after_ctrl_dep(); \
+ _val; \
+})
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
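
With smp_cond_load_acquire() now layered on the new smp_cond_load_relaxed(), the calling convention is unchanged: the second argument is an expression over the implicitly declared VAL. A brief sketch showing both flavours (the function and its parameter are invented for illustration):

/* Sketch only: spin until another CPU publishes a nonzero state word. */
#include <linux/types.h>
#include <asm/barrier.h>

static u32 wait_for_publish(u32 *state)
{
        /* polling with no ordering requirement */
        u32 seen = smp_cond_load_relaxed(state, VAL != 0);

        /* use the acquire flavour when subsequent loads must observe
         * everything the publisher wrote before setting the flag */
        seen = smp_cond_load_acquire(state, VAL != 0);

        return seen;
}
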
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
new file mode 100644
index 000000000000..28819451b6d1
--- /dev/null
+++ b/include/asm-generic/compat.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* This is an empty stub for 32-bit-only architectures */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792f..ad2868263867 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,7 +4,16 @@
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
+ /*
+ * Use the non-coherent ops if available. If an architecture wants a
+ * more fine-grained selection of operations it will have to implement
+ * get_arch_dma_ops itself or use the per-device dma_ops.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT_OPS
+ return &dma_noncoherent_ops;
+#else
return &dma_direct_ops;
+#endif
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
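
The comment in the hunk above names the escape hatch: an architecture that wants anything finer-grained than the compile-time choice between dma_noncoherent_ops and dma_direct_ops supplies its own get_arch_dma_ops() or sets per-device dma_ops. A hypothetical arch-side override might look like the following (the IOMMU ops symbol and config option are invented):

/* Hypothetical <asm/dma-mapping.h> override, not part of this patch.
 * Like the generic version above, it is only ever included from
 * <linux/dma-mapping.h>, which supplies the declarations used here. */
extern const struct dma_map_ops my_arch_iommu_dma_ops;  /* assumed arch symbol */

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
        if (IS_ENABLED(CONFIG_MY_ARCH_IOMMU))   /* hypothetical option */
                return &my_arch_iommu_dma_ops;
        return &dma_direct_ops;                 /* generic direct-mapped ops */
}
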
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 719db1968d81..68efb950a918 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -19,42 +19,32 @@
#define KCRC_ALIGN 4
#endif
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
-#define KSYM(name) _##name
-#else
-#define KSYM(name) name
-#endif
-
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
*/
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
- .globl KSYM(__ksymtab_\name)
+ .globl __ksymtab_\name
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
-KSYM(__ksymtab_\name):
- __put \val, KSYM(__kstrtab_\name)
+__ksymtab_\name:
+ __put \val, __kstrtab_\name
.previous
.section __ksymtab_strings,"a"
-KSYM(__kstrtab_\name):
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
- .asciz "_\name"
-#else
+__kstrtab_\name:
.asciz "\name"
-#endif
.previous
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
-KSYM(__kcrctab_\name):
+__kcrctab_\name:
#if defined(CONFIG_MODULE_REL_CRCS)
- .long KSYM(__crc_\name) - .
+ .long __crc_\name - .
#else
- .long KSYM(__crc_\name)
+ .long __crc_\name
#endif
- .weak KSYM(__crc_\name)
+ .weak __crc_\name
.previous
#endif
#endif
@@ -84,12 +74,12 @@ KSYM(__kcrctab_\name):
#endif
#define EXPORT_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
#define EXPORT_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
#define EXPORT_DATA_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM(name),)
+ __EXPORT_SYMBOL(name, name,)
#define EXPORT_DATA_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM(name),_gpl)
+ __EXPORT_SYMBOL(name, name,_gpl)
#endif
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index ffb68d67be5f..a248545f1e18 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -13,17 +13,14 @@
#ifndef __ASSEMBLY__
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
#define S8_C(x) x
#define U8_C(x) x ## U
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 830d7659289b..6bb3cd3d695a 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -14,12 +14,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
-/*
- * By default, assume that no iommu is in use and that the PCI
- * space is mapped to address physical 0.
- */
-#ifndef PCI_DMA_BUS_IS_PHYS
-#define PCI_DMA_BUS_IS_PHYS (1)
-#endif
-
#endif /* _ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index b37b4ad7eb94..9cc457597ddf 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -26,7 +26,6 @@
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
-#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
@@ -35,7 +34,6 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
return atomic_read(&lock->val);
}
-#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -100,7 +98,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
/*
* unlock() needs release semantics:
*/
- (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
+ smp_store_release(&lock->locked, 0);
}
#endif
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 034acd0c4956..0763f065b975 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -29,13 +29,41 @@
#endif
typedef struct qspinlock {
- atomic_t val;
+ union {
+ atomic_t val;
+
+ /*
+ * By using the whole 2nd least significant byte for the
+ * pending bit, we can allow better optimization of the lock
+ * acquisition for the pending bit holder.
+ */
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+#else
+ struct {
+ u16 tail;
+ u16 locked_pending;
+ };
+ struct {
+ u8 reserved[2];
+ u8 pending;
+ u8 locked;
+ };
+#endif
+ };
} arch_spinlock_t;
/*
* Initializier
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
/*
* Bitfields in the atomic value:
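
The payoff of the new union, together with the qspinlock.h hunk earlier in this diff that now unlocks via smp_store_release(&lock->locked, 0), is that on little-endian machines the locked byte aliases exactly the low 8 bits of val, so a plain byte store releases the lock without an atomic RMW. A small standalone program that mimics the layout (a mock type for illustration, not the kernel's struct):

/* Standalone illustration of the little-endian union layout above. */
#include <stdint.h>
#include <stdio.h>

struct mock_qspinlock {
        union {
                uint32_t val;
                struct {
                        uint8_t  locked;        /* low byte on little-endian */
                        uint8_t  pending;
                        uint16_t tail;
                };
        };
};

int main(void)
{
        struct mock_qspinlock lock = { .val = 0xabcd0001u };   /* tail set, locked */

        lock.locked = 0;        /* what the byte-wise unlock store does */
        printf("val after unlock store: 0x%08x\n", lock.val);  /* 0xabcd0000 on LE */
        return 0;
}
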
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 278841c75b97..e373e2e10f6a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -64,15 +64,24 @@
* generates .data.identifier sections, which need to be pulled in with
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
+ *
+ * RODATA_MAIN is not used because existing code already defines .rodata.x
+ * sections to be brought in with rodata.
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
+#define SDATA_MAIN .sdata
+#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
+#define SBSS_MAIN .sbss
#endif
/*
@@ -104,66 +113,66 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
- VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+ __start_mcount_loc = .; \
+ KEEP(*(__mcount_loc)) \
+ __stop_mcount_loc = .;
#else
#define MCOUNT_REC()
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
-#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
- VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
+ KEEP(*(_ftrace_annotated_branch)) \
+ __stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
-#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
- VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#define BRANCH_PROFILE() __start_branch_profile = .; \
+ KEEP(*(_ftrace_branch)) \
+ __stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
+ __start_kprobe_blacklist = .; \
KEEP(*(_kprobe_blacklist)) \
- VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
+ __stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+ __start_error_injection_whitelist = .; \
KEEP(*(_error_injection_whitelist)) \
- VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
+ __stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+ __start_ftrace_events = .; \
KEEP(*(_ftrace_events)) \
- VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
- VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \
+ __stop_ftrace_events = .; \
+ __start_ftrace_eval_maps = .; \
KEEP(*(_ftrace_eval_map)) \
- VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
+ __stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
-#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
- VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
-#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
+ __stop___trace_bprintk_fmt = .;
+#define TRACEPOINT_STR() __start___tracepoint_str = .; \
KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
- VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
+ __stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
@@ -171,27 +180,27 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+ __start_syscalls_metadata = .; \
KEEP(*(__syscalls_metadata)) \
- VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+ __stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif
#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \
+ __start__bpf_raw_tp = .; \
KEEP(*(__bpf_raw_tp_map)) \
- VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .;
+ __stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif
#ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__earlycon_table) = .; \
+#define EARLYCON_TABLE() . = ALIGN(8); \
+ __earlycon_table = .; \
KEEP(*(__earlycon_table)) \
- VMLINUX_SYMBOL(__earlycon_table_end) = .;
+ __earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif
@@ -202,7 +211,7 @@
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__##name##_of_table) = .; \
+ __##name##_of_table = .; \
KEEP(*(__##name##_of_table)) \
KEEP(*(__##name##_of_table_end))
@@ -217,18 +226,18 @@
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \
+ __##name##_acpi_probe_table = .; \
KEEP(*(__##name##_acpi_probe_table)) \
- VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
+ __##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__dtb_start) = .; \
+ __dtb_start = .; \
KEEP(*(.dtb.init.rodata)) \
- VMLINUX_SYMBOL(__dtb_end) = .;
+ __dtb_end = .;
/*
* .data section
@@ -238,23 +247,23 @@
*(DATA_MAIN) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
- MEM_KEEP(init.data) \
- MEM_KEEP(exit.data) \
+ MEM_KEEP(init.data*) \
+ MEM_KEEP(exit.data*) \
*(.data.unlikely) \
- VMLINUX_SYMBOL(__start_once) = .; \
+ __start_once = .; \
*(.data.once) \
- VMLINUX_SYMBOL(__end_once) = .; \
+ __end_once = .; \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__start___jump_table) = .; \
+ __start___jump_table = .; \
KEEP(*(__jump_table)) \
- VMLINUX_SYMBOL(__stop___jump_table) = .; \
+ __stop___jump_table = .; \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__start___verbose) = .; \
+ __start___verbose = .; \
KEEP(*(__verbose)) \
- VMLINUX_SYMBOL(__stop___verbose) = .; \
+ __stop___verbose = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@@ -266,10 +275,10 @@
*/
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__nosave_begin) = .; \
+ __nosave_begin = .; \
*(.data..nosave) \
. = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__nosave_end) = .;
+ __nosave_end = .;
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
@@ -286,13 +295,13 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- VMLINUX_SYMBOL(__start_init_task) = .; \
- VMLINUX_SYMBOL(init_thread_union) = .; \
- VMLINUX_SYMBOL(init_stack) = .; \
- *(.data..init_task) \
- *(.data..init_thread_info) \
- . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
- VMLINUX_SYMBOL(__end_init_task) = .;
+ __start_init_task = .; \
+ init_thread_union = .; \
+ init_stack = .; \
+ KEEP(*(.data..init_task)) \
+ KEEP(*(.data..init_thread_info)) \
+ . = __start_init_task + THREAD_SIZE; \
+ __end_init_task = .;
/*
* Allow architectures to handle ro_after_init data on their
@@ -300,9 +309,9 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
- VMLINUX_SYMBOL(__start_ro_after_init) = .; \
+ __start_ro_after_init = .; \
*(.data..ro_after_init) \
- VMLINUX_SYMBOL(__end_ro_after_init) = .;
+ __end_ro_after_init = .;
#endif
/*
@@ -311,14 +320,14 @@
#define RO_DATA_SECTION(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_rodata) = .; \
+ __start_rodata = .; \
*(.rodata) *(.rodata.*) \
RO_AFTER_INIT_DATA /* Read only after init */ \
KEEP(*(__vermagic)) /* Kernel version magic */ \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+ __start___tracepoints_ptrs = .; \
KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
- VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
+ __stop___tracepoints_ptrs = .; \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@@ -328,109 +337,109 @@
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
+ __start_pci_fixups_early = .; \
KEEP(*(.pci_fixup_early)) \
- VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
+ __end_pci_fixups_early = .; \
+ __start_pci_fixups_header = .; \
KEEP(*(.pci_fixup_header)) \
- VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
+ __end_pci_fixups_header = .; \
+ __start_pci_fixups_final = .; \
KEEP(*(.pci_fixup_final)) \
- VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
+ __end_pci_fixups_final = .; \
+ __start_pci_fixups_enable = .; \
KEEP(*(.pci_fixup_enable)) \
- VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
+ __end_pci_fixups_enable = .; \
+ __start_pci_fixups_resume = .; \
KEEP(*(.pci_fixup_resume)) \
- VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
+ __end_pci_fixups_resume = .; \
+ __start_pci_fixups_resume_early = .; \
KEEP(*(.pci_fixup_resume_early)) \
- VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
+ __end_pci_fixups_resume_early = .; \
+ __start_pci_fixups_suspend = .; \
KEEP(*(.pci_fixup_suspend)) \
- VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
+ __end_pci_fixups_suspend = .; \
+ __start_pci_fixups_suspend_late = .; \
KEEP(*(.pci_fixup_suspend_late)) \
- VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
+ __end_pci_fixups_suspend_late = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_builtin_fw) = .; \
+ __start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
- VMLINUX_SYMBOL(__end_builtin_fw) = .; \
+ __end_builtin_fw = .; \
} \
\
TRACEDATA \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab) = .; \
+ __start___ksymtab = .; \
KEEP(*(SORT(___ksymtab+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ __stop___ksymtab = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+ __start___ksymtab_gpl = .; \
KEEP(*(SORT(___ksymtab_gpl+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ __stop___ksymtab_gpl = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
+ __start___ksymtab_unused = .; \
KEEP(*(SORT(___ksymtab_unused+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
+ __stop___ksymtab_unused = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+ __start___ksymtab_unused_gpl = .; \
KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ __stop___ksymtab_unused_gpl = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
+ __start___ksymtab_gpl_future = .; \
KEEP(*(SORT(___ksymtab_gpl_future+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
+ __stop___ksymtab_gpl_future = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab) = .; \
+ __start___kcrctab = .; \
KEEP(*(SORT(___kcrctab+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab) = .; \
+ __stop___kcrctab = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
+ __start___kcrctab_gpl = .; \
KEEP(*(SORT(___kcrctab_gpl+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
+ __stop___kcrctab_gpl = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
+ __start___kcrctab_unused = .; \
KEEP(*(SORT(___kcrctab_unused+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
+ __stop___kcrctab_unused = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+ __start___kcrctab_unused_gpl = .; \
KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ __stop___kcrctab_unused_gpl = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
+ __start___kcrctab_gpl_future = .; \
KEEP(*(SORT(___kcrctab_gpl_future+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
+ __stop___kcrctab_gpl_future = .; \
} \
\
/* Kernel symbol table: strings */ \
@@ -447,18 +456,18 @@
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___param) = .; \
+ __start___param = .; \
KEEP(*(__param)) \
- VMLINUX_SYMBOL(__stop___param) = .; \
+ __stop___param = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___modver) = .; \
+ __start___modver = .; \
KEEP(*(__modver)) \
- VMLINUX_SYMBOL(__stop___modver) = .; \
+ __stop___modver = .; \
. = ALIGN((align)); \
- VMLINUX_SYMBOL(__end_rodata) = .; \
+ __end_rodata = .; \
} \
. = ALIGN((align));
@@ -469,9 +478,9 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ __security_initcall_start = .; \
KEEP(*(.security_initcall.init)) \
- VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ __security_initcall_end = .; \
}
/*
@@ -487,58 +496,58 @@
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.text..refcount) \
*(.ref.text) \
- MEM_KEEP(init.text) \
- MEM_KEEP(exit.text) \
+ MEM_KEEP(init.text*) \
+ MEM_KEEP(exit.text*) \
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__sched_text_start) = .; \
+ __sched_text_start = .; \
*(.sched.text) \
- VMLINUX_SYMBOL(__sched_text_end) = .;
+ __sched_text_end = .;
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__lock_text_start) = .; \
+ __lock_text_start = .; \
*(.spinlock.text) \
- VMLINUX_SYMBOL(__lock_text_end) = .;
+ __lock_text_end = .;
#define CPUIDLE_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
+ __cpuidle_text_start = .; \
*(.cpuidle.text) \
- VMLINUX_SYMBOL(__cpuidle_text_end) = .;
+ __cpuidle_text_end = .;
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__kprobes_text_start) = .; \
+ __kprobes_text_start = .; \
*(.kprobes.text) \
- VMLINUX_SYMBOL(__kprobes_text_end) = .;
+ __kprobes_text_end = .;
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__entry_text_start) = .; \
+ __entry_text_start = .; \
*(.entry.text) \
- VMLINUX_SYMBOL(__entry_text_end) = .;
+ __entry_text_end = .;
#define IRQENTRY_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+ __irqentry_text_start = .; \
*(.irqentry.text) \
- VMLINUX_SYMBOL(__irqentry_text_end) = .;
+ __irqentry_text_end = .;
#define SOFTIRQENTRY_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__softirqentry_text_start) = .; \
+ __softirqentry_text_start = .; \
*(.softirqentry.text) \
- VMLINUX_SYMBOL(__softirqentry_text_end) = .;
+ __softirqentry_text_end = .;
/* Section used for early init (in .S files) */
-#define HEAD_TEXT *(.head.text)
+#define HEAD_TEXT KEEP(*(.head.text))
#define HEAD_TEXT_SECTION \
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
@@ -551,9 +560,9 @@
#define EXCEPTION_TABLE(align) \
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ex_table) = .; \
+ __start___ex_table = .; \
KEEP(*(__ex_table)) \
- VMLINUX_SYMBOL(__stop___ex_table) = .; \
+ __stop___ex_table = .; \
}
/*
@@ -567,11 +576,11 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__ctors_start) = .; \
+ __ctors_start = .; \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
- VMLINUX_SYMBOL(__ctors_end) = .;
+ __ctors_end = .;
#else
#define KERNEL_CTORS()
#endif
@@ -579,11 +588,11 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
- *(.init.data) \
- MEM_DISCARD(init.data) \
+ *(.init.data init.data.*) \
+ MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
- *(.init.rodata) \
+ *(.init.rodata .init.rodata.*) \
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
@@ -602,16 +611,16 @@
EARLYCON_TABLE()
#define INIT_TEXT \
- *(.init.text) \
+ *(.init.text .init.text.*) \
*(.text.startup) \
- MEM_DISCARD(init.text)
+ MEM_DISCARD(init.text*)
#define EXIT_DATA \
- *(.exit.data) \
+ *(.exit.data .exit.data.*) \
*(.fini_array) \
*(.dtors) \
- MEM_DISCARD(exit.data) \
- MEM_DISCARD(exit.rodata)
+ MEM_DISCARD(exit.data*) \
+ MEM_DISCARD(exit.rodata*)
#define EXIT_TEXT \
*(.exit.text) \
@@ -629,7 +638,7 @@
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
*(.dynsbss) \
- *(.sbss) \
+ *(SBSS_MAIN) \
*(.scommon) \
}
@@ -706,9 +715,9 @@
#define BUG_TABLE \
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___bug_table) = .; \
+ __start___bug_table = .; \
KEEP(*(__bug_table)) \
- VMLINUX_SYMBOL(__stop___bug_table) = .; \
+ __stop___bug_table = .; \
}
#else
#define BUG_TABLE
@@ -718,22 +727,22 @@
#define ORC_UNWIND_TABLE \
. = ALIGN(4); \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_orc_unwind_ip) = .; \
+ __start_orc_unwind_ip = .; \
KEEP(*(.orc_unwind_ip)) \
- VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .; \
+ __stop_orc_unwind_ip = .; \
} \
. = ALIGN(6); \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_orc_unwind) = .; \
+ __start_orc_unwind = .; \
KEEP(*(.orc_unwind)) \
- VMLINUX_SYMBOL(__stop_orc_unwind) = .; \
+ __stop_orc_unwind = .; \
} \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(orc_lookup) = .; \
+ orc_lookup = .; \
. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
- VMLINUX_SYMBOL(orc_lookup_end) = .; \
+ orc_lookup_end = .; \
}
#else
#define ORC_UNWIND_TABLE
@@ -743,9 +752,9 @@
#define TRACEDATA \
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__tracedata_start) = .; \
+ __tracedata_start = .; \
KEEP(*(.tracedata)) \
- VMLINUX_SYMBOL(__tracedata_end) = .; \
+ __tracedata_end = .; \
}
#else
#define TRACEDATA
@@ -753,24 +762,24 @@
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_notes) = .; \
- *(.note.*) \
- VMLINUX_SYMBOL(__stop_notes) = .; \
+ __start_notes = .; \
+ KEEP(*(.note.*)) \
+ __stop_notes = .; \
}
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
- VMLINUX_SYMBOL(__setup_start) = .; \
+ __setup_start = .; \
KEEP(*(.init.setup)) \
- VMLINUX_SYMBOL(__setup_end) = .;
+ __setup_end = .;
#define INIT_CALLS_LEVEL(level) \
- VMLINUX_SYMBOL(__initcall##level##_start) = .; \
+ __initcall##level##_start = .; \
KEEP(*(.initcall##level##.init)) \
KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
- VMLINUX_SYMBOL(__initcall_start) = .; \
+ __initcall_start = .; \
KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
@@ -781,22 +790,22 @@
INIT_CALLS_LEVEL(rootfs) \
INIT_CALLS_LEVEL(6) \
INIT_CALLS_LEVEL(7) \
- VMLINUX_SYMBOL(__initcall_end) = .;
+ __initcall_end = .;
#define CON_INITCALL \
- VMLINUX_SYMBOL(__con_initcall_start) = .; \
+ __con_initcall_start = .; \
KEEP(*(.con_initcall.init)) \
- VMLINUX_SYMBOL(__con_initcall_end) = .;
+ __con_initcall_end = .;
#define SECURITY_INITCALL \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ __security_initcall_start = .; \
KEEP(*(.security_initcall.init)) \
- VMLINUX_SYMBOL(__security_initcall_end) = .;
+ __security_initcall_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
- VMLINUX_SYMBOL(__initramfs_start) = .; \
+ __initramfs_start = .; \
KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
KEEP(*(.init.ramfs.info))
@@ -851,7 +860,7 @@
* sharing between subsections for different purposes.
*/
#define PERCPU_INPUT(cacheline) \
- VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ __per_cpu_start = .; \
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
@@ -861,7 +870,7 @@
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
PERCPU_DECRYPTED_SECTION \
- VMLINUX_SYMBOL(__per_cpu_end) = .;
+ __per_cpu_end = .;
/**
* PERCPU_VADDR - define output section for percpu area
@@ -888,12 +897,11 @@
* address, use PERCPU_SECTION.
*/
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- - LOAD_OFFSET) { \
+ __per_cpu_load = .; \
+ .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
PERCPU_INPUT(cacheline) \
} phdr \
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
+ . = __per_cpu_load + SIZEOF(.data..percpu);
/**
* PERCPU_SECTION - define output section for percpu area, simple version
@@ -910,7 +918,7 @@
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+ __per_cpu_load = .; \
PERCPU_INPUT(cacheline) \
}
@@ -949,9 +957,9 @@
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(_sinittext) = .; \
+ _sinittext = .; \
INIT_TEXT \
- VMLINUX_SYMBOL(_einittext) = .; \
+ _einittext = .; \
}
#define INIT_DATA_SECTION(initsetup_align) \
@@ -966,8 +974,8 @@
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
. = ALIGN(sbss_align); \
- VMLINUX_SYMBOL(__bss_start) = .; \
+ __bss_start = .; \
SBSS(sbss_align) \
BSS(bss_align) \
. = ALIGN(stop_align); \
- VMLINUX_SYMBOL(__bss_stop) = .;
+ __bss_stop = .;
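The mechanical effect of dropping VMLINUX_SYMBOL() is that the section boundary symbols defined in this linker script are now plain C identifiers, so C code references them directly with extern declarations. The wrapper only mattered on architectures that prefixed C symbols with an underscore, and with those gone it expands to nothing. A minimal sketch of the usual consumption pattern, using a hypothetical table name that is not taken from this file:

    /* Hypothetical element type for a table collected into its own section. */
    struct my_fixup {
            void (*fn)(void);
    };

    /* Boundary symbols emitted by the linker script, referenced directly. */
    extern struct my_fixup __start_my_fixups[];
    extern struct my_fixup __end_my_fixups[];

    static void run_my_fixups(void)
    {
            struct my_fixup *f;

            for (f = __start_my_fixups; f < __end_my_fixups; f++)
                    f->fn();
    }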
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 1aba888241dd..bd5e8ccf1687 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -17,6 +17,14 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
+/*
+ * Maximum values for blocksize and alignmask, used to allocate
+ * static buffers that are big enough for any combination of
+ * ciphers and architectures.
+ */
+#define MAX_CIPHER_BLOCKSIZE 16
+#define MAX_CIPHER_ALIGNMASK 15
+
struct crypto_aead;
struct crypto_instance;
struct module;
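The two new limits are meant for sizing on-stack scratch buffers that must hold one block of any cipher at its required alignment. A minimal sketch of the intended pattern; the function name is illustrative and not part of this patch:

    #include <crypto/algapi.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    static void copy_one_block_aligned(struct crypto_cipher *tfm, const u8 *in)
    {
            /* Worst case: largest block size plus largest alignment mask. */
            u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
            u8 *block = PTR_ALIGN(buf, crypto_cipher_alignmask(tfm) + 1);

            memcpy(block, in, crypto_cipher_blocksize(tfm));
            /* ... work on the properly aligned copy ... */
    }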
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 482461d8931d..cc414db9da0a 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,8 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll(struct file *file, struct socket *sock,
- poll_table *wait);
+__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
new file mode 100644
index 000000000000..b26dd70efd9a
--- /dev/null
+++ b/include/crypto/morus1280_glue.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MORUS-1280 Authenticated-Encryption Algorithm
+ * Common glue skeleton -- header file
+ *
+ * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_MORUS1280_GLUE_H
+#define _CRYPTO_MORUS1280_GLUE_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/morus_common.h>
+
+#define MORUS1280_WORD_SIZE 8
+#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE)
+
+struct morus1280_block {
+ u8 bytes[MORUS1280_BLOCK_SIZE];
+};
+
+struct morus1280_glue_ops {
+ void (*init)(void *state, const void *key, const void *iv);
+ void (*ad)(void *state, const void *data, unsigned int length);
+ void (*enc)(void *state, const void *src, void *dst, unsigned int length);
+ void (*dec)(void *state, const void *src, void *dst, unsigned int length);
+ void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length);
+ void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length);
+ void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);
+};
+
+struct morus1280_ctx {
+ const struct morus1280_glue_ops *ops;
+ struct morus1280_block key;
+};
+
+void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
+ const struct morus1280_glue_ops *ops);
+int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen);
+int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize);
+int crypto_morus1280_glue_encrypt(struct aead_request *req);
+int crypto_morus1280_glue_decrypt(struct aead_request *req);
+
+int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen);
+int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize);
+int cryptd_morus1280_glue_encrypt(struct aead_request *req);
+int cryptd_morus1280_glue_decrypt(struct aead_request *req);
+int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead);
+void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
+
+#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \
+ static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
+ .init = crypto_morus1280_##id##_init, \
+ .ad = crypto_morus1280_##id##_ad, \
+ .enc = crypto_morus1280_##id##_enc, \
+ .enc_tail = crypto_morus1280_##id##_enc_tail, \
+ .dec = crypto_morus1280_##id##_dec, \
+ .dec_tail = crypto_morus1280_##id##_dec_tail, \
+ .final = crypto_morus1280_##id##_final, \
+ }; \
+ \
+ static int crypto_morus1280_##id##_init_tfm(struct crypto_aead *tfm) \
+ { \
+ crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_##id##_ops); \
+ return 0; \
+ } \
+ \
+ static void crypto_morus1280_##id##_exit_tfm(struct crypto_aead *tfm) \
+ { \
+ } \
+ \
+ struct aead_alg crypto_morus1280_##id##_algs[] = {\
+ { \
+ .setkey = crypto_morus1280_glue_setkey, \
+ .setauthsize = crypto_morus1280_glue_setauthsize, \
+ .encrypt = crypto_morus1280_glue_encrypt, \
+ .decrypt = crypto_morus1280_glue_decrypt, \
+ .init = crypto_morus1280_##id##_init_tfm, \
+ .exit = crypto_morus1280_##id##_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS1280_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct morus1280_ctx), \
+ .cra_alignmask = 0, \
+ \
+ .cra_name = "__morus1280", \
+ .cra_driver_name = "__"driver_name, \
+ \
+ .cra_module = THIS_MODULE, \
+ } \
+ }, { \
+ .setkey = cryptd_morus1280_glue_setkey, \
+ .setauthsize = cryptd_morus1280_glue_setauthsize, \
+ .encrypt = cryptd_morus1280_glue_encrypt, \
+ .decrypt = cryptd_morus1280_glue_decrypt, \
+ .init = cryptd_morus1280_glue_init_tfm, \
+ .exit = cryptd_morus1280_glue_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS1280_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_ASYNC, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct crypto_aead *), \
+ .cra_alignmask = 0, \
+ \
+ .cra_priority = priority, \
+ \
+ .cra_name = "morus1280", \
+ .cra_driver_name = driver_name, \
+ \
+ .cra_module = THIS_MODULE, \
+ } \
+ } \
+ }
+
+#endif /* _CRYPTO_MORUS1280_GLUE_H */
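MORUS1280_DECLARE_ALGS() is meant to be instantiated from an architecture-specific glue module that supplies the seven crypto_morus1280_<id>_* primitives named by the macro. A rough sketch of such a module, assuming a hypothetical "simd" implementation; the primitive declarations, the priority value and the module-init function are illustrative only:

    /* Two of the seven required primitives, normally implemented in assembly;
     * the remaining ones (enc, enc_tail, dec, dec_tail, final) are declared
     * the same way before the macro is expanded.
     */
    asmlinkage void crypto_morus1280_simd_init(void *state, const void *key,
                                               const void *iv);
    asmlinkage void crypto_morus1280_simd_ad(void *state, const void *data,
                                             unsigned int length);

    MORUS1280_DECLARE_ALGS(simd, "morus1280-simd", 400);

    static int __init morus1280_simd_module_init(void)
    {
            /* crypto_register_aeads() is declared in <crypto/internal/aead.h>. */
            return crypto_register_aeads(crypto_morus1280_simd_algs,
                                         ARRAY_SIZE(crypto_morus1280_simd_algs));
    }
    module_init(morus1280_simd_module_init);

The macro builds two aead_alg entries per implementation: an internal one driven directly by the primitives, and an async cryptd-backed wrapper that externally visible requests go through.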
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
new file mode 100644
index 000000000000..90c8db07e740
--- /dev/null
+++ b/include/crypto/morus640_glue.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MORUS-640 Authenticated-Encryption Algorithm
+ * Common glue skeleton -- header file
+ *
+ * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_MORUS640_GLUE_H
+#define _CRYPTO_MORUS640_GLUE_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/morus_common.h>
+
+#define MORUS640_WORD_SIZE 4
+#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE)
+
+struct morus640_block {
+ u8 bytes[MORUS640_BLOCK_SIZE];
+};
+
+struct morus640_glue_ops {
+ void (*init)(void *state, const void *key, const void *iv);
+ void (*ad)(void *state, const void *data, unsigned int length);
+ void (*enc)(void *state, const void *src, void *dst, unsigned int length);
+ void (*dec)(void *state, const void *src, void *dst, unsigned int length);
+ void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length);
+ void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length);
+ void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);
+};
+
+struct morus640_ctx {
+ const struct morus640_glue_ops *ops;
+ struct morus640_block key;
+};
+
+void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
+ const struct morus640_glue_ops *ops);
+int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen);
+int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize);
+int crypto_morus640_glue_encrypt(struct aead_request *req);
+int crypto_morus640_glue_decrypt(struct aead_request *req);
+
+int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen);
+int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize);
+int cryptd_morus640_glue_encrypt(struct aead_request *req);
+int cryptd_morus640_glue_decrypt(struct aead_request *req);
+int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead);
+void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
+
+#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \
+ static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
+ .init = crypto_morus640_##id##_init, \
+ .ad = crypto_morus640_##id##_ad, \
+ .enc = crypto_morus640_##id##_enc, \
+ .enc_tail = crypto_morus640_##id##_enc_tail, \
+ .dec = crypto_morus640_##id##_dec, \
+ .dec_tail = crypto_morus640_##id##_dec_tail, \
+ .final = crypto_morus640_##id##_final, \
+ }; \
+ \
+ static int crypto_morus640_##id##_init_tfm(struct crypto_aead *tfm) \
+ { \
+ crypto_morus640_glue_init_ops(tfm, &crypto_morus640_##id##_ops); \
+ return 0; \
+ } \
+ \
+ static void crypto_morus640_##id##_exit_tfm(struct crypto_aead *tfm) \
+ { \
+ } \
+ \
+ struct aead_alg crypto_morus640_##id##_algs[] = {\
+ { \
+ .setkey = crypto_morus640_glue_setkey, \
+ .setauthsize = crypto_morus640_glue_setauthsize, \
+ .encrypt = crypto_morus640_glue_encrypt, \
+ .decrypt = crypto_morus640_glue_decrypt, \
+ .init = crypto_morus640_##id##_init_tfm, \
+ .exit = crypto_morus640_##id##_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS640_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct morus640_ctx), \
+ .cra_alignmask = 0, \
+ \
+ .cra_name = "__morus640", \
+ .cra_driver_name = "__"driver_name, \
+ \
+ .cra_module = THIS_MODULE, \
+ } \
+ }, { \
+ .setkey = cryptd_morus640_glue_setkey, \
+ .setauthsize = cryptd_morus640_glue_setauthsize, \
+ .encrypt = cryptd_morus640_glue_encrypt, \
+ .decrypt = cryptd_morus640_glue_decrypt, \
+ .init = cryptd_morus640_glue_init_tfm, \
+ .exit = cryptd_morus640_glue_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS640_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_ASYNC, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct crypto_aead *), \
+ .cra_alignmask = 0, \
+ \
+ .cra_priority = priority, \
+ \
+ .cra_name = "morus640", \
+ .cra_driver_name = driver_name, \
+ \
+ .cra_module = THIS_MODULE, \
+ } \
+ } \
+ }
+
+#endif /* _CRYPTO_MORUS640_GLUE_H */
diff --git a/include/crypto/morus_common.h b/include/crypto/morus_common.h
new file mode 100644
index 000000000000..39f28c749951
--- /dev/null
+++ b/include/crypto/morus_common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MORUS Authenticated-Encryption Algorithm
+ * Common definitions
+ *
+ * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_MORUS_COMMON_H
+#define _CRYPTO_MORUS_COMMON_H
+
+#define MORUS_BLOCK_WORDS 4
+#define MORUS_STATE_BLOCKS 5
+#define MORUS_NONCE_SIZE 16
+#define MORUS_MAX_AUTH_SIZE 16
+
+#endif /* _CRYPTO_MORUS_COMMON_H */
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h
deleted file mode 100644
index 19ed48aefc86..000000000000
--- a/include/crypto/salsa20.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values for the Salsa20 algorithm
- */
-
-#ifndef _CRYPTO_SALSA20_H
-#define _CRYPTO_SALSA20_H
-
-#include <linux/types.h>
-
-#define SALSA20_IV_SIZE 8
-#define SALSA20_MIN_KEY_SIZE 16
-#define SALSA20_MAX_KEY_SIZE 32
-#define SALSA20_BLOCK_SIZE 64
-
-struct crypto_skcipher;
-
-struct salsa20_ctx {
- u32 initial_state[16];
-};
-
-void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
- const u8 *iv);
-int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
-
-#endif /* _CRYPTO_SALSA20_H */
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index b64e64d20b28..7afd730d16ff 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -25,4 +25,7 @@ int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
unsigned int key_len);
+void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+
#endif
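Exporting crypto_sm4_encrypt()/crypto_sm4_decrypt() lets hardware or SIMD SM4 implementations reuse the generic code as a fallback when their fast path is unavailable. A hedged sketch, assuming the driver keeps a struct crypto_sm4_ctx as its tfm context; my_hw_available() and my_hw_sm4_encrypt() are placeholders:

    #include <crypto/sm4.h>
    #include <linux/crypto.h>

    static int my_sm4_setkey(struct crypto_tfm *tfm, const u8 *key,
                             unsigned int keylen)
    {
            struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);

            /* Generate the generic key schedule; also used by the fallback. */
            return crypto_sm4_expand_key(ctx, key, keylen);
    }

    static void my_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
    {
            if (!my_hw_available())                    /* placeholder check */
                    crypto_sm4_encrypt(tfm, out, in);  /* generic fallback */
            else
                    my_hw_sm4_encrypt(tfm, out, in);   /* placeholder fast path */
    }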
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 6c731c52c071..dd63d08cc54e 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -44,8 +44,10 @@ enum amd_asic_type {
CHIP_POLARIS10,
CHIP_POLARIS11,
CHIP_POLARIS12,
+ CHIP_VEGAM,
CHIP_VEGA10,
CHIP_VEGA12,
+ CHIP_VEGA20,
CHIP_RAVEN,
CHIP_LAST,
};
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index e9a1116d2f8e..475b706b49de 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -33,7 +33,8 @@ struct analogix_dp_plat_data {
struct drm_connector *connector;
bool skip_connector;
- int (*power_on)(struct analogix_dp_plat_data *);
+ int (*power_on_start)(struct analogix_dp_plat_data *);
+ int (*power_on_end)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
struct drm_connector *);
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index dd2a8cf7d20b..ccb5aa8468e0 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -151,7 +151,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
struct drm_encoder *encoder,
const struct dw_hdmi_plat_data *plat_data);
-void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
+void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c6666cd09347..f5099c12c6a6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -95,14 +95,6 @@ struct dma_buf_attachment;
struct pci_dev;
struct pci_controller;
-/***********************************************************************/
-/** \name DRM template customization defaults */
-/*@{*/
-
-/***********************************************************************/
-/** \name Internal types and structures */
-/*@{*/
-
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
/**
@@ -123,27 +115,13 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
#define DRM_SWITCH_POWER_CHANGING 2
#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-static __inline__ int drm_core_check_feature(struct drm_device *dev,
- int feature)
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
{
- return ((dev->driver->driver_features & feature) ? 1 : 0);
+ return dev->driver->driver_features & feature;
}
-/******************************************************************/
-/** \name Internal function definitions */
-/*@{*/
-
- /* Driver support (drm_drv.h) */
-
-/*
- * These are exported to drivers so that they can implement fencing using
- * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
- */
-
-/*@}*/
-
/* returns true if currently okay to sleep */
-static __inline__ bool drm_can_sleep(void)
+static inline bool drm_can_sleep(void)
{
if (in_atomic() || in_dbg_master() || irqs_disabled())
return false;
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 17606026590b..330c561c4c11 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -36,6 +36,9 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation)
return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270);
}
+#define DRM_BLEND_ALPHA_OPAQUE 0xffff
+
+int drm_plane_create_alpha_property(struct drm_plane *plane);
int drm_plane_create_rotation_property(struct drm_plane *plane,
unsigned int rotation,
unsigned int supported_rotations);
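drm_plane_create_alpha_property() attaches a standard per-plane opacity property ranging from 0 (transparent) to DRM_BLEND_ALPHA_OPAQUE; the committed value ends up in drm_plane_state.alpha (see the drm_plane.h hunk further down). A minimal sketch of driver usage; my_hw_set_plane_alpha() is a placeholder:

    /* At plane init: expose the generic "alpha" property. */
    static int my_plane_init_properties(struct drm_plane *plane)
    {
            return drm_plane_create_alpha_property(plane);
    }

    /* In the atomic update path: program the hardware blender. */
    static void my_plane_atomic_update(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
    {
            u16 alpha = plane->state->alpha;  /* DRM_BLEND_ALPHA_OPAQUE = opaque */

            my_hw_set_plane_alpha(plane, alpha);
    }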
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 7c4fa32f3fc6..858ba19a3e29 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -38,7 +38,6 @@ struct drm_device {
struct device *dev; /**< Device structure of bus-device */
struct drm_driver *driver; /**< DRM driver managing the device */
void *dev_private; /**< DRM driver private data */
- struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
bool registered;
@@ -46,7 +45,14 @@ struct drm_device {
/* currently active master for this device. Protected by master_mutex */
struct drm_master *master;
- atomic_t unplugged; /**< Flag whether dev is dead */
+ /**
+ * @unplugged:
+ *
+ * Flag to tell if the device has been unplugged.
+ * See drm_dev_enter() and drm_dev_is_unplugged().
+ */
+ bool unplugged;
+
struct inode *anon_inode; /**< inode for private address-space */
char *unique; /**< unique name of the device */
/*@} */
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 62903bae0221..c01564991a9f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -64,6 +64,11 @@
/* AUX CH addresses */
/* DPCD */
#define DP_DPCD_REV 0x000
+# define DP_DPCD_REV_10 0x10
+# define DP_DPCD_REV_11 0x11
+# define DP_DPCD_REV_12 0x12
+# define DP_DPCD_REV_13 0x13
+# define DP_DPCD_REV_14 0x14
#define DP_MAX_LINK_RATE 0x001
@@ -119,6 +124,7 @@
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */
#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
@@ -478,6 +484,7 @@
# define DP_PSR_FRAME_CAPTURE (1 << 3)
# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
+# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -794,6 +801,15 @@
# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4)
# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4
+#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */
+# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */
+# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */
+# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */
+# define DP_SU_VALID (1 << 3) /* eDP 1.4 */
+# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */
+# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */
+# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */
+
#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
@@ -967,18 +983,18 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw);
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
/* 0x80+ CEA-861 infoframe types */
-struct edp_sdp_header {
+struct dp_sdp_header {
u8 HB0; /* Secondary Data Packet ID */
u8 HB1; /* Secondary Data Packet Type */
- u8 HB2; /* 7:5 reserved, 4:0 revision number */
- u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+ u8 HB2; /* Secondary Data Packet Specific header, Byte 0 */
+ u8 HB3; /* Secondary Data packet Specific header, Byte 1 */
} __packed;
#define EDP_SDP_HEADER_REVISION_MASK 0x1F
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
struct edp_vsc_psr {
- struct edp_sdp_header sdp_header;
+ struct dp_sdp_header sdp_header;
u8 DB0; /* Stereo Interface */
u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d23dcdd1bd95..7e545f5f94d3 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -624,6 +624,8 @@ void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
+bool drm_dev_enter(struct drm_device *dev, int *idx);
+void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
/**
@@ -635,11 +637,16 @@ void drm_dev_unplug(struct drm_device *dev);
* unplugged, these two functions guarantee that any store before calling
* drm_dev_unplug() is visible to callers of this function after it completes
*/
-static inline int drm_dev_is_unplugged(struct drm_device *dev)
+static inline bool drm_dev_is_unplugged(struct drm_device *dev)
{
- int ret = atomic_read(&dev->unplugged);
- smp_rmb();
- return ret;
+ int idx;
+
+ if (drm_dev_enter(dev, &idx)) {
+ drm_dev_exit(idx);
+ return false;
+ }
+
+ return true;
}
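The new drm_dev_enter()/drm_dev_exit() pair replaces checks of the old atomic unplugged flag: hardware-touching paths bracket their work in an enter/exit section and bail out cleanly if the device is gone, which is also how the reworked drm_dev_is_unplugged() above is implemented. A minimal sketch; my_touch_hardware() is a placeholder:

    static int my_ioctl_do_hw_work(struct drm_device *dev)
    {
            int idx, ret;

            if (!drm_dev_enter(dev, &idx))
                    return -ENODEV;         /* device already unplugged */

            ret = my_touch_hardware(dev);   /* placeholder hardware access */

            drm_dev_exit(idx);
            return ret;
    }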
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 8d89a9c3748d..b25d12ef120a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,8 +465,6 @@ struct edid *drm_get_edid(struct drm_connector *connector,
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
-void drm_reset_display_info(struct drm_connector *connector);
-u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 5176c3797680..027ac16da3d1 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -47,6 +47,9 @@ struct device;
* header include loops we need it here for now.
*/
+/* Note that the order of this enum is ABI (it determines
+ * /dev/dri/renderD* numbers).
+ */
enum drm_minor_type {
DRM_MINOR_PRIMARY,
DRM_MINOR_CONTROL,
@@ -182,6 +185,14 @@ struct drm_file {
unsigned atomic:1;
/**
+ * @aspect_ratio_allowed:
+ *
+ * True, if client can handle picture aspect ratios, and has requested
+ * to pass this information along with the mode.
+ */
+ unsigned aspect_ratio_allowed:1;
+
+ /**
* @is_master:
*
* This client is the creator of @master. Protected by struct
@@ -348,18 +359,6 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_RENDER;
}
-/**
- * drm_is_control_client - is this an open file of the control node
- * @file_priv: DRM file
- *
- * Control nodes are deprecated and in the process of getting removed from the
- * DRM userspace API. Do not ever use!
- */
-static inline bool drm_is_control_client(const struct drm_file *file_priv)
-{
- return file_priv->minor->type == DRM_MINOR_CONTROL;
-}
-
int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 5ca7cdc3f527..a38de7eb55b4 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -10,6 +10,7 @@ struct drm_gem_object;
struct drm_mode_fb_cmd2;
struct drm_plane;
struct drm_plane_state;
+struct drm_simple_display_pipe;
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane);
@@ -27,6 +28,8 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state);
+int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
struct drm_framebuffer *
drm_gem_fbdev_fb_create(struct drm_device *dev,
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 562fa7df2637..98e63d870139 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -19,7 +19,7 @@
#define DRM_HDCP_RI_LEN 2
#define DRM_HDCP_V_PRIME_PART_LEN 4
#define DRM_HDCP_V_PRIME_NUM_PARTS 5
-#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f)
+#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f)
#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3))
#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7))
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index add42809642a..fafb6f592c4b 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -109,13 +109,6 @@ enum drm_ioctl_flags {
*/
DRM_ROOT_ONLY = BIT(2),
/**
- * @DRM_CONTROL_ALLOW:
- *
- * Deprecated, do not use. Control nodes are in the process of getting
- * removed.
- */
- DRM_CONTROL_ALLOW = BIT(3),
- /**
* @DRM_UNLOCKED:
*
* Whether &drm_ioctl_desc.func should be called with the DRM BKL held
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index cf0e7d89bcdf..8fad66f88e4f 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -194,8 +194,8 @@ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
- unsigned int token)
+static inline struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
+ unsigned int token)
{
struct drm_map_list *_entry;
list_for_each_entry(_entry, &dev->maplist, head)
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 7569f22ffef6..33b3a96d66d0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -796,6 +796,14 @@ struct drm_mode_config {
bool allow_fb_modifiers;
/**
+ * @normalize_zpos:
+ *
+ * If true the drm core will call drm_atomic_normalize_zpos() as part of
+ * atomic mode checking from drm_atomic_helper_check()
+ */
+ bool normalize_zpos;
+
+ /**
* @modifiers_property: Plane property to list support modifier/format
* combination.
*/
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 0d310beae6af..b159fe07fcf9 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -147,6 +147,12 @@ enum drm_mode_status {
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
+#define DRM_MODE_MATCH_TIMINGS (1 << 0)
+#define DRM_MODE_MATCH_CLOCK (1 << 1)
+#define DRM_MODE_MATCH_FLAGS (1 << 2)
+#define DRM_MODE_MATCH_3D_FLAGS (1 << 3)
+#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4)
+
/**
* struct drm_display_mode - DRM kernel-internal display mode structure
* @hdisplay: horizontal display size
@@ -405,6 +411,19 @@ struct drm_display_mode {
* Field for setting the HDMI picture aspect ratio of a mode.
*/
enum hdmi_picture_aspect picture_aspect_ratio;
+
+ /**
+ * @export_head:
+ *
+ * struct list_head for modes to be exposed to userspace.
+ * It maintains the list of exposed modes while the user-space list is
+ * being prepared in the drm_mode_getconnector ioctl. The list_head is
+ * only used inside that ioctl and is not expected to be used anywhere
+ * else.
+ * Once used, the stale pointers are not reset but left as they are, to
+ * avoid the overhead of protecting them with mode_config.mutex.
+ */
+ struct list_head export_head;
};
/**
@@ -490,6 +509,9 @@ void drm_mode_copy(struct drm_display_mode *dst,
const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
+bool drm_mode_match(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2,
+ unsigned int match_flags);
bool drm_mode_equal(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
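drm_mode_match() generalizes drm_mode_equal() by letting the caller choose which aspects of two modes to compare through the new DRM_MODE_MATCH_* flags, which is what makes it possible to ignore the picture aspect ratio for clients that have not set aspect_ratio_allowed. A minimal sketch:

    /* Compare two modes on timings and clock only, ignoring flags and
     * picture aspect ratio.
     */
    static bool my_modes_have_same_timings(const struct drm_display_mode *a,
                                           const struct drm_display_mode *b)
    {
            return drm_mode_match(a, b,
                                  DRM_MODE_MATCH_TIMINGS |
                                  DRM_MODE_MATCH_CLOCK);
    }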
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 3e76ca805b0f..35e2a3a79fc5 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1004,11 +1004,14 @@ struct drm_plane_helper_funcs {
* This function must not block for outstanding rendering, since it is
* called in the context of the atomic IOCTL even for async commits to
* be able to return any errors to userspace. Instead the recommended
- * way is to fill out the fence member of the passed-in
+ * way is to fill out the &drm_plane_state.fence of the passed-in
* &drm_plane_state. If the driver doesn't support native fences then
* equivalent functionality should be implemented through private
* members in the plane structure.
*
+ * Drivers which always have their buffers pinned should use
+ * drm_gem_fb_prepare_fb() for this hook.
+ *
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index f7bf4a48b1c3..26fa50c2a50e 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -43,6 +43,7 @@ struct drm_modeset_acquire_ctx;
* plane (in 16.16)
* @src_w: width of visible portion of plane (in 16.16)
* @src_h: height of visible portion of plane (in 16.16)
+ * @alpha: opacity of the plane
* @rotation: rotation of the plane
* @zpos: priority of the given plane on crtc (optional)
* Note that multiple active planes on the same crtc can have an identical
@@ -51,8 +52,8 @@ struct drm_modeset_acquire_ctx;
* plane with a lower ID.
* @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
* where N is the number of active planes for given crtc. Note that
- * the driver must call drm_atomic_normalize_zpos() to update this before
- * it can be trusted.
+ * the driver must set drm_mode_config.normalize_zpos or call
+ * drm_atomic_normalize_zpos() to update this before it can be trusted.
* @src: clipped source coordinates of the plane (in 16.16)
* @dst: clipped destination coordinates of the plane
* @state: backpointer to global drm_atomic_state
@@ -79,8 +80,15 @@ struct drm_plane_state {
/**
* @fence:
*
- * Optional fence to wait for before scanning out @fb. Do not write this
- * directly, use drm_atomic_set_fence_for_plane()
+ * Optional fence to wait for before scanning out @fb. The core atomic
+ * code will set this when userspace is using explicit fencing. Do not
+ * write this directly for a driver's implicit fence, use
+ * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
+ * preserved.
+ *
+ * Drivers should store any implicit fence in this from their
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
+ * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -106,6 +114,9 @@ struct drm_plane_state {
uint32_t src_x, src_y;
uint32_t src_h, src_w;
+ /* Plane opacity */
+ u16 alpha;
+
/* Plane rotation */
unsigned int rotation;
@@ -496,6 +507,7 @@ enum drm_plane_type {
* @funcs: helper functions
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
+ * @alpha_property: alpha property for this plane
* @zpos_property: zpos property for this plane
* @rotation_property: rotation property for this plane
* @helper_private: mid-layer private data
@@ -571,6 +583,7 @@ struct drm_plane {
*/
struct drm_plane_state *state;
+ struct drm_property *alpha_property;
struct drm_property *zpos_property;
struct drm_property *rotation_property;
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index d1423c7f3c73..1d5c0b2a8956 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -260,7 +260,7 @@ struct drm_property *drm_property_create_object(struct drm_device *dev,
uint32_t type);
struct drm_property *drm_property_create_bool(struct drm_device *dev,
u32 flags, const char *name);
-int drm_property_add_enum(struct drm_property *property, int index,
+int drm_property_add_enum(struct drm_property *property,
uint64_t value, const char *name);
void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
@@ -281,32 +281,6 @@ struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob);
void drm_property_blob_put(struct drm_property_blob *blob);
/**
- * drm_property_reference_blob - acquire a blob property reference
- * @blob: DRM blob property
- *
- * This is a compatibility alias for drm_property_blob_get() and should not be
- * used by new code.
- */
-static inline struct drm_property_blob *
-drm_property_reference_blob(struct drm_property_blob *blob)
-{
- return drm_property_blob_get(blob);
-}
-
-/**
- * drm_property_unreference_blob - release a blob property reference
- * @blob: DRM blob property
- *
- * This is a compatibility alias for drm_property_blob_put() and should not be
- * used by new code.
- */
-static inline void
-drm_property_unreference_blob(struct drm_property_blob *blob)
-{
- drm_property_blob_put(blob);
-}
-
-/**
* drm_property_find - find property object
* @dev: DRM device
* @file_priv: drm file to check for lease against.
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 44bc122b9ee0..6c54544a4be7 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -175,8 +175,7 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
- const struct drm_rect *clip,
- int hscale, int vscale);
+ const struct drm_rect *clip);
int drm_rect_calc_hscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_hscale, int max_hscale);
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 1b4e352143fd..451960438a29 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -64,7 +64,8 @@ struct drm_simple_display_pipe_funcs {
* This hook is optional.
*/
void (*enable)(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state);
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state);
/**
* @disable:
*
@@ -115,6 +116,9 @@ struct drm_simple_display_pipe_funcs {
* Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read
* the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
* more details.
+ *
+ * Drivers which always have their buffers pinned should use
+ * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dfd54fb94e10..dec655894d08 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -43,10 +43,12 @@ enum drm_sched_priority {
};
/**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
+ * drm_sched_entity - A wrapper around a job queue (typically attached
+ * to the DRM file_priv).
+ *
+ * Entities will emit jobs in order to their corresponding hardware
+ * ring, and the scheduler will alternate between entities based on
+ * scheduling policy.
*/
struct drm_sched_entity {
struct list_head list;
@@ -54,7 +56,6 @@ struct drm_sched_entity {
spinlock_t rq_lock;
struct drm_gpu_scheduler *sched;
- spinlock_t queue_lock;
struct spsc_queue job_queue;
atomic_t fence_seq;
@@ -63,6 +64,8 @@ struct drm_sched_entity {
struct dma_fence *dependency;
struct dma_fence_cb cb;
atomic_t *guilty; /* points to ctx's guilty */
+ int fini_status;
+ struct dma_fence *last_scheduled;
};
/**
@@ -78,7 +81,18 @@ struct drm_sched_rq {
struct drm_sched_fence {
struct dma_fence scheduled;
+
+ /* This fence is what will be signaled by the scheduler when
+ * the job is completed.
+ *
+ * When setting up an out fence for the job, you should use
+ * this, since it's available immediately upon
+ * drm_sched_job_init(), and the fence returned by the driver
+ * from run_job() won't be created until the dependencies have
+ * resolved.
+ */
struct dma_fence finished;
+
struct dma_fence_cb cb;
struct dma_fence *parent;
struct drm_gpu_scheduler *sched;
@@ -88,6 +102,13 @@ struct drm_sched_fence {
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
+/**
+ * drm_sched_job - A job to be run by an entity.
+ *
+ * A job is created by the driver using drm_sched_job_init(), and
+ * should call drm_sched_entity_push_job() once it wants the scheduler
+ * to schedule the job.
+ */
struct drm_sched_job {
struct spsc_node queue_node;
struct drm_gpu_scheduler *sched;
@@ -99,6 +120,7 @@ struct drm_sched_job {
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
+ struct drm_sched_entity *entity;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -112,10 +134,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
* these functions should be implemented on the driver side
*/
struct drm_sched_backend_ops {
+ /* Called when the scheduler is considering scheduling this
+ * job next, to get another struct dma_fence for this job to
+ * block on. Once it returns NULL, run_job() may be called.
+ */
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
+
+ /* Called to execute the job once all of the dependencies have
+ * been resolved. This may be called multiple times, if
+ * timedout_job() has happened and drm_sched_job_recovery()
+ * decides to try it again.
+ */
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+
+ /* Called when a job has taken too long to execute, to trigger
+ * GPU recovery.
+ */
void (*timedout_job)(struct drm_sched_job *sched_job);
+
+ /* Called once the job's finished fence has been signaled and
+ * it's time to clean it up.
+ */
void (*free_job)(struct drm_sched_job *sched_job);
};
@@ -147,7 +187,11 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
struct drm_sched_rq *rq,
- uint32_t jobs, atomic_t *guilty);
+ atomic_t *guilty);
+void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
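The newly documented drm_sched_backend_ops spell out the driver contract: dependency() is polled until it returns NULL, run_job() submits the job to the hardware ring and returns its fence, timedout_job() triggers recovery, and free_job() runs once the finished fence has signaled. A hedged skeleton of a driver-side ops table; every my_* helper is a placeholder, not something introduced by this patch:

    static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
    {
            /* Return the next fence to wait on, or NULL when ready to run. */
            return my_next_unsignaled_dependency(sched_job);
    }

    static struct dma_fence *my_job_run(struct drm_sched_job *sched_job)
    {
            return my_submit_to_ring(sched_job);  /* hardware fence for this job */
    }

    static void my_job_timedout(struct drm_sched_job *sched_job)
    {
            my_gpu_recover(sched_job->sched);
    }

    static void my_job_free(struct drm_sched_job *sched_job)
    {
            my_release_job(sched_job);
    }

    static const struct drm_sched_backend_ops my_sched_ops = {
            .dependency   = my_job_dependency,
            .run_job      = my_job_run,
            .timedout_job = my_job_timedout,
            .free_job     = my_job_free,
    };

Jobs built on top of these ops are handed to the scheduler with drm_sched_job_init() followed by drm_sched_entity_push_job(), as described in the new drm_sched_job kerneldoc above.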
diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h
deleted file mode 100644
index 0789e8d0a0e1..000000000000
--- a/include/drm/gpu_scheduler_trace.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _GPU_SCHED_TRACE_H_
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/tracepoint.h>
-
-#include <drm/drmP.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM gpu_scheduler
-#define TRACE_INCLUDE_FILE gpu_scheduler_trace
-
-TRACE_EVENT(drm_sched_job,
- TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
- TP_ARGS(sched_job, entity),
- TP_STRUCT__entry(
- __field(struct drm_sched_entity *, entity)
- __field(struct dma_fence *, fence)
- __field(const char *, name)
- __field(uint64_t, id)
- __field(u32, job_count)
- __field(int, hw_job_count)
- ),
-
- TP_fast_assign(
- __entry->entity = entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
- __entry->name = sched_job->sched->name;
- __entry->job_count = spsc_queue_count(&entity->job_queue);
- __entry->hw_job_count = atomic_read(
- &sched_job->sched->hw_rq_count);
- ),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __entry->name,
- __entry->job_count, __entry->hw_job_count)
-);
-
-TRACE_EVENT(drm_sched_process_job,
- TP_PROTO(struct drm_sched_fence *fence),
- TP_ARGS(fence),
- TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
- ),
-
- TP_fast_assign(
- __entry->fence = &fence->finished;
- ),
- TP_printk("fence=%p signaled", __entry->fence)
-);
-
-#endif
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#include <trace/define_trace.h>
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 70f0c2535b87..bab70ff6e78b 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -349,6 +349,7 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
+ INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index 44e824af2ef6..b8ba58861986 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -67,7 +67,9 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
const struct drm_simple_display_pipe_funcs *pipe_funcs,
struct drm_driver *driver,
const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi);
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plan_state);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index 0a4ddbc04c60..5b96f0b12c8c 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -36,6 +36,11 @@ static inline bool tinydrm_machine_little_endian(void)
bool tinydrm_merge_clips(struct drm_clip_rect *dst,
struct drm_clip_rect *src, unsigned int num_clips,
unsigned int flags, u32 max_width, u32 max_height);
+int tinydrm_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips);
void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
struct drm_clip_rect *clip);
void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 07a9a11fe19d..56e4a916b5e8 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -26,6 +26,10 @@ struct tinydrm_device {
struct drm_simple_display_pipe pipe;
struct mutex dirty_lock;
const struct drm_framebuffer_funcs *fb_funcs;
+ int (*fb_dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
};
static inline struct tinydrm_device *
@@ -41,7 +45,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
* the &drm_driver structure.
*/
#define TINYDRM_GEM_DRIVER_OPS \
- .gem_free_object = tinydrm_gem_cma_free_object, \
+ .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
.gem_print_info = drm_gem_cma_print_info, \
.gem_vm_ops = &drm_gem_cma_vm_ops, \
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
@@ -91,8 +95,6 @@ void tinydrm_shutdown(struct tinydrm_device *tdev);
void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
-int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
int
tinydrm_display_pipe_init(struct tinydrm_device *tdev,
const struct drm_simple_display_pipe_funcs *funcs,
diff --git a/include/dt-bindings/clock/actions,s900-cmu.h b/include/dt-bindings/clock/actions,s900-cmu.h
new file mode 100644
index 000000000000..7c1251565f43
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s900-cmu.h
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Device Tree binding constants for Actions Semi S900 Clock Management Unit
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Copyright (c) 2018 Linaro Ltd.
+
+#ifndef __DT_BINDINGS_CLOCK_S900_CMU_H
+#define __DT_BINDINGS_CLOCK_S900_CMU_H
+
+#define CLK_NONE 0
+
+/* fixed rate clocks */
+#define CLK_LOSC 1
+#define CLK_HOSC 2
+
+/* pll clocks */
+#define CLK_CORE_PLL 3
+#define CLK_DEV_PLL 4
+#define CLK_DDR_PLL 5
+#define CLK_NAND_PLL 6
+#define CLK_DISPLAY_PLL 7
+#define CLK_DSI_PLL 8
+#define CLK_ASSIST_PLL 9
+#define CLK_AUDIO_PLL 10
+
+/* system clock */
+#define CLK_CPU 15
+#define CLK_DEV 16
+#define CLK_NOC 17
+#define CLK_NOC_MUX 18
+#define CLK_NOC_DIV 19
+#define CLK_AHB 20
+#define CLK_APB 21
+#define CLK_DMAC 22
+
+/* peripheral device clock */
+#define CLK_GPIO 23
+
+#define CLK_BISP 24
+#define CLK_CSI0 25
+#define CLK_CSI1 26
+
+#define CLK_DE0 27
+#define CLK_DE1 28
+#define CLK_DE2 29
+#define CLK_DE3 30
+#define CLK_DSI 32
+
+#define CLK_GPU 33
+#define CLK_GPU_CORE 34
+#define CLK_GPU_MEM 35
+#define CLK_GPU_SYS 36
+
+#define CLK_HDE 37
+#define CLK_I2C0 38
+#define CLK_I2C1 39
+#define CLK_I2C2 40
+#define CLK_I2C3 41
+#define CLK_I2C4 42
+#define CLK_I2C5 43
+#define CLK_I2SRX 44
+#define CLK_I2STX 45
+#define CLK_IMX 46
+#define CLK_LCD 47
+#define CLK_NAND0 48
+#define CLK_NAND1 49
+#define CLK_PWM0 50
+#define CLK_PWM1 51
+#define CLK_PWM2 52
+#define CLK_PWM3 53
+#define CLK_PWM4 54
+#define CLK_PWM5 55
+#define CLK_SD0 56
+#define CLK_SD1 57
+#define CLK_SD2 58
+#define CLK_SD3 59
+#define CLK_SENSOR 60
+#define CLK_SPEED_SENSOR 61
+#define CLK_SPI0 62
+#define CLK_SPI1 63
+#define CLK_SPI2 64
+#define CLK_SPI3 65
+#define CLK_THERMAL_SENSOR 66
+#define CLK_UART0 67
+#define CLK_UART1 68
+#define CLK_UART2 69
+#define CLK_UART3 70
+#define CLK_UART4 71
+#define CLK_UART5 72
+#define CLK_UART6 73
+#define CLK_VCE 74
+#define CLK_VDE 75
+
+#define CLK_USB3_480MPLL0 76
+#define CLK_USB3_480MPHY0 77
+#define CLK_USB3_5GPHY 78
+#define CLK_USB3_CCE 79
+#define CLK_USB3_MAC 80
+
+#define CLK_TIMER 83
+
+#define CLK_HDMI_AUDIO 84
+
+#define CLK_24M 85
+
+#define CLK_EDP 86
+
+#define CLK_24M_EDP 87
+#define CLK_EDP_PLL 88
+#define CLK_EDP_LINK 89
+
+#define CLK_USB2H0_PLLEN 90
+#define CLK_USB2H0_PHY 91
+#define CLK_USB2H0_CCE 92
+#define CLK_USB2H1_PLLEN 93
+#define CLK_USB2H1_PHY 94
+#define CLK_USB2H1_CCE 95
+
+#define CLK_DDR0 96
+#define CLK_DDR1 97
+#define CLK_DMM 98
+
+#define CLK_ETH_MAC 99
+#define CLK_RMII_REF 100
+
+#define CLK_NR_CLKS (CLK_RMII_REF + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_S900_CMU_H */
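Note: the S900 CMU header above only assigns consumer-visible clock IDs; boards reference them through a clock specifier. A minimal, hypothetical DTS sketch (the &cmu and &uart5 labels are assumptions, not part of this patch):

	#include <dt-bindings/clock/actions,s900-cmu.h>

	/* &cmu is assumed to be the S900 clock controller node */
	&uart5 {
		clocks = <&cmu CLK_UART5>;
		status = "okay";
	};
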
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index d3558d897a4d..44761849fcbe 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -38,6 +38,7 @@
#define ASPEED_CLK_MAC 32
#define ASPEED_CLK_BCLK 33
#define ASPEED_CLK_MPLL 34
+#define ASPEED_CLK_24M 35
#define ASPEED_RESET_XDMA 0
#define ASPEED_RESET_MCTP 1
@@ -45,8 +46,9 @@
#define ASPEED_RESET_JTAG_MASTER 3
#define ASPEED_RESET_MIC 4
#define ASPEED_RESET_PWM 5
-#define ASPEED_RESET_PCIVGA 6
+#define ASPEED_RESET_PECI 6
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
+#define ASPEED_RESET_CRT1 9
#endif
diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h
new file mode 100644
index 000000000000..61955016a55b
--- /dev/null
+++ b/include/dt-bindings/clock/axg-aoclkc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK
+
+#define CLKID_AO_REMOTE 0
+#define CLKID_AO_I2C_MASTER 1
+#define CLKID_AO_I2C_SLAVE 2
+#define CLKID_AO_UART1 3
+#define CLKID_AO_UART2 4
+#define CLKID_AO_IR_BLASTER 5
+#define CLKID_AO_SAR_ADC 6
+#define CLKID_AO_CLK81 7
+#define CLKID_AO_SAR_ADC_SEL 8
+#define CLKID_AO_SAR_ADC_DIV 9
+#define CLKID_AO_SAR_ADC_CLK 10
+#define CLKID_AO_ALT_XTAL 11
+
+#endif
diff --git a/include/dt-bindings/clock/bcm-sr.h b/include/dt-bindings/clock/bcm-sr.h
index cff6c6fe2947..419011ba1a94 100644
--- a/include/dt-bindings/clock/bcm-sr.h
+++ b/include/dt-bindings/clock/bcm-sr.h
@@ -35,7 +35,7 @@
/* GENPLL 0 clock channel ID SCR HSLS FS PCIE */
#define BCM_SR_GENPLL0 0
-#define BCM_SR_GENPLL0_SATA_CLK 1
+#define BCM_SR_GENPLL0_125M_CLK 1
#define BCM_SR_GENPLL0_SCR_CLK 2
#define BCM_SR_GENPLL0_250M_CLK 3
#define BCM_SR_GENPLL0_PCIE_AXI_CLK 4
@@ -50,9 +50,11 @@
/* GENPLL 2 clock channel ID NITRO MHB*/
#define BCM_SR_GENPLL2 0
#define BCM_SR_GENPLL2_NIC_CLK 1
-#define BCM_SR_GENPLL2_250_NITRO_CLK 2
+#define BCM_SR_GENPLL2_TS_500_CLK 2
#define BCM_SR_GENPLL2_125_NITRO_CLK 3
#define BCM_SR_GENPLL2_CHIMP_CLK 4
+#define BCM_SR_GENPLL2_NIC_FLASH_CLK 5
+#define BCM_SR_GENPLL2_FS4_CLK 6
/* GENPLL 3 HSLS clock channel ID */
#define BCM_SR_GENPLL3 0
@@ -62,11 +64,16 @@
/* GENPLL 4 SCR clock channel ID */
#define BCM_SR_GENPLL4 0
#define BCM_SR_GENPLL4_CCN_CLK 1
+#define BCM_SR_GENPLL4_TPIU_PLL_CLK 2
+#define BCM_SR_GENPLL4_NOC_CLK 3
+#define BCM_SR_GENPLL4_CHCLK_FS4_CLK 4
+#define BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK 5
/* GENPLL 5 FS4 clock channel ID */
#define BCM_SR_GENPLL5 0
-#define BCM_SR_GENPLL5_FS_CLK 1
-#define BCM_SR_GENPLL5_SPU_CLK 2
+#define BCM_SR_GENPLL5_FS4_HF_CLK 1
+#define BCM_SR_GENPLL5_CRYPTO_AE_CLK 2
+#define BCM_SR_GENPLL5_RAID_AE_CLK 3
/* GENPLL 6 NITRO clock channel ID */
#define BCM_SR_GENPLL6 0
@@ -74,13 +81,16 @@
/* LCPLL0 clock channel ID */
#define BCM_SR_LCPLL0 0
-#define BCM_SR_LCPLL0_SATA_REF_CLK 1
-#define BCM_SR_LCPLL0_USB_REF_CLK 2
-#define BCM_SR_LCPLL0_SATA_REFPN_CLK 3
+#define BCM_SR_LCPLL0_SATA_REFP_CLK 1
+#define BCM_SR_LCPLL0_SATA_REFN_CLK 2
+#define BCM_SR_LCPLL0_SATA_350_CLK 3
+#define BCM_SR_LCPLL0_SATA_500_CLK 4
/* LCPLL1 clock channel ID */
#define BCM_SR_LCPLL1 0
#define BCM_SR_LCPLL1_WAN_CLK 1
+#define BCM_SR_LCPLL1_USB_REF_CLK 2
+#define BCM_SR_LCPLL1_CRMU_TS_CLK 3
/* LCPLL PCIE clock channel ID */
#define BCM_SR_LCPLL_PCIE 0
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 8ba99a5e3fd3..7a892be90549 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -125,5 +125,7 @@
#define CLKID_VAPB_1 138
#define CLKID_VAPB_SEL 139
#define CLKID_VAPB 140
+#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC 156
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h
index fab30b3f78b2..136de24733be 100644
--- a/include/dt-bindings/clock/histb-clock.h
+++ b/include/dt-bindings/clock/histb-clock.h
@@ -62,6 +62,14 @@
#define HISTB_USB2_PHY1_REF_CLK 40
#define HISTB_USB2_PHY2_REF_CLK 41
#define HISTB_COMBPHY0_CLK 42
+#define HISTB_USB3_BUS_CLK 43
+#define HISTB_USB3_UTMI_CLK 44
+#define HISTB_USB3_PIPE_CLK 45
+#define HISTB_USB3_SUSPEND_CLK 46
+#define HISTB_USB3_BUS_CLK1 47
+#define HISTB_USB3_UTMI_CLK1 48
+#define HISTB_USB3_PIPE_CLK1 49
+#define HISTB_USB3_SUSPEND_CLK1 50
/* clocks provided by mcu CRG */
#define HISTB_MCE_CLK 1
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index da59fd9cdb5e..7ad171b8f3bf 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -271,6 +271,8 @@
#define IMX6QDL_CLK_PRE_AXI 258
#define IMX6QDL_CLK_MLB_SEL 259
#define IMX6QDL_CLK_MLB_PODF 260
-#define IMX6QDL_CLK_END 261
+#define IMX6QDL_CLK_EPIT1 261
+#define IMX6QDL_CLK_EPIT2 262
+#define IMX6QDL_CLK_END 263
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index 36f0324902a5..cd2d6c570e86 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -275,6 +275,10 @@
#define IMX6SX_PLL6_BYPASS 262
#define IMX6SX_PLL7_BYPASS 263
#define IMX6SX_CLK_SPDIF_GCLK 264
-#define IMX6SX_CLK_CLK_END 265
+#define IMX6SX_CLK_LVDS2_SEL 265
+#define IMX6SX_CLK_LVDS2_OUT 266
+#define IMX6SX_CLK_LVDS2_IN 267
+#define IMX6SX_CLK_ANACLK2 268
+#define IMX6SX_CLK_CLK_END 269
#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index ee9f1a508d2f..9564597cbfac 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -235,20 +235,27 @@
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
+#define IMX6UL_CLK_CKO1_SEL 225
+#define IMX6UL_CLK_CKO1_PODF 226
+#define IMX6UL_CLK_CKO1 227
+#define IMX6UL_CLK_CKO2_SEL 228
+#define IMX6UL_CLK_CKO2_PODF 229
+#define IMX6UL_CLK_CKO2 230
+#define IMX6UL_CLK_CKO 231
/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED 225
-#define IMX6ULL_CLK_ESAI_PODF 226
-#define IMX6ULL_CLK_ESAI_EXTAL 227
-#define IMX6ULL_CLK_ESAI_MEM 228
-#define IMX6ULL_CLK_ESAI_IPG 229
-#define IMX6ULL_CLK_DCP_CLK 230
-#define IMX6ULL_CLK_EPDC_PRE_SEL 231
-#define IMX6ULL_CLK_EPDC_SEL 232
-#define IMX6ULL_CLK_EPDC_PODF 233
-#define IMX6ULL_CLK_EPDC_ACLK 234
-#define IMX6ULL_CLK_EPDC_PIX 235
-#define IMX6ULL_CLK_ESAI_SEL 236
-#define IMX6UL_CLK_END 237
+#define IMX6ULL_CLK_ESAI_PRED 232
+#define IMX6ULL_CLK_ESAI_PODF 233
+#define IMX6ULL_CLK_ESAI_EXTAL 234
+#define IMX6ULL_CLK_ESAI_MEM 235
+#define IMX6ULL_CLK_ESAI_IPG 236
+#define IMX6ULL_CLK_DCP_CLK 237
+#define IMX6ULL_CLK_EPDC_PRE_SEL 238
+#define IMX6ULL_CLK_EPDC_SEL 239
+#define IMX6ULL_CLK_EPDC_PODF 240
+#define IMX6ULL_CLK_EPDC_ACLK 241
+#define IMX6ULL_CLK_EPDC_PIX 242
+#define IMX6ULL_CLK_ESAI_SEL 243
+#define IMX6UL_CLK_END 244
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index b2325d3e236a..0d67f53bba93 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -168,7 +168,7 @@
#define IMX7D_SPDIF_ROOT_SRC 155
#define IMX7D_SPDIF_ROOT_CG 156
#define IMX7D_SPDIF_ROOT_DIV 157
-#define IMX7D_ENET1_REF_ROOT_CLK 158
+#define IMX7D_ENET1_IPG_ROOT_CLK 158
#define IMX7D_ENET1_REF_ROOT_SRC 159
#define IMX7D_ENET1_REF_ROOT_CG 160
#define IMX7D_ENET1_REF_ROOT_DIV 161
@@ -176,7 +176,7 @@
#define IMX7D_ENET1_TIME_ROOT_SRC 163
#define IMX7D_ENET1_TIME_ROOT_CG 164
#define IMX7D_ENET1_TIME_ROOT_DIV 165
-#define IMX7D_ENET2_REF_ROOT_CLK 166
+#define IMX7D_ENET2_IPG_ROOT_CLK 166
#define IMX7D_ENET2_REF_ROOT_SRC 167
#define IMX7D_ENET2_REF_ROOT_CG 168
#define IMX7D_ENET2_REF_ROOT_DIV 169
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index dea9d46d4fa7..a60f47b49231 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -102,5 +102,6 @@
#define CLKID_MPLL0 93
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
+#define CLKID_NAND_CLK 112
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
index 24e93dfcee9f..9ac2f2b5710a 100644
--- a/include/dt-bindings/clock/mt2701-clk.h
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -171,13 +171,12 @@
#define CLK_TOP_8BDAC 151
#define CLK_TOP_WBG_DIG_416M 152
#define CLK_TOP_DPI 153
-#define CLK_TOP_HDMITX_CLKDIG_CTS 154
-#define CLK_TOP_DSI0_LNTC_DSI 155
-#define CLK_TOP_AUD_EXT1 156
-#define CLK_TOP_AUD_EXT2 157
-#define CLK_TOP_NFI1X_PAD 158
-#define CLK_TOP_AXISEL_D4 159
-#define CLK_TOP_NR 160
+#define CLK_TOP_DSI0_LNTC_DSI 154
+#define CLK_TOP_AUD_EXT1 155
+#define CLK_TOP_AUD_EXT2 156
+#define CLK_TOP_NFI1X_PAD 157
+#define CLK_TOP_AXISEL_D4 158
+#define CLK_TOP_NR 159
/* APMIXEDSYS */
@@ -194,7 +193,8 @@
#define CLK_APMIXED_HADDS2PLL 11
#define CLK_APMIXED_AUD2PLL 12
#define CLK_APMIXED_TVD2PLL 13
-#define CLK_APMIXED_NR 14
+#define CLK_APMIXED_HDMI_REF 14
+#define CLK_APMIXED_NR 15
/* DDRPHY */
@@ -431,6 +431,10 @@
#define CLK_ETHSYS_CRYPTO 8
#define CLK_ETHSYS_NR 9
+/* G3DSYS */
+#define CLK_G3DSYS_CORE 1
+#define CLK_G3DSYS_NR 2
+
/* BDP */
#define CLK_BDP_BRG_BA 1
diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
new file mode 100644
index 000000000000..f21522605b94
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Nuvoton NPCM7xx Clock Generator binding
+ * Clock binding numbers for all clocks supported by nuvoton,npcm7xx-clk
+ *
+ * Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NPCM7XX_H
+#define __DT_BINDINGS_CLOCK_NPCM7XX_H
+
+
+#define NPCM7XX_CLK_CPU 0
+#define NPCM7XX_CLK_GFX_PIXEL 1
+#define NPCM7XX_CLK_MC 2
+#define NPCM7XX_CLK_ADC 3
+#define NPCM7XX_CLK_AHB 4
+#define NPCM7XX_CLK_TIMER 5
+#define NPCM7XX_CLK_UART 6
+#define NPCM7XX_CLK_MMC 7
+#define NPCM7XX_CLK_SPI3 8
+#define NPCM7XX_CLK_PCI 9
+#define NPCM7XX_CLK_AXI 10
+#define NPCM7XX_CLK_APB4 11
+#define NPCM7XX_CLK_APB3 12
+#define NPCM7XX_CLK_APB2 13
+#define NPCM7XX_CLK_APB1 14
+#define NPCM7XX_CLK_APB5 15
+#define NPCM7XX_CLK_CLKOUT 16
+#define NPCM7XX_CLK_GFX 17
+#define NPCM7XX_CLK_SU 18
+#define NPCM7XX_CLK_SU48 19
+#define NPCM7XX_CLK_SDHC 20
+#define NPCM7XX_CLK_SPI0 21
+#define NPCM7XX_CLK_SPIX 22
+
+#define NPCM7XX_CLK_REFCLK 23
+#define NPCM7XX_CLK_SYSBYPCK 24
+#define NPCM7XX_CLK_MCBYPCK 25
+
+#define NPCM7XX_NUM_CLOCKS (NPCM7XX_CLK_MCBYPCK+1)
+
+#endif
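Note: as with the other clock headers in this series, these IDs are passed as the single cell of a clock specifier. A hedged sketch of a consumer (the &clk and &timer0 labels are illustrative assumptions):

	#include <dt-bindings/clock/nuvoton,npcm7xx-clock.h>

	&timer0 {
		clocks = <&clk NPCM7XX_CLK_TIMER>;
	};
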
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
new file mode 100644
index 000000000000..58a242e656b1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_COBALT_H
+#define _DT_BINDINGS_CLK_MSM_GCC_COBALT_H
+
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 8
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 9
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 10
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 11
+#define BLSP1_UART1_APPS_CLK_SRC 12
+#define BLSP1_UART2_APPS_CLK_SRC 13
+#define BLSP1_UART3_APPS_CLK_SRC 14
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 15
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 16
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 17
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 18
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 19
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 20
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 21
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 22
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 23
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 24
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 25
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 26
+#define BLSP2_UART1_APPS_CLK_SRC 27
+#define BLSP2_UART2_APPS_CLK_SRC 28
+#define BLSP2_UART3_APPS_CLK_SRC 29
+#define GCC_AGGRE1_NOC_XO_CLK 30
+#define GCC_AGGRE1_UFS_AXI_CLK 31
+#define GCC_AGGRE1_USB3_AXI_CLK 32
+#define GCC_APSS_QDSS_TSCTR_DIV2_CLK 33
+#define GCC_APSS_QDSS_TSCTR_DIV8_CLK 34
+#define GCC_BIMC_HMSS_AXI_CLK 35
+#define GCC_BIMC_MSS_Q6_AXI_CLK 36
+#define GCC_BLSP1_AHB_CLK 37
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 43
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 44
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 45
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 46
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 47
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 48
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 49
+#define GCC_BLSP1_SLEEP_CLK 50
+#define GCC_BLSP1_UART1_APPS_CLK 51
+#define GCC_BLSP1_UART2_APPS_CLK 52
+#define GCC_BLSP1_UART3_APPS_CLK 53
+#define GCC_BLSP2_AHB_CLK 54
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 55
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 56
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 57
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 58
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 59
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 60
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 61
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 62
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 63
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 64
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 65
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 66
+#define GCC_BLSP2_SLEEP_CLK 67
+#define GCC_BLSP2_UART1_APPS_CLK 68
+#define GCC_BLSP2_UART2_APPS_CLK 69
+#define GCC_BLSP2_UART3_APPS_CLK 70
+#define GCC_CFG_NOC_USB3_AXI_CLK 71
+#define GCC_GP1_CLK 72
+#define GCC_GP2_CLK 73
+#define GCC_GP3_CLK 74
+#define GCC_GPU_BIMC_GFX_CLK 75
+#define GCC_GPU_BIMC_GFX_SRC_CLK 76
+#define GCC_GPU_CFG_AHB_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_HMSS_AHB_CLK 79
+#define GCC_HMSS_AT_CLK 80
+#define GCC_HMSS_DVM_BUS_CLK 81
+#define GCC_HMSS_RBCPR_CLK 82
+#define GCC_HMSS_TRIG_CLK 83
+#define GCC_LPASS_AT_CLK 84
+#define GCC_LPASS_TRIG_CLK 85
+#define GCC_MMSS_NOC_CFG_AHB_CLK 86
+#define GCC_MMSS_QM_AHB_CLK 87
+#define GCC_MMSS_QM_CORE_CLK 88
+#define GCC_MMSS_SYS_NOC_AXI_CLK 89
+#define GCC_MSS_AT_CLK 90
+#define GCC_PCIE_0_AUX_CLK 91
+#define GCC_PCIE_0_CFG_AHB_CLK 92
+#define GCC_PCIE_0_MSTR_AXI_CLK 93
+#define GCC_PCIE_0_PIPE_CLK 94
+#define GCC_PCIE_0_SLV_AXI_CLK 95
+#define GCC_PCIE_PHY_AUX_CLK 96
+#define GCC_PDM2_CLK 97
+#define GCC_PDM_AHB_CLK 98
+#define GCC_PDM_XO4_CLK 99
+#define GCC_PRNG_AHB_CLK 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC4_AHB_CLK 103
+#define GCC_SDCC4_APPS_CLK 104
+#define GCC_TSIF_AHB_CLK 105
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 106
+#define GCC_TSIF_REF_CLK 107
+#define GCC_UFS_AHB_CLK 108
+#define GCC_UFS_AXI_CLK 109
+#define GCC_UFS_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_AUX_CLK 111
+#define GCC_UFS_RX_SYMBOL_0_CLK 112
+#define GCC_UFS_RX_SYMBOL_1_CLK 113
+#define GCC_UFS_TX_SYMBOL_0_CLK 114
+#define GCC_UFS_UNIPRO_CORE_CLK 115
+#define GCC_USB30_MASTER_CLK 116
+#define GCC_USB30_MOCK_UTMI_CLK 117
+#define GCC_USB30_SLEEP_CLK 118
+#define GCC_USB3_PHY_AUX_CLK 119
+#define GCC_USB3_PHY_PIPE_CLK 120
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 121
+#define GP1_CLK_SRC 122
+#define GP2_CLK_SRC 123
+#define GP3_CLK_SRC 124
+#define GPLL0 125
+#define GPLL0_OUT_EVEN 126
+#define GPLL0_OUT_MAIN 127
+#define GPLL0_OUT_ODD 128
+#define GPLL0_OUT_TEST 129
+#define GPLL1 130
+#define GPLL1_OUT_EVEN 131
+#define GPLL1_OUT_MAIN 132
+#define GPLL1_OUT_ODD 133
+#define GPLL1_OUT_TEST 134
+#define GPLL2 135
+#define GPLL2_OUT_EVEN 136
+#define GPLL2_OUT_MAIN 137
+#define GPLL2_OUT_ODD 138
+#define GPLL2_OUT_TEST 139
+#define GPLL3 140
+#define GPLL3_OUT_EVEN 141
+#define GPLL3_OUT_MAIN 142
+#define GPLL3_OUT_ODD 143
+#define GPLL3_OUT_TEST 144
+#define GPLL4 145
+#define GPLL4_OUT_EVEN 146
+#define GPLL4_OUT_MAIN 147
+#define GPLL4_OUT_ODD 148
+#define GPLL4_OUT_TEST 149
+#define GPLL6 150
+#define GPLL6_OUT_EVEN 151
+#define GPLL6_OUT_MAIN 152
+#define GPLL6_OUT_ODD 153
+#define GPLL6_OUT_TEST 154
+#define HMSS_AHB_CLK_SRC 155
+#define HMSS_RBCPR_CLK_SRC 156
+#define PCIE_AUX_CLK_SRC 157
+#define PDM2_CLK_SRC 158
+#define SDCC2_APPS_CLK_SRC 159
+#define SDCC4_APPS_CLK_SRC 160
+#define TSIF_REF_CLK_SRC 161
+#define UFS_AXI_CLK_SRC 162
+#define USB30_MASTER_CLK_SRC 163
+#define USB30_MOCK_UTMI_CLK_SRC 164
+#define USB3_PHY_AUX_CLK_SRC 165
+
+#define PCIE_0_GDSC 0
+#define UFS_GDSC 1
+#define USB_30_GDSC 2
+
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_QUP5_BCR 4
+#define GCC_BLSP1_QUP6_BCR 5
+#define GCC_BLSP2_QUP1_BCR 6
+#define GCC_BLSP2_QUP2_BCR 7
+#define GCC_BLSP2_QUP3_BCR 8
+#define GCC_BLSP2_QUP4_BCR 9
+#define GCC_BLSP2_QUP5_BCR 10
+#define GCC_BLSP2_QUP6_BCR 11
+#define GCC_PCIE_0_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_SDCC2_BCR 14
+#define GCC_SDCC4_BCR 15
+#define GCC_TSIF_BCR 16
+#define GCC_UFS_BCR 17
+#define GCC_USB_30_BCR 18
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
new file mode 100644
index 000000000000..aca61264f12c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_CLKREF_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 155
+#define GCC_USB3_SEC_CLKREF_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK 157
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161
+#define GCC_VIDEO_AHB_CLK 162
+#define GCC_VIDEO_AXI_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GPLL0 165
+#define GPLL0_OUT_EVEN 166
+#define GPLL0_OUT_MAIN 167
+#define GCC_GPU_IREF_CLK 168
+#define GCC_SDCC1_AHB_CLK 169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+
+/* GCC Resets */
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
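Note: the SDM845 GCC header defines three separate ID spaces (clocks, BCR resets, GDSC power domains), each numbered from 0; the consumer property selects which space an index is interpreted in. A sketch under the usual sdhci-msm conventions, with &gcc and &sdhc_2 as assumed labels and the clock-names treated as illustrative:

	#include <dt-bindings/clock/qcom,gcc-sdm845.h>

	&sdhc_2 {
		clocks = <&gcc GCC_SDCC2_AHB_CLK>, <&gcc GCC_SDCC2_APPS_CLK>;
		clock-names = "iface", "core";
		resets = <&gcc GCC_SDCC2_BCR>;
	};
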
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
new file mode 100644
index 000000000000..f48fbd6f2095
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
+#define _DT_BINDINGS_CLK_MSM_RPMH_H
+
+/* RPMh controlled clocks */
+#define RPMH_CXO_CLK 0
+#define RPMH_CXO_CLK_A 1
+#define RPMH_LN_BB_CLK2 2
+#define RPMH_LN_BB_CLK2_A 3
+#define RPMH_LN_BB_CLK3 4
+#define RPMH_LN_BB_CLK3_A 5
+#define RPMH_RF_CLK1 6
+#define RPMH_RF_CLK1_A 7
+#define RPMH_RF_CLK2 8
+#define RPMH_RF_CLK2_A 9
+#define RPMH_RF_CLK3 10
+#define RPMH_RF_CLK3_A 11
+
+#endif
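Note: RPMh clocks are shared, always-on domain resources; consumers reference them through the RPMh clock controller node. A hedged sketch (&rpmhcc and &ufs_mem_phy are assumed labels, not defined by this header):

	#include <dt-bindings/clock/qcom,rpmh.h>

	&ufs_mem_phy {
		clocks = <&rpmhcc RPMH_CXO_CLK>;
	};
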
diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
new file mode 100644
index 000000000000..1b868165e8ce
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H
+
+/* VIDEO_CC clock registers */
+#define VIDEO_CC_APB_CLK 0
+#define VIDEO_CC_AT_CLK 1
+#define VIDEO_CC_QDSS_TRIG_CLK 2
+#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3
+#define VIDEO_CC_VCODEC0_AXI_CLK 4
+#define VIDEO_CC_VCODEC0_CORE_CLK 5
+#define VIDEO_CC_VCODEC1_AXI_CLK 6
+#define VIDEO_CC_VCODEC1_CORE_CLK 7
+#define VIDEO_CC_VENUS_AHB_CLK 8
+#define VIDEO_CC_VENUS_CLK_SRC 9
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 10
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 11
+#define VIDEO_PLL0 12
+
+/* VIDEO_CC Resets */
+#define VIDEO_CC_VENUS_BCR 0
+#define VIDEO_CC_VCODEC0_BCR 1
+#define VIDEO_CC_VCODEC1_BCR 2
+#define VIDEO_CC_INTERFACE_BCR 3
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+#define VCODEC1_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/r8a77470-cpg-mssr.h b/include/dt-bindings/clock/r8a77470-cpg-mssr.h
new file mode 100644
index 000000000000..34cba49d0f84
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77470-cpg-mssr.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77470 CPG Core Clocks */
+#define R8A77470_CLK_Z2 0
+#define R8A77470_CLK_ZTR 1
+#define R8A77470_CLK_ZTRD2 2
+#define R8A77470_CLK_ZT 3
+#define R8A77470_CLK_ZX 4
+#define R8A77470_CLK_ZS 5
+#define R8A77470_CLK_HP 6
+#define R8A77470_CLK_B 7
+#define R8A77470_CLK_LB 8
+#define R8A77470_CLK_P 9
+#define R8A77470_CLK_CL 10
+#define R8A77470_CLK_CP 11
+#define R8A77470_CLK_M2 12
+#define R8A77470_CLK_ZB3 13
+#define R8A77470_CLK_SDH 14
+#define R8A77470_CLK_SD0 15
+#define R8A77470_CLK_SD1 16
+#define R8A77470_CLK_SD2 17
+#define R8A77470_CLK_MP 18
+#define R8A77470_CLK_QSPI 19
+#define R8A77470_CLK_CPEX 20
+#define R8A77470_CLK_RCAN 21
+#define R8A77470_CLK_R 22
+#define R8A77470_CLK_OSC 23
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a77990-cpg-mssr.h b/include/dt-bindings/clock/r8a77990-cpg-mssr.h
new file mode 100644
index 000000000000..a596a482f3a9
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77990-cpg-mssr.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77990 CPG Core Clocks */
+#define R8A77990_CLK_Z2 0
+#define R8A77990_CLK_ZR 1
+#define R8A77990_CLK_ZG 2
+#define R8A77990_CLK_ZTR 3
+#define R8A77990_CLK_ZT 4
+#define R8A77990_CLK_ZX 5
+#define R8A77990_CLK_S0D1 6
+#define R8A77990_CLK_S0D3 7
+#define R8A77990_CLK_S0D6 8
+#define R8A77990_CLK_S0D12 9
+#define R8A77990_CLK_S0D24 10
+#define R8A77990_CLK_S1D1 11
+#define R8A77990_CLK_S1D2 12
+#define R8A77990_CLK_S1D4 13
+#define R8A77990_CLK_S2D1 14
+#define R8A77990_CLK_S2D2 15
+#define R8A77990_CLK_S2D4 16
+#define R8A77990_CLK_S3D1 17
+#define R8A77990_CLK_S3D2 18
+#define R8A77990_CLK_S3D4 19
+#define R8A77990_CLK_S0D6C 20
+#define R8A77990_CLK_S3D1C 21
+#define R8A77990_CLK_S3D2C 22
+#define R8A77990_CLK_S3D4C 23
+#define R8A77990_CLK_LB 24
+#define R8A77990_CLK_CL 25
+#define R8A77990_CLK_ZB3 26
+#define R8A77990_CLK_ZB3D2 27
+#define R8A77990_CLK_CR 28
+#define R8A77990_CLK_CRD2 29
+#define R8A77990_CLK_SD0H 30
+#define R8A77990_CLK_SD0 31
+#define R8A77990_CLK_SD1H 32
+#define R8A77990_CLK_SD1 33
+#define R8A77990_CLK_SD3H 34
+#define R8A77990_CLK_SD3 35
+#define R8A77990_CLK_RPC 36
+#define R8A77990_CLK_RPCD2 37
+#define R8A77990_CLK_ZA2 38
+#define R8A77990_CLK_ZA8 39
+#define R8A77990_CLK_Z2D 40
+#define R8A77990_CLK_CANFD 41
+#define R8A77990_CLK_MSO 42
+#define R8A77990_CLK_R 43
+#define R8A77990_CLK_OSC 44
+#define R8A77990_CLK_LV0 45
+#define R8A77990_CLK_LV1 46
+#define R8A77990_CLK_CSI0 47
+#define R8A77990_CLK_CP 48
+#define R8A77990_CLK_CPEX 49
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 86e3ec662ef4..90ec780bfc68 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -76,7 +76,7 @@
#define I2C6 63
#define USART1 64
#define RTCAPB 65
-#define TZC 66
+#define TZC1 66
#define TZPC 67
#define IWDG1 68
#define BSEC 69
@@ -123,6 +123,7 @@
#define CRC1 110
#define USBH 111
#define ETHSTP 112
+#define TZC2 113
/* Kernel clocks */
#define SDMMC1_K 118
@@ -228,7 +229,6 @@
#define CK_MCO2 212
/* TRACE & DEBUG clocks */
-#define DBG 213
#define CK_DBG 214
#define CK_TRACE 215
diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
new file mode 100644
index 000000000000..76136132a13e
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_
+
+#define CLK_AR100 0
+
+#define CLK_R_APB1 2
+
+#define CLK_R_APB1_TIMER 4
+#define CLK_R_APB1_TWD 5
+#define CLK_R_APB1_PWM 6
+#define CLK_R_APB2_UART 7
+#define CLK_R_APB2_I2C 8
+#define CLK_R_APB1_IR 9
+#define CLK_R_APB1_W1 10
+
+#define CLK_IR 11
+#define CLK_W1 12
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
new file mode 100644
index 000000000000..df017fdfb44e
--- /dev/null
+++ b/include/dt-bindings/dma/jz4780-dma.h
@@ -0,0 +1,49 @@
+#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
+#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
+
+/*
+ * Request type numbers for the JZ4780 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define JZ4780_DMA_I2S1_TX 0x4
+#define JZ4780_DMA_I2S1_RX 0x5
+#define JZ4780_DMA_I2S0_TX 0x6
+#define JZ4780_DMA_I2S0_RX 0x7
+#define JZ4780_DMA_AUTO 0x8
+#define JZ4780_DMA_SADC_RX 0x9
+#define JZ4780_DMA_UART4_TX 0xc
+#define JZ4780_DMA_UART4_RX 0xd
+#define JZ4780_DMA_UART3_TX 0xe
+#define JZ4780_DMA_UART3_RX 0xf
+#define JZ4780_DMA_UART2_TX 0x10
+#define JZ4780_DMA_UART2_RX 0x11
+#define JZ4780_DMA_UART1_TX 0x12
+#define JZ4780_DMA_UART1_RX 0x13
+#define JZ4780_DMA_UART0_TX 0x14
+#define JZ4780_DMA_UART0_RX 0x15
+#define JZ4780_DMA_SSI0_TX 0x16
+#define JZ4780_DMA_SSI0_RX 0x17
+#define JZ4780_DMA_SSI1_TX 0x18
+#define JZ4780_DMA_SSI1_RX 0x19
+#define JZ4780_DMA_MSC0_TX 0x1a
+#define JZ4780_DMA_MSC0_RX 0x1b
+#define JZ4780_DMA_MSC1_TX 0x1c
+#define JZ4780_DMA_MSC1_RX 0x1d
+#define JZ4780_DMA_MSC2_TX 0x1e
+#define JZ4780_DMA_MSC2_RX 0x1f
+#define JZ4780_DMA_PCM0_TX 0x20
+#define JZ4780_DMA_PCM0_RX 0x21
+#define JZ4780_DMA_SMB0_TX 0x24
+#define JZ4780_DMA_SMB0_RX 0x25
+#define JZ4780_DMA_SMB1_TX 0x26
+#define JZ4780_DMA_SMB1_RX 0x27
+#define JZ4780_DMA_SMB2_TX 0x28
+#define JZ4780_DMA_SMB2_RX 0x29
+#define JZ4780_DMA_SMB3_TX 0x2a
+#define JZ4780_DMA_SMB3_RX 0x2b
+#define JZ4780_DMA_SMB4_TX 0x2c
+#define JZ4780_DMA_SMB4_RX 0x2d
+#define JZ4780_DMA_DES_TX 0x2e
+#define JZ4780_DMA_DES_RX 0x2f
+
+#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
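Note: these request-type values fill the first cell of a DMA specifier; the JZ4780 DMA driver additionally expects a channel-selection cell, where 0xffffffff means "any channel" (stated here as an assumption about that binding). Sketch with &dma and &uart0 as illustrative labels:

	#include <dt-bindings/dma/jz4780-dma.h>

	&uart0 {
		/* first cell: DRTn request type; second cell: channel mask */
		dmas = <&dma JZ4780_DMA_UART0_RX 0xffffffff>,
		       <&dma JZ4780_DMA_UART0_TX 0xffffffff>;
		dma-names = "rx", "tx";
	};
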
diff --git a/include/dt-bindings/memory/tegra114-mc.h b/include/dt-bindings/memory/tegra114-mc.h
index 27c8386987ff..dfe99c8a5ba5 100644
--- a/include/dt-bindings/memory/tegra114-mc.h
+++ b/include/dt-bindings/memory/tegra114-mc.h
@@ -23,4 +23,21 @@
#define TEGRA_SWGROUP_EMUCIF 18
#define TEGRA_SWGROUP_TSEC 19
+#define TEGRA114_MC_RESET_AVPC 0
+#define TEGRA114_MC_RESET_DC 1
+#define TEGRA114_MC_RESET_DCB 2
+#define TEGRA114_MC_RESET_EPP 3
+#define TEGRA114_MC_RESET_2D 4
+#define TEGRA114_MC_RESET_HC 5
+#define TEGRA114_MC_RESET_HDA 6
+#define TEGRA114_MC_RESET_ISP 7
+#define TEGRA114_MC_RESET_MPCORE 8
+#define TEGRA114_MC_RESET_MPCORELP 9
+#define TEGRA114_MC_RESET_MPE 10
+#define TEGRA114_MC_RESET_3D 11
+#define TEGRA114_MC_RESET_3D2 12
+#define TEGRA114_MC_RESET_PPCS 13
+#define TEGRA114_MC_RESET_VDE 14
+#define TEGRA114_MC_RESET_VI 15
+
#endif
diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h
index f534d7c06019..186e6b7e9b35 100644
--- a/include/dt-bindings/memory/tegra124-mc.h
+++ b/include/dt-bindings/memory/tegra124-mc.h
@@ -29,4 +29,29 @@
#define TEGRA_SWGROUP_VIC 24
#define TEGRA_SWGROUP_VI 25
+#define TEGRA124_MC_RESET_AFI 0
+#define TEGRA124_MC_RESET_AVPC 1
+#define TEGRA124_MC_RESET_DC 2
+#define TEGRA124_MC_RESET_DCB 3
+#define TEGRA124_MC_RESET_HC 4
+#define TEGRA124_MC_RESET_HDA 5
+#define TEGRA124_MC_RESET_ISP2 6
+#define TEGRA124_MC_RESET_MPCORE 7
+#define TEGRA124_MC_RESET_MPCORELP 8
+#define TEGRA124_MC_RESET_MSENC 9
+#define TEGRA124_MC_RESET_PPCS 10
+#define TEGRA124_MC_RESET_SATA 11
+#define TEGRA124_MC_RESET_VDE 12
+#define TEGRA124_MC_RESET_VI 13
+#define TEGRA124_MC_RESET_VIC 14
+#define TEGRA124_MC_RESET_XUSB_HOST 15
+#define TEGRA124_MC_RESET_XUSB_DEV 16
+#define TEGRA124_MC_RESET_TSEC 17
+#define TEGRA124_MC_RESET_SDMMC1 18
+#define TEGRA124_MC_RESET_SDMMC2 19
+#define TEGRA124_MC_RESET_SDMMC3 20
+#define TEGRA124_MC_RESET_SDMMC4 21
+#define TEGRA124_MC_RESET_ISP2B 22
+#define TEGRA124_MC_RESET_GPU 23
+
#endif
diff --git a/include/dt-bindings/memory/tegra20-mc.h b/include/dt-bindings/memory/tegra20-mc.h
new file mode 100644
index 000000000000..35e131eee198
--- /dev/null
+++ b/include/dt-bindings/memory/tegra20-mc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DT_BINDINGS_MEMORY_TEGRA20_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA20_MC_H
+
+#define TEGRA20_MC_RESET_AVPC 0
+#define TEGRA20_MC_RESET_DC 1
+#define TEGRA20_MC_RESET_DCB 2
+#define TEGRA20_MC_RESET_EPP 3
+#define TEGRA20_MC_RESET_2D 4
+#define TEGRA20_MC_RESET_HC 5
+#define TEGRA20_MC_RESET_ISP 6
+#define TEGRA20_MC_RESET_MPCORE 7
+#define TEGRA20_MC_RESET_MPEA 8
+#define TEGRA20_MC_RESET_MPEB 9
+#define TEGRA20_MC_RESET_MPEC 10
+#define TEGRA20_MC_RESET_3D 11
+#define TEGRA20_MC_RESET_PPCS 12
+#define TEGRA20_MC_RESET_VDE 13
+#define TEGRA20_MC_RESET_VI 14
+
+#endif
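Note: the Tegra MC headers in this patch add memory-client hot-reset IDs rather than clock IDs; they are consumed through the resets property of the affected module, with the memory controller acting as reset provider. A hedged sketch (&mc, &vde and the reset-names value are assumptions):

	#include <dt-bindings/memory/tegra20-mc.h>

	&vde {
		resets = <&mc TEGRA20_MC_RESET_VDE>;
		reset-names = "mc";
	};
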
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
index 4490f7cf4772..cacf05617e03 100644
--- a/include/dt-bindings/memory/tegra210-mc.h
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -34,4 +34,35 @@
#define TEGRA_SWGROUP_ETR 29
#define TEGRA_SWGROUP_TSECB 30
+#define TEGRA210_MC_RESET_AFI 0
+#define TEGRA210_MC_RESET_AVPC 1
+#define TEGRA210_MC_RESET_DC 2
+#define TEGRA210_MC_RESET_DCB 3
+#define TEGRA210_MC_RESET_HC 4
+#define TEGRA210_MC_RESET_HDA 5
+#define TEGRA210_MC_RESET_ISP2 6
+#define TEGRA210_MC_RESET_MPCORE 7
+#define TEGRA210_MC_RESET_NVENC 8
+#define TEGRA210_MC_RESET_PPCS 9
+#define TEGRA210_MC_RESET_SATA 10
+#define TEGRA210_MC_RESET_VI 11
+#define TEGRA210_MC_RESET_VIC 12
+#define TEGRA210_MC_RESET_XUSB_HOST 13
+#define TEGRA210_MC_RESET_XUSB_DEV 14
+#define TEGRA210_MC_RESET_A9AVP 15
+#define TEGRA210_MC_RESET_TSEC 16
+#define TEGRA210_MC_RESET_SDMMC1 17
+#define TEGRA210_MC_RESET_SDMMC2 18
+#define TEGRA210_MC_RESET_SDMMC3 19
+#define TEGRA210_MC_RESET_SDMMC4 20
+#define TEGRA210_MC_RESET_ISP2B 21
+#define TEGRA210_MC_RESET_GPU 22
+#define TEGRA210_MC_RESET_NVDEC 23
+#define TEGRA210_MC_RESET_APE 24
+#define TEGRA210_MC_RESET_SE 25
+#define TEGRA210_MC_RESET_NVJPG 26
+#define TEGRA210_MC_RESET_AXIAP 27
+#define TEGRA210_MC_RESET_ETR 28
+#define TEGRA210_MC_RESET_TSECB 29
+
#endif
diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h
index 3cac81919023..169f005fbc78 100644
--- a/include/dt-bindings/memory/tegra30-mc.h
+++ b/include/dt-bindings/memory/tegra30-mc.h
@@ -22,4 +22,23 @@
#define TEGRA_SWGROUP_MPCORE 17
#define TEGRA_SWGROUP_ISP 18
+#define TEGRA30_MC_RESET_AFI 0
+#define TEGRA30_MC_RESET_AVPC 1
+#define TEGRA30_MC_RESET_DC 2
+#define TEGRA30_MC_RESET_DCB 3
+#define TEGRA30_MC_RESET_EPP 4
+#define TEGRA30_MC_RESET_2D 5
+#define TEGRA30_MC_RESET_HC 6
+#define TEGRA30_MC_RESET_HDA 7
+#define TEGRA30_MC_RESET_ISP 8
+#define TEGRA30_MC_RESET_MPCORE 9
+#define TEGRA30_MC_RESET_MPCORELP 10
+#define TEGRA30_MC_RESET_MPE 11
+#define TEGRA30_MC_RESET_3D 12
+#define TEGRA30_MC_RESET_3D2 13
+#define TEGRA30_MC_RESET_PPCS 14
+#define TEGRA30_MC_RESET_SATA 15
+#define TEGRA30_MC_RESET_VDE 16
+#define TEGRA30_MC_RESET_VI 17
+
#endif
diff --git a/include/dt-bindings/net/microchip-lan78xx.h b/include/dt-bindings/net/microchip-lan78xx.h
new file mode 100644
index 000000000000..0742ff075307
--- /dev/null
+++ b/include/dt-bindings/net/microchip-lan78xx.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_MICROCHIP_LAN78XX_H
+#define _DT_BINDINGS_MICROCHIP_LAN78XX_H
+
+/* LED modes for LAN7800/LAN7850 embedded PHY */
+
+#define LAN78XX_LINK_ACTIVITY 0
+#define LAN78XX_LINK_1000_ACTIVITY 1
+#define LAN78XX_LINK_100_ACTIVITY 2
+#define LAN78XX_LINK_10_ACTIVITY 3
+#define LAN78XX_LINK_100_1000_ACTIVITY 4
+#define LAN78XX_LINK_10_1000_ACTIVITY 5
+#define LAN78XX_LINK_10_100_ACTIVITY 6
+#define LAN78XX_DUPLEX_COLLISION 8
+#define LAN78XX_COLLISION 9
+#define LAN78XX_ACTIVITY 10
+#define LAN78XX_AUTONEG_FAULT 12
+#define LAN78XX_FORCE_LED_OFF 14
+#define LAN78XX_FORCE_LED_ON 15
+
+#endif
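Note: these LED mode values are meant for a per-board property on the LAN78xx node; the microchip,led-modes property name is taken from the lan78xx binding and should be read as an assumption of that binding, not something introduced by this header. Sketch with an assumed &lan7800 label:

	#include <dt-bindings/net/microchip-lan78xx.h>

	&lan7800 {
		/* LED0: gigabit link/activity, LED1: 10/100 link/activity */
		microchip,led-modes = <LAN78XX_LINK_1000_ACTIVITY
				       LAN78XX_LINK_10_100_ACTIVITY>;
	};
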
diff --git a/include/dt-bindings/phy/phy-qcom-qusb2.h b/include/dt-bindings/phy/phy-qcom-qusb2.h
new file mode 100644
index 000000000000..5c5e4d800cac
--- /dev/null
+++ b/include/dt-bindings/phy/phy-qcom-qusb2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_PHY_QUSB2_H_
+#define _DT_BINDINGS_QCOM_PHY_QUSB2_H_
+
+/* PHY HSTX TRIM bit values (24mA to 15mA) */
+#define QUSB2_V2_HSTX_TRIM_24_0_MA 0x0
+#define QUSB2_V2_HSTX_TRIM_23_4_MA 0x1
+#define QUSB2_V2_HSTX_TRIM_22_8_MA 0x2
+#define QUSB2_V2_HSTX_TRIM_22_2_MA 0x3
+#define QUSB2_V2_HSTX_TRIM_21_6_MA 0x4
+#define QUSB2_V2_HSTX_TRIM_21_0_MA 0x5
+#define QUSB2_V2_HSTX_TRIM_20_4_MA 0x6
+#define QUSB2_V2_HSTX_TRIM_19_8_MA 0x7
+#define QUSB2_V2_HSTX_TRIM_19_2_MA 0x8
+#define QUSB2_V2_HSTX_TRIM_18_6_MA 0x9
+#define QUSB2_V2_HSTX_TRIM_18_0_MA 0xa
+#define QUSB2_V2_HSTX_TRIM_17_4_MA 0xb
+#define QUSB2_V2_HSTX_TRIM_16_8_MA 0xc
+#define QUSB2_V2_HSTX_TRIM_16_2_MA 0xd
+#define QUSB2_V2_HSTX_TRIM_15_6_MA 0xe
+#define QUSB2_V2_HSTX_TRIM_15_0_MA 0xf
+
+/* PHY PREEMPHASIS bit values */
+#define QUSB2_V2_PREEMPHASIS_NONE 0
+#define QUSB2_V2_PREEMPHASIS_5_PERCENT 1
+#define QUSB2_V2_PREEMPHASIS_10_PERCENT 2
+#define QUSB2_V2_PREEMPHASIS_15_PERCENT 3
+
+/* PHY PREEMPHASIS-WIDTH bit values */
+#define QUSB2_V2_PREEMPHASIS_WIDTH_FULL_BIT 0
+#define QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT 1
+
+#endif
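Note: the QUSB2 V2 trim and pre-emphasis values map onto per-board PHY tuning properties; the property names below follow the qcom,qusb2-phy binding and, like the &usb_2_hsphy label, are assumptions rather than part of this header. Sketch:

	#include <dt-bindings/phy/phy-qcom-qusb2.h>

	&usb_2_hsphy {
		qcom,hstx-trim-value = <QUSB2_V2_HSTX_TRIM_21_6_MA>;
		qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_10_PERCENT>;
		qcom,preemphasis-width = <QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT>;
	};
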
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
index 4878a67a844c..604fe781c465 100644
--- a/include/dt-bindings/pinctrl/mt7623-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -23,20 +23,26 @@
#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5)
#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5)
#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)
+#define MT7623_PIN_7_SPI1_CSN_FUNC_KCOL0 (MTK_PIN_NO(7) | 4)
#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1)
#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2)
+#define MT7623_PIN_8_SPI1_MI_FUNC_KCOL1 (MTK_PIN_NO(8) | 4)
#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1)
#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2)
+#define MT7623_PIN_9_SPI1_MO_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT7623_PIN_9_SPI1_MO_FUNC_KCOL2 (MTK_PIN_NO(9) | 4)
#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1)
@@ -53,6 +59,7 @@
#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1)
#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2)
+#define MT7623_PIN_14_GPIO14_FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5)
#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1)
@@ -60,88 +67,139 @@
#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1)
+#define MT7623_PIN_18_PCM_CLK_FUNC_MRG_CLK (MTK_PIN_NO(18) | 2)
+#define MT7623_PIN_18_PCM_CLK_FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4)
+#define MT7623_PIN_18_PCM_CLK_FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5)
#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6)
#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5)
#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6)
#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1)
+#define MT7623_PIN_20_PCM_RX_FUNC_MRG_RX (MTK_PIN_NO(20) | 2)
+#define MT7623_PIN_20_PCM_RX_FUNC_MRG_TX (MTK_PIN_NO(20) | 3)
#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4)
+#define MT7623_PIN_20_PCM_RX_FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5)
#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6)
#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1)
+#define MT7623_PIN_21_PCM_TX_FUNC_MRG_TX (MTK_PIN_NO(21) | 2)
+#define MT7623_PIN_21_PCM_TX_FUNC_MRG_RX (MTK_PIN_NO(21) | 3)
#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4)
+#define MT7623_PIN_21_PCM_TX_FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5)
#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6)
#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1)
#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2)
+#define MT7623_PIN_22_EINT0_FUNC_KCOL3 (MTK_PIN_NO(22) | 3)
+#define MT7623_PIN_22_EINT0_FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4)
+#define MT7623_PIN_22_EINT0_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5)
#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1)
#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2)
+#define MT7623_PIN_23_EINT1_FUNC_KCOL2 (MTK_PIN_NO(23) | 3)
+#define MT7623_PIN_23_EINT1_FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4)
+#define MT7623_PIN_23_EINT1_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1)
#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2)
+#define MT7623_PIN_24_EINT2_FUNC_KCOL1 (MTK_PIN_NO(24) | 3)
+#define MT7623_PIN_24_EINT2_FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4)
#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1)
+#define MT7623_PIN_25_EINT3_FUNC_KCOL0 (MTK_PIN_NO(25) | 3)
+#define MT7623_PIN_25_EINT3_FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4)
#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1)
+#define MT7623_PIN_26_EINT4_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2)
+#define MT7623_PIN_26_EINT4_FUNC_KROW3 (MTK_PIN_NO(26) | 3)
+#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4)
+#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5)
#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6)
#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1)
+#define MT7623_PIN_27_EINT5_FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2)
+#define MT7623_PIN_27_EINT5_FUNC_KROW2 (MTK_PIN_NO(27) | 3)
+#define MT7623_PIN_27_EINT5_FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4)
#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6)
#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1)
+#define MT7623_PIN_28_EINT6_FUNC_KROW1 (MTK_PIN_NO(28) | 3)
+#define MT7623_PIN_28_EINT6_FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4)
#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6)
#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1)
#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2)
+#define MT7623_PIN_29_EINT7_FUNC_KROW0 (MTK_PIN_NO(29) | 3)
+#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4)
+#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5)
#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6)
#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2)
#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5)
#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6)
#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1)
#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5)
#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6)
#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1)
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_G1_RXDV (MTK_PIN_NO(37) | 5)
#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1)
+#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2)
+#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3)
+#define MT7623_PIN_39_JTMS_FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4)
#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1)
+#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2)
+#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3)
+#define MT7623_PIN_40_JTCK_FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4)
#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1)
+#define MT7623_PIN_41_JTDI_FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2)
+#define MT7623_PIN_41_JTDI_FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4)
#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1)
+#define MT7623_PIN_42_JTDO_FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2)
+#define MT7623_PIN_42_JTDO_FUNC_DFD_TDO (MTK_PIN_NO(42) | 4)
#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1)
@@ -160,31 +218,40 @@
#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1)
+#define MT7623_PIN_47_NREB_FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2)
#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1)
+#define MT7623_PIN_48_NRNB_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2)
#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2)
#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3)
#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6)
#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_SPDIF (MTK_PIN_NO(53) | 3)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_ADC_CK (MTK_PIN_NO(53) | 4)
#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5)
#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1)
+#define MT7623_PIN_54_SPI0_CK_FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3)
+#define MT7623_PIN_54_SPI0_CK_FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4)
#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1)
#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2)
#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3)
+#define MT7623_PIN_55_SPI0_MI_FUNC_ADC_WS (MTK_PIN_NO(55) | 4)
#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5)
#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3)
#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1)
@@ -275,10 +342,23 @@
#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
+#define MT7623_PIN_83_LCM_RST_FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2)
#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1)
+#define MT7623_PIN_91_MIPI_TDN3_FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT7623_PIN_91_MIPI_TDN3_FUNC_TDN3 (MTK_PIN_NO(91) | 1)
+
+#define MT7623_PIN_92_MIPI_TDP3_FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT7623_PIN_92_MIPI_TDP3_FUNC_TDP3 (MTK_PIN_NO(92) | 1)
+
+#define MT7623_PIN_93_MIPI_TDN2_FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT7623_PIN_93_MIPI_TDN2_FUNC_TDN2 (MTK_PIN_NO(93) | 1)
+
+#define MT7623_PIN_94_MIPI_TDP2_FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT7623_PIN_94_MIPI_TDP2_FUNC_TDP2 (MTK_PIN_NO(94) | 1)
+
#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1)
@@ -300,20 +380,24 @@
#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1)
#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3)
+#define MT7623_PIN_101_SPI2_CSN_FUNC_KROW0 (MTK_PIN_NO(101) | 4)
#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1)
#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2)
#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3)
+#define MT7623_PIN_102_SPI2_MI_FUNC_KROW1 (MTK_PIN_NO(102) | 4)
#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1)
#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2)
#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3)
+#define MT7623_PIN_103_SPI2_MO_FUNC_KROW2 (MTK_PIN_NO(103) | 4)
#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1)
#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3)
+#define MT7623_PIN_104_SPI2_CK_FUNC_KROW3 (MTK_PIN_NO(104) | 4)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
@@ -394,7 +478,7 @@
#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5)
#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
-#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1)
+#define MT7623_PIN_122_GPIO122_FUNC_CEC (MTK_PIN_NO(122) | 1)
#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4)
#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5)
@@ -404,12 +488,12 @@
#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
-#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1)
+#define MT7623_PIN_124_GPIO124_FUNC_HDMISCK (MTK_PIN_NO(124) | 1)
#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4)
#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5)
#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
-#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1)
+#define MT7623_PIN_125_GPIO125_FUNC_HDMISD (MTK_PIN_NO(125) | 1)
#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4)
#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5)
diff --git a/include/dt-bindings/power/px30-power.h b/include/dt-bindings/power/px30-power.h
new file mode 100644
index 000000000000..30917a99ad20
--- /dev/null
+++ b/include/dt-bindings/power/px30-power.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_PX30_POWER_H__
+#define __DT_BINDINGS_POWER_PX30_POWER_H__
+
+/* VD_CORE */
+#define PX30_PD_A35_0 0
+#define PX30_PD_A35_1 1
+#define PX30_PD_A35_2 2
+#define PX30_PD_A35_3 3
+#define PX30_PD_SCU 4
+
+/* VD_LOGIC */
+#define PX30_PD_USB 5
+#define PX30_PD_DDR 6
+#define PX30_PD_SDCARD 7
+#define PX30_PD_CRYPTO 8
+#define PX30_PD_GMAC 9
+#define PX30_PD_MMC_NAND 10
+#define PX30_PD_VPU 11
+#define PX30_PD_VO 12
+#define PX30_PD_VI 13
+#define PX30_PD_GPU 14
+
+/* VD_PMU */
+#define PX30_PD_PMU 15
+
+#endif
diff --git a/include/dt-bindings/power/r8a77470-sysc.h b/include/dt-bindings/power/r8a77470-sysc.h
new file mode 100644
index 000000000000..8bf4db187c31
--- /dev/null
+++ b/include/dt-bindings/power/r8a77470-sysc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A77470_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77470_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77470_PD_CA7_CPU0 5
+#define R8A77470_PD_CA7_CPU1 6
+#define R8A77470_PD_SGX 20
+#define R8A77470_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A77470_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77470_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a77990-sysc.h b/include/dt-bindings/power/r8a77990-sysc.h
new file mode 100644
index 000000000000..944d85beec15
--- /dev/null
+++ b/include/dt-bindings/power/r8a77990-sysc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A77990_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77990_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77990_PD_CA53_CPU0 5
+#define R8A77990_PD_CA53_CPU1 6
+#define R8A77990_PD_CR7 13
+#define R8A77990_PD_A3VC 14
+#define R8A77990_PD_3DG_A 17
+#define R8A77990_PD_3DG_B 18
+#define R8A77990_PD_CA53_SCU 21
+#define R8A77990_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A77990_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77990_SYSC_H__ */
diff --git a/include/dt-bindings/power/rk3036-power.h b/include/dt-bindings/power/rk3036-power.h
new file mode 100644
index 000000000000..0bc6b5d5075e
--- /dev/null
+++ b/include/dt-bindings/power/rk3036-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3036_POWER_H__
+#define __DT_BINDINGS_POWER_RK3036_POWER_H__
+
+#define RK3036_PD_MSCH 0
+#define RK3036_PD_CORE 1
+#define RK3036_PD_PERI 2
+#define RK3036_PD_VIO 3
+#define RK3036_PD_VPU 4
+#define RK3036_PD_GPU 5
+#define RK3036_PD_SYS 6
+
+#endif
diff --git a/include/dt-bindings/power/rk3128-power.h b/include/dt-bindings/power/rk3128-power.h
new file mode 100644
index 000000000000..c051dc3108db
--- /dev/null
+++ b/include/dt-bindings/power/rk3128-power.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3128_POWER_H__
+#define __DT_BINDINGS_POWER_RK3128_POWER_H__
+
+/* VD_CORE */
+#define RK3128_PD_CORE 0
+
+/* VD_LOGIC */
+#define RK3128_PD_VIO 1
+#define RK3128_PD_VIDEO 2
+#define RK3128_PD_GPU 3
+#define RK3128_PD_MSCH 4
+
+#endif
diff --git a/include/dt-bindings/power/rk3228-power.h b/include/dt-bindings/power/rk3228-power.h
new file mode 100644
index 000000000000..6a8dc1bf76ce
--- /dev/null
+++ b/include/dt-bindings/power/rk3228-power.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3228_POWER_H__
+#define __DT_BINDINGS_POWER_RK3228_POWER_H__
+
+/*
+ * RK3228 idle ID summary.
+ */
+
+#define RK3228_PD_CORE 0
+#define RK3228_PD_MSCH 1
+#define RK3228_PD_BUS 2
+#define RK3228_PD_SYS 3
+#define RK3228_PD_VIO 4
+#define RK3228_PD_VOP 5
+#define RK3228_PD_VPU 6
+#define RK3228_PD_RKVDEC 7
+#define RK3228_PD_GPU 8
+#define RK3228_PD_PERI 9
+#define RK3228_PD_GMAC 10
+
+#endif
diff --git a/include/dt-bindings/reset/axg-aoclkc.h b/include/dt-bindings/reset/axg-aoclkc.h
new file mode 100644
index 000000000000..d342c0b6b2a7
--- /dev/null
+++ b/include/dt-bindings/reset/axg-aoclkc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+
+#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK
+#define DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK
+
+#define RESET_AO_REMOTE 0
+#define RESET_AO_I2C_MASTER 1
+#define RESET_AO_I2C_SLAVE 2
+#define RESET_AO_UART1 3
+#define RESET_AO_UART2 4
+#define RESET_AO_IR_BLASTER 5
+
+#endif
diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h
index 21deb547cfa4..50b7f066da9a 100644
--- a/include/dt-bindings/reset/mt2701-resets.h
+++ b/include/dt-bindings/reset/mt2701-resets.h
@@ -87,4 +87,7 @@
#define MT2701_ETHSYS_GMAC_RST 23
#define MT2701_ETHSYS_PPE_RST 31
+/* G3DSYS resets */
+#define MT2701_G3DSYS_CORE_RST 0
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
new file mode 100644
index 000000000000..01c84dba49a4
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_
+
+#define RST_R_APB1_TIMER 0
+#define RST_R_APB1_TWD 1
+#define RST_R_APB1_PWM 2
+#define RST_R_APB2_UART 3
+#define RST_R_APB2_I2C 4
+#define RST_R_APB1_IR 5
+#define RST_R_APB1_W1 6
+
+#endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/soc/qcom,apr.h b/include/dt-bindings/soc/qcom,apr.h
new file mode 100644
index 000000000000..006362400c0f
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,apr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_QCOM_APR_H
+#define __DT_BINDINGS_QCOM_APR_H
+
+/* Domain IDs */
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+
+/* ADSP service IDs */
+#define APR_SVC_ADSP_CORE 0x3
+#define APR_SVC_AFE 0x4
+#define APR_SVC_VSM 0x5
+#define APR_SVC_VPM 0x6
+#define APR_SVC_ASM 0x7
+#define APR_SVC_ADM 0x8
+#define APR_SVC_ADSP_MVM 0x09
+#define APR_SVC_ADSP_CVS 0x0A
+#define APR_SVC_ADSP_CVP 0x0B
+#define APR_SVC_USM 0x0C
+#define APR_SVC_LSM 0x0D
+#define APR_SVC_VIDC 0x16
+#define APR_SVC_MAX 0x17
+
+#endif /* __DT_BINDINGS_QCOM_APR_H */
diff --git a/include/dt-bindings/sound/fsl-imx-audmux.h b/include/dt-bindings/sound/fsl-imx-audmux.h
index 751fe1416f95..15f138bebe16 100644
--- a/include/dt-bindings/sound/fsl-imx-audmux.h
+++ b/include/dt-bindings/sound/fsl-imx-audmux.h
@@ -25,6 +25,13 @@
#define MX51_AUDMUX_PORT6 5
#define MX51_AUDMUX_PORT7 6
+/*
+ * TFCSEL/RFCSEL (i.MX27) or TFSEL/TCSEL/RFSEL/RCSEL (i.MX31/51/53/6Q)
+ * can be sourced from Rx/Tx.
+ */
+#define IMX_AUDMUX_RXFS 0x8
+#define IMX_AUDMUX_RXCLK 0x8
+
/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */
#define IMX_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff)
#define IMX_AUDMUX_V1_PCR_INMEN (1 << 8)
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
new file mode 100644
index 000000000000..e2d3892240b8
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AFE_H__
+#define __DT_BINDINGS_Q6_AFE_H__
+
+/* Audio Front End (AFE) virtual port IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+
+#endif /* __DT_BINDINGS_Q6_AFE_H__ */
+
diff --git a/include/dt-bindings/sound/qcom,q6asm.h b/include/dt-bindings/sound/qcom,q6asm.h
new file mode 100644
index 000000000000..1eb77d87c2e8
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,q6asm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_ASM_H__
+#define __DT_BINDINGS_Q6_ASM_H__
+
+#define MSM_FRONTEND_DAI_MULTIMEDIA1 0
+#define MSM_FRONTEND_DAI_MULTIMEDIA2 1
+#define MSM_FRONTEND_DAI_MULTIMEDIA3 2
+#define MSM_FRONTEND_DAI_MULTIMEDIA4 3
+#define MSM_FRONTEND_DAI_MULTIMEDIA5 4
+#define MSM_FRONTEND_DAI_MULTIMEDIA6 5
+#define MSM_FRONTEND_DAI_MULTIMEDIA7 6
+#define MSM_FRONTEND_DAI_MULTIMEDIA8 7
+#define MSM_FRONTEND_DAI_MULTIMEDIA9 8
+#define MSM_FRONTEND_DAI_MULTIMEDIA10 9
+#define MSM_FRONTEND_DAI_MULTIMEDIA11 10
+#define MSM_FRONTEND_DAI_MULTIMEDIA12 11
+#define MSM_FRONTEND_DAI_MULTIMEDIA13 12
+#define MSM_FRONTEND_DAI_MULTIMEDIA14 13
+#define MSM_FRONTEND_DAI_MULTIMEDIA15 14
+#define MSM_FRONTEND_DAI_MULTIMEDIA16 15
+
+#endif /* __DT_BINDINGS_Q6_ASM_H__ */
diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h
new file mode 100644
index 000000000000..154c9b4414f2
--- /dev/null
+++ b/include/dt-bindings/sound/rt5640.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_RT5640_H
+#define __DT_RT5640_H
+
+#define RT5640_DMIC1_DATA_PIN_NONE 0
+#define RT5640_DMIC1_DATA_PIN_IN1P 1
+#define RT5640_DMIC1_DATA_PIN_GPIO3 2
+
+#define RT5640_DMIC2_DATA_PIN_NONE 0
+#define RT5640_DMIC2_DATA_PIN_IN1N 1
+#define RT5640_DMIC2_DATA_PIN_GPIO4 2
+
+#define RT5640_JD_SRC_GPIO1 1
+#define RT5640_JD_SRC_JD1_IN4P 2
+#define RT5640_JD_SRC_JD2_IN4N 3
+#define RT5640_JD_SRC_GPIO2 4
+#define RT5640_JD_SRC_GPIO3 5
+#define RT5640_JD_SRC_GPIO4 6
+
+#define RT5640_OVCD_SF_0P5 0
+#define RT5640_OVCD_SF_0P75 1
+#define RT5640_OVCD_SF_1P0 2
+#define RT5640_OVCD_SF_1P5 3
+
+#endif /* __DT_RT5640_H */
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 2480469ce8fb..e0a9c2368872 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -1,6 +1,6 @@
/* Asymmetric public-key cryptography key subtype
*
- * See Documentation/security/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.txt
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index b38240716d41..1cb77cd5135e 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -1,6 +1,6 @@
/* Asymmetric Public-key cryptography key type interface
*
- * See Documentation/security/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.txt
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e518e4e3dfb5..4b1548129fa2 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
* Our PSCI implementation stays the same across versions from
* v0.2 onward, only adding the few mandatory functions (such
* as FEATURES with 1.0) that are required by newer
- * revisions. It is thus safe to return the latest.
+ * revisions. It is thus safe to return the latest, unless
+ * userspace has instructed us otherwise.
*/
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+ if (vcpu->kvm->arch.psci_version)
+ return vcpu->kvm->arch.psci_version;
+
return KVM_ARM_PSCI_LATEST;
+ }
return KVM_ARM_PSCI_0_1;
}
@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 24f03941ada8..cfdd2484cc42 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -28,7 +28,7 @@
#include <linux/irqchip/arm-gic-v4.h>
-#define VGIC_V3_MAX_CPUS 255
+#define VGIC_V3_MAX_CPUS 512
#define VGIC_V2_MAX_CPUS 8
#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
@@ -131,6 +131,7 @@ struct vgic_irq {
u32 mpidr; /* GICv3 target VCPU */
};
u8 source; /* GICv2 SGIs only */
+ u8 active_source; /* GICv2 SGIs only */
u8 priority;
enum vgic_irq_config config; /* Level or edge */
@@ -200,6 +201,14 @@ struct vgic_its {
struct vgic_state_iter;
+struct vgic_redist_region {
+ u32 index;
+ gpa_t base;
+ u32 count; /* number of redistributors or 0 if single region */
+ u32 free_index; /* index of the next free redistributor */
+ struct list_head list;
+};
+
struct vgic_dist {
bool in_kernel;
bool ready;
@@ -219,10 +228,7 @@ struct vgic_dist {
/* either a GICv2 CPU interface */
gpa_t vgic_cpu_base;
/* or a number of GICv3 redistributor regions */
- struct {
- gpa_t vgic_redist_base;
- gpa_t vgic_redist_free_offset;
- };
+ struct list_head rd_regions;
};
/* distributor enabled */
@@ -310,6 +316,7 @@ struct vgic_cpu {
*/
struct vgic_io_device rd_iodev;
struct vgic_io_device sgi_iodev;
+ struct vgic_redist_region *rdreg;
/* Contains the attributes and gpa of the LPI pending tables. */
u64 pendbaser;
@@ -331,7 +338,6 @@ void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
-void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 15bfb15c2fa5..4b35a66383f9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -506,7 +506,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
-#define OSC_PCI_CONTROL_MASKS 0x0000001f
+#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
+#define OSC_PCI_CONTROL_MASKS 0x0000003f
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
@@ -578,6 +579,7 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
+extern void arch_post_acpi_subsys_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
@@ -899,7 +901,7 @@ static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
- return -ENODEV;
+ return 0;
}
#endif
@@ -1297,4 +1299,23 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PPTT
+int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_package(unsigned int cpu);
+int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
+#else
+static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_package(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 8f87bbeceef4..514bffa11dbb 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -14,6 +14,7 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
+#define DPC_FATAL 3
struct pci_dev;
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 9d8aabecfe2d..b83e68dd006f 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -8,8 +8,6 @@ struct kioctx;
struct kiocb;
struct mm_struct;
-#define KIOCB_KEY 0
-
typedef int (kiocb_cancel_fn)(struct kiocb *);
/* prototypes */
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index a031897fca76..ca1d2cc2cdfa 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -80,6 +80,11 @@
ARM_SMCCC_SMC_32, \
0, 0x8000)
+#define ARM_SMCCC_ARCH_WORKAROUND_2 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -291,5 +296,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+/* Return codes defined in ARM DEN 0070A */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
index a89df3be1686..65e3832f96b2 100644
--- a/include/linux/assoc_array.h
+++ b/include/linux/assoc_array.h
@@ -1,6 +1,6 @@
/* Generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
index 711275e6681c..a00a06550c10 100644
--- a/include/linux/assoc_array_priv.h
+++ b/include/linux/assoc_array_priv.h
@@ -1,6 +1,6 @@
/* Private definitions for the generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 40373920ea58..23f805562f4e 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -145,7 +145,12 @@ extern rwlock_t atalk_interfaces_lock;
extern struct atalk_route atrtr_default;
-extern const struct file_operations atalk_seq_arp_fops;
+struct aarp_iter_state {
+ int bucket;
+ struct aarp_entry **table;
+};
+
+extern const struct seq_operations aarp_seq_ops;
extern int sysctl_aarp_expiry_time;
extern int sysctl_aarp_tick_time;
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0c27515d2cf6..8124815eb121 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -214,6 +214,7 @@ struct atmphy_ops {
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
+ unsigned int acct_truesize; /* truesize accounted to vcc */
};
#define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+ * Because ATM skbs may not belong to a sock (and we don't
+ * necessarily want to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
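For illustration only, a minimal sketch of how a driver transmit path might use the new atm_account_tx() helper instead of open-coding the refcount_add()/atm_options assignments; example_atm_send() and example_hw_queue_tx() are hypothetical names, not part of this patch.

static int example_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* charge skb->truesize to the VCC and stash the accounted value */
	atm_account_tx(vcc, skb);

	/* example_hw_queue_tx() is a hypothetical hardware-queue routine */
	return example_hw_queue_tx(vcc, skb);
}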
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 8b276fd9a127..01ce3997cb42 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -654,6 +654,7 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#ifdef CONFIG_GENERIC_ATOMIC64
@@ -1075,6 +1076,7 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v
}
#endif
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#include <asm-generic/atomic-long.h>
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 75d5b031e802..69c78477590b 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -232,12 +232,24 @@ extern void __audit_file(const struct file *);
extern void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type);
-extern void __audit_seccomp(unsigned long syscall, long signr, int code);
+extern void audit_seccomp(unsigned long syscall, long signr, int code);
+extern void audit_seccomp_actions_logged(const char *names,
+ const char *old_names, int res);
extern void __audit_ptrace(struct task_struct *t);
+static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
+{
+ task->audit_context = ctx;
+}
+
+static inline struct audit_context *audit_context(void)
+{
+ return current->audit_context;
+}
+
static inline bool audit_dummy_context(void)
{
- void *p = current->audit_context;
+ void *p = audit_context();
return !p || *(int *)p;
}
static inline void audit_free(struct task_struct *task)
@@ -249,12 +261,12 @@ static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{
- if (unlikely(current->audit_context))
+ if (unlikely(audit_context()))
__audit_syscall_entry(major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
{
- if (unlikely(current->audit_context)) {
+ if (unlikely(audit_context())) {
int success = is_syscall_success(pt_regs);
long return_code = regs_return_value(pt_regs);
@@ -302,12 +314,6 @@ static inline void audit_inode_child(struct inode *parent,
}
void audit_core_dumps(long signr);
-static inline void audit_seccomp(unsigned long syscall, long signr, int code)
-{
- if (audit_enabled && unlikely(!audit_dummy_context()))
- __audit_seccomp(syscall, signr, code);
-}
-
static inline void audit_ptrace(struct task_struct *t)
{
if (unlikely(!audit_dummy_context()))
@@ -468,6 +474,12 @@ static inline bool audit_dummy_context(void)
{
return true;
}
+static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
+{ }
+static inline struct audit_context *audit_context(void)
+{
+ return NULL;
+}
static inline struct filename *audit_reusename(const __user char *name)
{
return NULL;
@@ -498,10 +510,11 @@ static inline void audit_inode_child(struct inode *parent,
{ }
static inline void audit_core_dumps(long signr)
{ }
-static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
-{ }
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
+static inline void audit_seccomp_actions_logged(const char *names,
+ const char *old_names, int res)
+{ }
static inline int auditsc_get_stamp(struct audit_context *ctx,
struct timespec64 *t, unsigned int *serial)
{
@@ -513,7 +526,7 @@ static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
}
static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
{
- return -1;
+ return AUDIT_SID_UNSET;
}
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index b0a7f315bfbe..212b3822d180 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -485,7 +485,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
+ u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
@@ -819,7 +819,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
return VIRTCHNL_ERR_PARAM;
}
/* few more checks */
- if ((valid_len != msglen) || (err_msg_format))
+ if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index bfe86b54f6c1..0bd432a4d7bd 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
set_wb_congested(bdi->wb.congested, sync);
}
+struct wb_lock_cookie {
+ bool locked;
+ unsigned long flags;
+};
+
#ifdef CONFIG_CGROUP_WRITEBACK
/**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index f6be4b0b6c18..72ca0f3d39f3 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
* holding inode->i_lock, the i_pages lock or wb->list_lock. This
@@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction. IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction. IRQs may or may not be disabled on
+ * return.
*/
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
@@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
* Paired with store_release in inode_switch_wb_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
- if (unlikely(*lockedp))
- xa_lock_irq(&inode->i_mapping->i_pages);
+ if (unlikely(cookie->locked))
+ xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
/*
* Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
@@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- if (unlikely(locked))
- xa_unlock_irq(&inode->i_mapping->i_pages);
+ if (unlikely(cookie->locked))
+ xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
rcu_read_unlock();
}
@@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
}
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
return inode_to_wb(inode);
}
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
}
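For illustration only, a minimal usage sketch of the begin/end pairing described in the comments above; only unlocked_inode_to_wb_begin()/_end() and struct wb_lock_cookie come from this patch, while example_peek_at_wb() and inspect_wb() are hypothetical.

static void example_peek_at_wb(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	inspect_wb(wb);		/* hypothetical; must not sleep here */
	unlocked_inode_to_wb_end(inode, &cookie);
}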
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 2baab6f3861d..7fbf0539e14a 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -84,10 +84,6 @@ struct backlight_properties {
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
-#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */
-#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */
-#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */
-#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */
};
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 4955e0863b83..c05f24fac4f6 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -150,5 +150,6 @@ extern int do_execveat(int, struct filename *,
const char __user * const __user *,
const char __user * const __user *,
int);
+int do_execve_file(struct file *file, void *__argv, void *__envp);
#endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ce547a25e8ae..f08f5fe7bd08 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -67,8 +67,12 @@
#define bio_multiple_segments(bio) \
((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
-#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
+#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
+
+#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
+#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
/*
* Return the data direction, READ or WRITE.
@@ -123,6 +127,11 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
+static inline bool bio_full(struct bio *bio)
+{
+ return bio->bi_vcnt >= bio->bi_max_vecs;
+}
+
/*
* will die
*/
@@ -406,13 +415,14 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
return bio_split(bio, sectors, gfp, bs);
}
-extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
};
-extern void bioset_free(struct bio_set *);
-extern mempool_t *biovec_create_pool(int pool_entries);
+extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
+extern void bioset_exit(struct bio_set *);
+extern int biovec_init_pool(mempool_t *pool, int pool_entries);
+extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);
@@ -421,11 +431,11 @@ extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
-extern struct bio_set *fs_bio_set;
+extern struct bio_set fs_bio_set;
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
@@ -470,6 +480,10 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+bool __bio_try_merge_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int off);
+void __bio_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
@@ -499,7 +513,10 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
}
#endif
+extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
+ struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
+extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
@@ -507,7 +524,13 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
struct iov_iter *,
gfp_t);
extern int bio_uncopy_user(struct bio *);
-void zero_fill_bio(struct bio *bio);
+void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+
+static inline void zero_fill_bio(struct bio *bio)
+{
+ zero_fill_bio_iter(bio, bio->bi_iter);
+}
+
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
@@ -722,11 +745,11 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
- mempool_t *bio_pool;
- mempool_t *bvec_pool;
+ mempool_t bio_pool;
+ mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t *bio_integrity_pool;
- mempool_t *bvec_integrity_pool;
+ mempool_t bio_integrity_pool;
+ mempool_t bvec_integrity_pool;
#endif
/*
@@ -745,6 +768,11 @@ struct biovec_slab {
struct kmem_cache *slab;
};
+static inline bool bioset_initialized(struct bio_set *bs)
+{
+ return bs->bio_slab != NULL;
+}
+
/*
* a small number of entries is fine, not going to be performance critical.
* basically we just need to survive
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3986f4b3461..e3147eb74222 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,9 @@
struct blk_mq_tags;
struct blk_flush_queue;
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ */
struct blk_mq_hw_ctx {
struct {
spinlock_t lock;
@@ -256,7 +259,8 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
-
+bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
+ struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -277,8 +281,6 @@ void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
- int (reinit_request)(void *, struct request *));
int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 17b18b91ebac..3c4f390aea4b 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/bvec.h>
+#include <linux/ktime.h>
struct bio_set;
struct bio;
@@ -90,10 +91,52 @@ static inline bool blk_path_error(blk_status_t error)
return true;
}
-struct blk_issue_stat {
- u64 stat;
+/*
+ * From most significant bit:
+ * 1 bit: reserved for other usage, see below
+ * 12 bits: original size of bio
+ * 51 bits: issue time of bio
+ */
+#define BIO_ISSUE_RES_BITS 1
+#define BIO_ISSUE_SIZE_BITS 12
+#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS)
+#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
+#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
+#define BIO_ISSUE_SIZE_MASK \
+ (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
+#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
+
+/* Reserved bit for blk-throtl */
+#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
+
+struct bio_issue {
+ u64 value;
};
+static inline u64 __bio_issue_time(u64 time)
+{
+ return time & BIO_ISSUE_TIME_MASK;
+}
+
+static inline u64 bio_issue_time(struct bio_issue *issue)
+{
+ return __bio_issue_time(issue->value);
+}
+
+static inline sector_t bio_issue_size(struct bio_issue *issue)
+{
+ return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
+}
+
+static inline void bio_issue_init(struct bio_issue *issue,
+ sector_t size)
+{
+ size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
+ issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
+ (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
+ ((u64)size << BIO_ISSUE_SIZE_SHIFT));
+}
+
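For illustration only, a self-contained userspace sketch that mirrors the 1/12/51-bit packing documented above so the masks can be checked in isolation; the EX_* constants restate the macros from this hunk and the sample values are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define EX_RES_BITS	1
#define EX_SIZE_BITS	12
#define EX_RES_SHIFT	(64 - EX_RES_BITS)		/* 63 */
#define EX_SIZE_SHIFT	(EX_RES_SHIFT - EX_SIZE_BITS)	/* 51 */
#define EX_TIME_MASK	((1ULL << EX_SIZE_SHIFT) - 1)
#define EX_SIZE_MASK	(((1ULL << EX_SIZE_BITS) - 1) << EX_SIZE_SHIFT)

int main(void)
{
	uint64_t time_ns = 123456789ULL;	/* stands in for ktime_get_ns() */
	uint64_t size = 8;			/* bio size in sectors, arbitrary */
	uint64_t value = (time_ns & EX_TIME_MASK) |
			 ((size & ((1ULL << EX_SIZE_BITS) - 1)) << EX_SIZE_SHIFT);

	/* unpack the two fields again, as bio_issue_time()/bio_issue_size() do */
	printf("time=%llu size=%llu\n",
	       (unsigned long long)(value & EX_TIME_MASK),
	       (unsigned long long)((value & EX_SIZE_MASK) >> EX_SIZE_SHIFT));
	return 0;
}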
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -138,7 +181,7 @@ struct bio {
struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
void *bi_cg_private;
- struct blk_issue_stat bi_issue_stat;
+ struct bio_issue bi_issue;
#endif
#endif
union {
@@ -186,6 +229,8 @@ struct bio {
* throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
* of this bio. */
+#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+
/* See BVEC_POOL_OFFSET below before adding new flags */
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9af3e0f430bc..9154570edf29 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -125,16 +125,25 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
-/* timeout is expired */
-#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
/* already slept for hybrid poll */
-#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
/*
+ * Request state for blk-mq.
+ */
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
* Try to put the fields that are referenced together in the same cacheline.
*
* If you modify this structure, make sure to update blk_rq_init() and
@@ -205,9 +214,20 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
- unsigned long start_time;
- struct blk_issue_stat issue_stat;
- /* Number of scatter-gather DMA addr+len pairs after
+ /* Time that I/O was submitted to the kernel. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ unsigned short throtl_size;
+#endif
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
@@ -219,32 +239,14 @@ struct request {
unsigned short write_hint;
unsigned short ioprio;
- unsigned int timeout;
-
void *special; /* opaque pointer available for LLD use */
unsigned int extra_len; /* length of alignment and padding */
- /*
- * On blk-mq, the lower bits of ->gstate (generation number and
- * state) carry the MQ_RQ_* state value and the upper bits the
- * generation number which is monotonically incremented and used to
- * distinguish the reuse instances.
- *
- * ->gstate_seq allows updates to ->gstate and other fields
- * (currently ->deadline) during request start to be read
- * atomically from the timeout path, so that it can operate on a
- * coherent set of information.
- */
- seqcount_t gstate_seq;
- u64 gstate;
+ enum mq_rq_state state;
+ refcount_t ref;
- /*
- * ->aborted_gstate is used by the timeout to claim a specific
- * recycle instance of this request. See blk_mq_timeout_work().
- */
- struct u64_stats_sync aborted_gstate_sync;
- u64 aborted_gstate;
+ unsigned int timeout;
/* access through blk_rq_set_deadline, blk_rq_deadline */
unsigned long __deadline;
@@ -267,8 +269,6 @@ struct request {
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
#endif
};
@@ -328,9 +328,8 @@ typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
typedef void (exit_rq_fn)(struct request_queue *, struct request *);
enum blk_eh_timer_return {
- BLK_EH_NOT_HANDLED,
- BLK_EH_HANDLED,
- BLK_EH_RESET_TIMER,
+ BLK_EH_DONE, /* driver has completed the command */
+ BLK_EH_RESET_TIMER, /* reset timer and try again */
};
typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
@@ -563,7 +562,6 @@ struct request_queue {
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
- struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight[2];
@@ -605,6 +603,11 @@ struct request_queue {
* initialized by the low level device driver (e.g. scsi/sd.c).
* Stacking drivers (device mappers) may or may not initialize
* these fields.
+ *
+ * Reads of this information must be protected with blk_queue_enter() /
+ * blk_queue_exit(). Modifying this information is only allowed while
+ * no requests are being processed. See also blk_mq_freeze_queue() and
+ * blk_mq_unfreeze_queue().
*/
unsigned int nr_zones;
unsigned long *seq_zones_bitmap;
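For illustration only, a minimal sketch of the access rule stated in the comment above: take a queue reference around reads of the zone fields. The q->nr_zones field and the enter/exit rule come from this hunk; example_read_nr_zones() is hypothetical and the blk_queue_enter(q, flags)/blk_queue_exit(q) signatures are assumed from the same kernel series.

static unsigned int example_read_nr_zones(struct request_queue *q)
{
	unsigned int nr = 0;

	/* blk_queue_enter() returns 0 on success (signature assumed) */
	if (!blk_queue_enter(q, 0)) {
		nr = q->nr_zones;
		blk_queue_exit(q);
	}
	return nr;
}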
@@ -650,7 +653,7 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set *bio_split;
+ struct bio_set bio_split;
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
@@ -737,6 +740,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q) \
test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);
@@ -961,11 +965,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request_flags(struct request_queue *,
- unsigned int op,
- blk_mq_req_flags_t flags);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- gfp_t gfp_mask);
+ blk_mq_req_flags_t flags);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -1373,7 +1374,6 @@ extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
-extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);
@@ -1782,48 +1782,6 @@ int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-#ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
-static inline void set_start_time_ns(struct request *req)
-{
- preempt_disable();
- req->start_time_ns = sched_clock();
- preempt_enable();
-}
-
-static inline void set_io_start_time_ns(struct request *req)
-{
- preempt_disable();
- req->io_start_time_ns = sched_clock();
- preempt_enable();
-}
-
-static inline uint64_t rq_start_time_ns(struct request *req)
-{
- return req->start_time_ns;
-}
-
-static inline uint64_t rq_io_start_time_ns(struct request *req)
-{
- return req->io_start_time_ns;
-}
-#else
-static inline void set_start_time_ns(struct request *req) {}
-static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
-{
- return 0;
-}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
-{
- return 0;
-}
-#endif
-
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 30d15e64b993..975fb4cf1bb7 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -66,7 +66,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
- enum bpf_attach_type type);
+ enum bpf_attach_type type,
+ void *t_ctx);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
@@ -120,16 +121,18 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ t_ctx); \
release_sock(sk); \
} \
__ret; \
@@ -151,10 +154,16 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
@@ -185,6 +194,7 @@ struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
@@ -197,6 +207,8 @@ static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
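The BPF_CGROUP_RUN_SA_PROG_LOCK() hunk above only shows the macro shape: conditionally run the sock_addr filter under the socket lock, thread the new t_ctx argument through, and collapse to ({ 0; }) when cgroup-BPF is compiled out. The following is a hypothetical, stand-alone user-space sketch of that shape; struct sock, lock_sock(), release_sock() and run_filter_sock_addr() below are stubs invented for illustration, not the kernel implementations.

/* Hypothetical sketch of the "run hook under the socket lock" macro shape.
 * Uses GNU statement expressions; build with: gcc -Wall -pthread sketch.c */
#include <stdio.h>
#include <pthread.h>

struct sock { pthread_mutex_t lock; };
struct sockaddr;

static int hook_enabled = 1;                  /* stands in for cgroup_bpf_enabled */

static void lock_sock(struct sock *sk)    { pthread_mutex_lock(&sk->lock); }
static void release_sock(struct sock *sk) { pthread_mutex_unlock(&sk->lock); }

/* stand-in for __cgroup_bpf_run_filter_sock_addr(); t_ctx is opaque extra state */
static int run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr,
                                int type, void *t_ctx)
{
    (void)sk; (void)uaddr; (void)t_ctx;
    return type == 0 ? 0 : -1;                /* pretend type 0 is allowed */
}

#define RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                    \
({                                                                  \
    int __ret = 0;                                                  \
    if (hook_enabled) {                                             \
        lock_sock(sk);                                              \
        __ret = run_filter_sock_addr(sk, uaddr, type, t_ctx);       \
        release_sock(sk);                                           \
    }                                                               \
    __ret;                                                          \
})

int main(void)
{
    struct sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER };

    printf("verdict: %d\n", RUN_SA_PROG_LOCK(&sk, NULL, 0, NULL));
    return 0;
}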
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 95a7abd0ee92..7df32a3200f7 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -22,6 +22,8 @@ struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
+struct seq_file;
+struct btf;
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
@@ -31,6 +33,7 @@ struct bpf_map_ops {
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+ void (*map_release_uref)(struct bpf_map *map);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -43,10 +46,14 @@ struct bpf_map_ops {
void (*map_fd_put_ptr)(void *ptr);
u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
+ void (*map_seq_show_elem)(struct bpf_map *map, void *key,
+ struct seq_file *m);
+ int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
+ u32 key_type_id, u32 value_type_id);
};
struct bpf_map {
- /* 1st cacheline with read-mostly members of which some
+ /* The first two cachelines with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
*/
const struct bpf_map_ops *ops ____cacheline_aligned;
@@ -62,10 +69,13 @@ struct bpf_map {
u32 pages;
u32 id;
int numa_node;
+ u32 btf_key_type_id;
+ u32 btf_value_type_id;
+ struct btf *btf;
bool unpriv_array;
- /* 7 bytes hole */
+ /* 55 bytes hole */
- /* 2nd cacheline with misc members to avoid false sharing
+ /* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
struct user_struct *user ____cacheline_aligned;
@@ -100,6 +110,16 @@ static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
return container_of(map, struct bpf_offloaded_map, map);
}
+static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+}
+
+static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
+{
+ return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+}
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* function argument constraints */
@@ -220,6 +240,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_ld_abs)(const struct bpf_insn *orig,
+ struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
@@ -339,8 +361,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
- __u32 __user *prog_ids, u32 request_cnt,
- __u32 __user *prog_cnt);
+ u32 *prog_ids, u32 request_cnt,
+ u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
@@ -351,6 +373,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog **_prog, *__prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
+ preempt_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
@@ -362,6 +385,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
} \
_out: \
rcu_read_unlock(); \
+ preempt_enable_no_resched(); \
_ret; \
})
@@ -434,12 +458,13 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
-void bpf_fd_array_map_clear(struct bpf_map *map);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
+int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
+ size_t actual_size);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
* forced to use 'long' read/writes to try to atomically copy long counters.
@@ -462,14 +487,20 @@ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
-struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
+struct xdp_buff;
+struct sk_buff;
+
+struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+ struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
-struct xdp_buff;
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
@@ -548,6 +579,25 @@ static inline void __dev_map_flush(struct bpf_map *map)
{
}
+struct xdp_buff;
+struct bpf_dtab_netdev;
+
+static inline
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -562,7 +612,6 @@ static inline void __cpu_map_flush(struct bpf_map *map)
{
}
-struct xdp_buff;
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
struct xdp_buff *xdp,
struct net_device *dev_rx)
@@ -604,7 +653,7 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
-static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
return aux->offload_requested;
}
@@ -645,6 +694,7 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
+struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
@@ -652,6 +702,12 @@ static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
return NULL;
}
+static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map,
+ void *key)
+{
+ return NULL;
+}
+
static inline int sock_map_prog(struct bpf_map *map,
struct bpf_prog *prog,
u32 type)
@@ -660,6 +716,31 @@ static inline int sock_map_prog(struct bpf_map *map,
}
#endif
+#if defined(CONFIG_XDP_SOCKETS)
+struct xdp_sock;
+struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs);
+void __xsk_map_flush(struct bpf_map *map);
+#else
+struct xdp_sock;
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
+
+static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(struct bpf_map *map)
+{
+}
+#endif
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -673,10 +754,11 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
+extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
+extern const struct bpf_func_proto bpf_sock_hash_update_proto;
+extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
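The BPF_PROG_RUN_ARRAY hunk above adds preempt_disable()/preempt_enable_no_resched() around the existing walk; the walk itself iterates a NULL-terminated program array and ANDs the verdicts. A minimal user-space sketch of that AND-the-verdicts loop follows, with hook_fn and run_array() as invented stand-ins; RCU and preemption control have no user-space equivalent here and are omitted.

/* Hypothetical sketch of the run-array walk: call every hook in a
 * NULL-terminated array and AND the verdicts, so any hook returning 0
 * rejects the object. Not the kernel macro. */
#include <stdio.h>

typedef unsigned int (*hook_fn)(const void *ctx);

static unsigned int allow_all(const void *ctx) { (void)ctx; return 1; }
static unsigned int drop_all(const void *ctx)  { (void)ctx; return 0; }

static unsigned int run_array(hook_fn *progs, const void *ctx)
{
    unsigned int ret = 1;

    for (; *progs; progs++)
        ret &= (*progs)(ctx);
    return ret;
}

int main(void)
{
    hook_fn accept_only[] = { allow_all, NULL };
    hook_fn mixed[]       = { allow_all, drop_all, NULL };

    printf("accept_only -> %u\n", run_array(accept_only, "pkt")); /* 1 */
    printf("mixed       -> %u\n", run_array(mixed, "pkt"));       /* 0 */
    return 0;
}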
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
new file mode 100644
index 000000000000..5f8a4283092d
--- /dev/null
+++ b/include/linux/bpf_lirc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_LIRC_H
+#define _BPF_LIRC_H
+
+#include <uapi/linux/bpf.h>
+
+#ifdef CONFIG_BPF_LIRC_MODE2
+int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_detach(const union bpf_attr *attr);
+int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+#else
+static inline int lirc_prog_attach(const union bpf_attr *attr)
+{
+ return -EINVAL;
+}
+
+static inline int lirc_prog_detach(const union bpf_attr *attr)
+{
+ return -EINVAL;
+}
+
+static inline int lirc_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _BPF_LIRC_H */
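bpf_lirc.h follows the usual config-stub convention: when CONFIG_BPF_LIRC_MODE2 is off, callers still compile and link because static inline stubs return -EINVAL. A hypothetical stand-alone illustration of that convention, with an invented feature_attach() guarded by a HAVE_FEATURE define, is sketched below.

/* Hypothetical illustration of the config-stub pattern: when the feature is
 * compiled out, callers still link because a static inline stub returns an
 * error. Toggle with: gcc -DHAVE_FEATURE sketch.c */
#include <stdio.h>
#include <errno.h>

#ifdef HAVE_FEATURE
static int feature_attach(int prog_fd)
{
    printf("attached prog fd %d\n", prog_fd);
    return 0;
}
#else
static inline int feature_attach(int prog_fd)
{
    (void)prog_fd;
    return -EINVAL;        /* same shape as the lirc_prog_* stubs */
}
#endif

int main(void)
{
    int err = feature_attach(3);

    if (err)
        fprintf(stderr, "attach failed: %d\n", err);
    return 0;
}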
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
index e6fe98ae3794..ddf896abcfb6 100644
--- a/include/linux/bpf_trace.h
+++ b/include/linux/bpf_trace.h
@@ -2,7 +2,6 @@
#ifndef __LINUX_BPF_TRACE_H__
#define __LINUX_BPF_TRACE_H__
-#include <trace/events/bpf.h>
#include <trace/events/xdp.h>
#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 2b28fcf6f6ae..c5700c2d5549 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -9,9 +9,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
@@ -25,6 +26,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#endif
+#ifdef CONFIG_BPF_LIRC_MODE2
+BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -47,6 +51,10 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
+#if defined(CONFIG_XDP_SOCKETS)
+BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
+#endif
#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7e61c395fddf..38b04f559ad3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -142,10 +142,11 @@ struct bpf_verifier_state_list {
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ unsigned long map_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
+ int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
};
@@ -173,6 +174,11 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
#define BPF_MAX_SUBPROGS 256
+struct bpf_subprog_info {
+ u32 start; /* insn idx of function entry point */
+ u16 stack_depth; /* max. stack depth used by this function */
+};
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -191,14 +197,12 @@ struct bpf_verifier_env {
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
struct bpf_verifier_log log;
- u32 subprog_starts[BPF_MAX_SUBPROGS];
- /* computes the stack depth of each bpf function */
- u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
};
-void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
- va_list args);
+__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
+ const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
new file mode 100644
index 000000000000..687b1760bb9f
--- /dev/null
+++ b/include/linux/bpfilter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BPFILTER_H
+#define _LINUX_BPFILTER_H
+
+#include <uapi/linux/bpfilter.h>
+
+struct sock;
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+ unsigned int optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
+ int *optlen);
+extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
+ char __user *optval,
+ unsigned int optlen, bool is_set);
+#endif
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index d3339dd48b1a..daa9234a9baf 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -25,6 +25,7 @@
#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM89610 0x03625cd0
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7260 0xae025190
@@ -84,6 +85,7 @@
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
@@ -218,6 +220,9 @@
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+/* BCM54612E Registers */
+#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
+#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 28a7ccc55c89..6aeaf6472665 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -72,8 +72,7 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, int dd_job_size,
- void (*release)(struct device *));
+ bsg_job_fn *job_fn, int dd_job_size);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 0c7dd9ceb139..dac37b6e00ec 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -17,17 +17,13 @@ struct bsg_ops {
struct bsg_class_device {
struct device *class_dev;
- struct device *parent;
int minor;
struct request_queue *queue;
- struct kref ref;
const struct bsg_ops *ops;
- void (*release)(struct device *);
};
int bsg_register_queue(struct request_queue *q, struct device *parent,
- const char *name, const struct bsg_ops *ops,
- void (*release)(struct device *));
+ const char *name, const struct bsg_ops *ops);
int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
void bsg_unregister_queue(struct request_queue *q);
#else
diff --git a/include/linux/btf.h b/include/linux/btf.h
new file mode 100644
index 000000000000..e076c4697049
--- /dev/null
+++ b/include/linux/btf.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Facebook */
+
+#ifndef _LINUX_BTF_H
+#define _LINUX_BTF_H 1
+
+#include <linux/types.h>
+
+struct btf;
+struct btf_type;
+union bpf_attr;
+
+extern const struct file_operations btf_fops;
+
+void btf_put(struct btf *btf);
+int btf_new_fd(const union bpf_attr *attr);
+struct btf *btf_get_by_fd(int fd);
+int btf_get_info_by_fd(const struct btf *btf,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+/* Figure out the size of a type_id. If type_id is a modifier
+ * (e.g. const), it will be resolved to find out the type with size.
+ *
+ * For example:
+ * In describing "const void *", type_id is "const" and "const"
+ * refers to "void *". The return type will be "void *".
+ *
+ * If type_id is a simple "int", then return type will be "int".
+ *
+ * @btf: struct btf object
+ * @type_id: Find out the size of type_id. The type_id of the return
+ * type is set to *type_id.
+ * @ret_size: It can be NULL. If not NULL, the size of the return
+ * type is set to *ret_size.
+ * Return: The btf_type (resolved to another type with size info if needed).
+ * NULL is returned if type_id itself does not have size info
+ * (e.g. void) or it cannot be resolved to another type that
+ * has size info.
+ * *type_id and *ret_size will not be changed in the
+ * NULL return case.
+ */
+const struct btf_type *btf_type_id_size(const struct btf *btf,
+ u32 *type_id,
+ u32 *ret_size);
+void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m);
+int btf_get_fd_by_id(u32 id);
+u32 btf_id(const struct btf *btf);
+
+#endif
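The btf_type_id_size() comment above describes a resolution rule: modifiers such as const carry no size of their own and are chased until a sized type is found, while void-like types fail. The sketch below models only that rule with an invented, in-memory type table; it is not BTF code and all names are hypothetical.

/* Hypothetical model of the documented resolution rule: chase modifiers
 * until a sized type is found; "void" has no size and resolution fails. */
#include <stdio.h>
#include <stddef.h>

enum kind { KIND_INT, KIND_PTR, KIND_CONST, KIND_VOID };

struct type {
    enum kind kind;
    size_t size;    /* valid for KIND_INT / KIND_PTR only */
    int next;       /* for KIND_CONST: the wrapped type id */
};

/* tiny type table: 0=void, 1=int, 2=void*, 3=const void* */
static const struct type types[] = {
    [0] = { KIND_VOID,  0, -1 },
    [1] = { KIND_INT,   4, -1 },
    [2] = { KIND_PTR,   8, -1 },
    [3] = { KIND_CONST, 0,  2 },
};

/* returns 0 and updates *type_id/*ret_size on success, -1 if no size exists */
static int type_id_size(int *type_id, size_t *ret_size)
{
    int id = *type_id;

    while (types[id].kind == KIND_CONST)
        id = types[id].next;        /* chase the modifier */

    if (types[id].kind == KIND_VOID)
        return -1;                  /* void: no size info */

    *type_id = id;
    *ret_size = types[id].size;
    return 0;
}

int main(void)
{
    int id = 3;                     /* "const void *" */
    size_t sz;

    if (!type_id_size(&id, &sz))
        printf("resolved to id %d, size %zu\n", id, sz);  /* id 2, size 8 */
    return 0;
}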
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 894e5d125de6..96225a77c112 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,8 +205,6 @@ void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
-loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
- loff_t length, int whence);
extern int buffer_heads_over_limit;
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 3d9805297cda..70e19bc6cc9f 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -34,9 +34,8 @@ enum cache_type {
* @shared_cpu_map: logical cpumask representing all the cpus sharing
* this cache node
* @attributes: bitfield representing various cache attributes
- * @of_node: if devicetree is used, this represents either the cpu node in
- * case there's no explicit cache node or the cache node itself in the
- * device tree
+ * @fw_token: Unique value used to determine if different cacheinfo
+ * structures represent a single hardware cache instance.
* @disable_sysfs: indicates whether this node is visible to the user via
* sysfs or not
* @priv: pointer to any private data structure specific to particular
@@ -65,8 +64,7 @@ struct cacheinfo {
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
#define CACHE_ID BIT(4)
-
- struct device_node *of_node;
+ void *fw_token;
bool disable_sysfs;
void *priv;
};
@@ -99,6 +97,23 @@ int func(unsigned int cpu) \
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
+int cache_setup_acpi(unsigned int cpu);
+#ifndef CONFIG_ACPI_PPTT
+/*
+ * acpi_find_last_cache_level is only called on ACPI enabled
+ * platforms using the PPTT for topology. This means that if
+ * the platform supports other firmware configuration methods
+ * we need to stub out the call when ACPI is disabled.
+ * ACPI enabled platforms not using PPTT won't be making calls
+ * to this function so we need not worry about them.
+ */
+static inline int acpi_find_last_cache_level(unsigned int cpu)
+{
+ return 0;
+}
+#else
+int acpi_find_last_cache_level(unsigned int cpu);
+#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 7ecfc88314d8..4903deb0777a 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -628,6 +628,7 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_XATTR_SHARED)
#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
CEPH_CAP_FILE_RD)
+#define CEPH_STAT_RSTAT CEPH_CAP_FILE_WREXTEND
#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
CEPH_CAP_LINK_SHARED | \
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 528ccc943cee..0d6ee04b4c41 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -77,7 +77,10 @@ struct ceph_osd_data {
u32 bio_length;
};
#endif /* CONFIG_BLOCK */
- struct ceph_bvec_iter bvec_pos;
+ struct {
+ struct ceph_bvec_iter bvec_pos;
+ u32 num_bvecs;
+ };
};
};
@@ -167,6 +170,7 @@ struct ceph_osd_request {
u64 r_tid; /* unique for this client */
struct rb_node r_node;
struct rb_node r_mc_node; /* map check */
+ struct work_struct r_complete_work;
struct ceph_osd *r_osd;
struct ceph_osd_request_target r_t;
@@ -198,7 +202,6 @@ struct ceph_osd_request {
struct timespec r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
- bool r_abort_on_full; /* return ENOSPC when full */
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
@@ -344,6 +347,8 @@ struct ceph_osd_client {
struct rb_root linger_map_checks;
atomic_t num_requests;
atomic_t num_homeless;
+ bool abort_on_full; /* abort w/ ENOSPC when full */
+ int abort_err;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
#ifdef CONFIG_DEBUG_FS
@@ -356,6 +361,7 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op_reply;
struct workqueue_struct *notify_wq;
+ struct workqueue_struct *completion_wq;
};
static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
@@ -375,6 +381,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
+void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
@@ -412,6 +419,10 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
struct ceph_bio_iter *bio_pos,
u32 bio_length);
#endif /* CONFIG_BLOCK */
+void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct bio_vec *bvecs, u32 num_bvecs,
+ u32 bytes);
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
@@ -426,13 +437,14 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
bool own_pages);
void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
unsigned int which,
- struct bio_vec *bvecs, u32 bytes);
+ struct bio_vec *bvecs, u32 num_bvecs,
+ u32 bytes);
extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
+extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e71fb222c7c3..5675b1f09bc5 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -279,10 +279,10 @@ bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
bool any_change);
-int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
- const struct ceph_object_id *oid,
- const struct ceph_object_locator *oloc,
- struct ceph_pg *raw_pgid);
+void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
+ const struct ceph_object_id *oid,
+ const struct ceph_object_locator *oloc,
+ struct ceph_pg *raw_pgid);
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
const struct ceph_object_id *oid,
const struct ceph_object_locator *oloc,
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index b454dfce60d9..4060004968c8 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: cfag12864b.h
* Version: 0.1.0
* Description: cfag12864b LCD driver header
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-12
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _CFAG12864B_H_
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index dc5b70449dc6..c0e68f903011 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -105,6 +105,8 @@ enum {
struct cgroup_file {
/* do not access any fields from outside cgroup core */
struct kernfs_node *kn;
+ unsigned long notified_at;
+ struct timer_list notify_timer;
};
/*
@@ -128,6 +130,9 @@ struct cgroup_subsys_state {
struct list_head sibling;
struct list_head children;
+ /* flush target list anchored at cgrp->rstat_css_list */
+ struct list_head rstat_css_node;
+
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
@@ -256,12 +261,16 @@ struct css_set {
struct rcu_head rcu_head;
};
+struct cgroup_base_stat {
+ struct task_cputime cputime;
+};
+
/*
- * cgroup basic resource usage statistics. Accounting is done per-cpu in
- * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
- * reads.
+ * rstat - cgroup scalable recursive statistics. Accounting is done
+ * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * hierarchy on reads.
*
- * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
+ * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
* linked into the updated tree. On the following read, propagation only
* considers and consumes the updated tree. This makes reading O(the
* number of descendants which have been active since last read) instead of
@@ -271,20 +280,24 @@ struct css_set {
* aren't active and stat may be read frequently. The combination can
* become very expensive. By propagating selectively, increasing reading
* frequency decreases the cost of each read.
+ *
+ * This struct hosts both the fields which implement the above -
+ * updated_children and updated_next - and the fields which track basic
+ * resource statistics on top of it - bsync, bstat and last_bstat.
*/
-struct cgroup_cpu_stat {
+struct cgroup_rstat_cpu {
/*
- * ->sync protects all the current counters. These are the only
- * fields which get updated in the hot path.
+ * ->bsync protects ->bstat. These are the only fields which get
+ * updated in the hot path.
*/
- struct u64_stats_sync sync;
- struct task_cputime cputime;
+ struct u64_stats_sync bsync;
+ struct cgroup_base_stat bstat;
/*
* Snapshots at the last reading. These are used to calculate the
* deltas to propagate to the global counters.
*/
- struct task_cputime last_cputime;
+ struct cgroup_base_stat last_bstat;
/*
* Child cgroups with stat updates on this cpu since the last read
@@ -295,18 +308,12 @@ struct cgroup_cpu_stat {
* to the cgroup makes it unnecessary for each per-cpu struct to
* point back to the associated cgroup.
*
- * Protected by per-cpu cgroup_cpu_stat_lock.
+ * Protected by per-cpu cgroup_rstat_cpu_lock.
*/
struct cgroup *updated_children; /* terminated by self cgroup */
struct cgroup *updated_next; /* NULL iff not on the list */
};
-struct cgroup_stat {
- /* per-cpu statistics are collected into the folowing global counters */
- struct task_cputime cputime;
- struct prev_cputime prev_cputime;
-};
-
struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
@@ -406,10 +413,14 @@ struct cgroup {
*/
struct cgroup *dom_cgrp;
+ /* per-cpu recursive resource statistics */
+ struct cgroup_rstat_cpu __percpu *rstat_cpu;
+ struct list_head rstat_css_list;
+
/* cgroup basic resource statistics */
- struct cgroup_cpu_stat __percpu *cpu_stat;
- struct cgroup_stat pending_stat; /* pending from children */
- struct cgroup_stat stat;
+ struct cgroup_base_stat pending_bstat; /* pending from children */
+ struct cgroup_base_stat bstat;
+ struct prev_cputime prev_cputime; /* for printing out cputime */
/*
* list of pidlists, up to two for each namespace (one for procs, one
@@ -570,6 +581,7 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
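The rstat comments above describe lazy propagation: per-cpu counters are updated in the hot path, and a read propagates only the delta accumulated since the previous snapshot. A minimal stand-alone sketch of that snapshot/delta idea follows, assuming invented charge()/flush() helpers and leaving out the locking and the updated_children/updated_next tree.

/* Hypothetical sketch of the snapshot/delta idea behind cgroup rstat: each
 * "cpu" keeps a running counter plus the value seen at the last flush, and a
 * read only propagates the delta accumulated since then. */
#include <stdio.h>

#define NCPU 4

struct rstat_cpu {
    unsigned long long cur;     /* updated in the hot path */
    unsigned long long last;    /* snapshot at the previous flush */
};

struct cgroup_stats {
    struct rstat_cpu cpu[NCPU];
    unsigned long long total;   /* global counter, updated on flush */
};

static void charge(struct cgroup_stats *cg, int cpu, unsigned long long delta)
{
    cg->cpu[cpu].cur += delta;
}

static unsigned long long flush(struct cgroup_stats *cg)
{
    for (int i = 0; i < NCPU; i++) {
        unsigned long long delta = cg->cpu[i].cur - cg->cpu[i].last;

        cg->total += delta;
        cg->cpu[i].last = cg->cpu[i].cur;
    }
    return cg->total;
}

int main(void)
{
    static struct cgroup_stats cg;      /* zero-initialized */

    charge(&cg, 0, 100);
    charge(&cg, 1, 50);
    printf("first read : %llu\n", flush(&cg));  /* 150 */
    charge(&cg, 1, 25);
    printf("second read: %llu\n", flush(&cg));  /* 175: only the delta was added */
    return 0;
}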
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 473e0c0abb86..c9fdf6f57913 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -690,11 +690,19 @@ static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */
+#ifdef CONFIG_CGROUPS
/*
- * Basic resource stats.
+ * cgroup scalable recursive statistics.
*/
-#ifdef CONFIG_CGROUPS
+void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
+void cgroup_rstat_flush(struct cgroup *cgrp);
+void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
+void cgroup_rstat_flush_hold(struct cgroup *cgrp);
+void cgroup_rstat_flush_release(void);
+/*
+ * Basic resource stats.
+ */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h
index 7cf262a421c3..b3233e8202f9 100644
--- a/include/linux/circ_buf.h
+++ b/include/linux/circ_buf.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * See Documentation/circular-buffers.txt for more information.
+ * See Documentation/core-api/circular-buffers.rst for more information.
*/
#ifndef _LINUX_CIRC_BUF_H
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 210a890008f9..b7cfa037e593 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_clk.h>
#ifdef CONFIG_COMMON_CLK
@@ -218,7 +219,7 @@ struct clk_ops {
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
- int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
/**
@@ -765,6 +766,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
struct clk_rate_request *req);
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long flags);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
@@ -802,8 +806,6 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
-typedef void (*of_clk_init_cb_t)(struct device_node *);
-
struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -890,13 +892,10 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec,
void *data);
-unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags);
-void of_clk_init(const struct of_device_id *matches);
#else /* !CONFIG_OF */
@@ -943,26 +942,16 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
-{
- return 0;
-}
static inline int of_clk_parent_fill(struct device_node *np,
const char **parents, unsigned int size)
{
return 0;
}
-static inline const char *of_clk_get_parent_name(struct device_node *np,
- int index)
-{
- return NULL;
-}
static inline int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
return 0;
}
-static inline void of_clk_init(const struct of_device_id *matches) {}
#endif /* CONFIG_OF */
/*
@@ -996,10 +985,5 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
-#ifdef CONFIG_DEBUG_FS
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
- void *data, const struct file_operations *fops);
-#endif
-
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
new file mode 100644
index 000000000000..8a7b5cd7eac0
--- /dev/null
+++ b/include/linux/clk/davinci.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock drivers for TI DaVinci PLL and PSC controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_CLK_DAVINCI_PLL_H___
+#define __LINUX_CLK_DAVINCI_PLL_H___
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* function for registering clocks in early boot */
+
+#ifdef CONFIG_ARCH_DAVINCI_DA830
+int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA850
+int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM355
+int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm355_psc_init(struct device *dev, void __iomem *base);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM365
+int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm365_psc_init(struct device *dev, void __iomem *base);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM644x
+int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm644x_psc_init(struct device *dev, void __iomem *base);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM646x
+int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm646x_psc_init(struct device *dev, void __iomem *base);
+#endif
+
+#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 081281ad5772..b1a5562b3215 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -7,8 +7,7 @@
*/
#include <linux/types.h>
-
-#ifdef CONFIG_COMPAT
+#include <linux/compat_time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -21,8 +20,11 @@
#include <linux/unistd.h>
#include <asm/compat.h>
+
+#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
+#endif
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -83,6 +85,8 @@
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */
+#ifdef CONFIG_COMPAT
+
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
#endif
@@ -290,8 +294,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int compat_get_timespec64(struct timespec64 *, const void __user *);
-extern int compat_put_timespec64(const struct timespec64 *, void __user *);
extern int get_compat_itimerspec64(struct itimerspec64 *its,
const struct compat_itimerspec __user *uits);
extern int put_compat_itimerspec64(const struct itimerspec64 *its,
@@ -330,6 +332,7 @@ extern int put_compat_rusage(const struct rusage *,
struct compat_rusage __user *);
struct compat_siginfo;
+struct __compat_aio_sigset;
struct compat_dirent {
u32 d_ino;
@@ -553,6 +556,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
compat_long_t nr,
struct io_event __user *events,
struct compat_timespec __user *timeout);
+asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct compat_timespec __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
@@ -1016,7 +1025,9 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
+#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }
+#endif
#endif /* CONFIG_COMPAT */
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
new file mode 100644
index 000000000000..31f2774f1994
--- /dev/null
+++ b/include/linux/compat_time.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_COMPAT_TIME_H
+#define _LINUX_COMPAT_TIME_H
+
+#include <linux/types.h>
+#include <linux/time64.h>
+
+typedef s32 compat_time_t;
+
+struct compat_timespec {
+ compat_time_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct compat_timeval {
+ compat_time_t tv_sec;
+ s32 tv_usec;
+};
+
+extern int compat_get_timespec64(struct timespec64 *, const void __user *);
+extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+
+#endif /* _LINUX_COMPAT_TIME_H */
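compat_timespec carries 32-bit seconds, so compat_get_timespec64()/compat_put_timespec64() widen it to and from the kernel's 64-bit representation. Below is a hypothetical user-space mirror of the widening direction; the struct definitions are local stand-ins rather than the kernel's.

/* Hypothetical user-space mirror of the compat conversion: a 32-bit
 * compat_timespec is widened into a 64-bit representation. */
#include <stdio.h>
#include <stdint.h>

struct compat_timespec {        /* what a 32-bit task passes in */
    int32_t tv_sec;
    int32_t tv_nsec;
};

struct timespec64 {             /* what the kernel works with internally */
    int64_t tv_sec;
    long    tv_nsec;
};

static struct timespec64 compat_to_timespec64(struct compat_timespec cts)
{
    struct timespec64 ts = {
        .tv_sec  = cts.tv_sec,   /* sign-extended to 64 bits */
        .tv_nsec = cts.tv_nsec,
    };
    return ts;
}

int main(void)
{
    struct compat_timespec cts = { .tv_sec = 2147483647, .tv_nsec = 999999999 };
    struct timespec64 ts = compat_to_timespec64(cts);

    printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    return 0;
}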
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index ceb96ecab96e..7087446c24c8 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -25,7 +25,24 @@
#define __SANITIZE_ADDRESS__
#endif
+#undef __no_sanitize_address
+#define __no_sanitize_address __attribute__((no_sanitize("address")))
+
/* Clang doesn't have a way to turn it off per-function, yet. */
#ifdef __noretpoline
#undef __noretpoline
#endif
+
+/*
+ * Not all versions of clang implement the type-generic versions
+ * of the builtin overflow checkers. Fortunately, clang implements
+ * __has_builtin allowing us to avoid awkward version
+ * checks. Unfortunately, we don't know which version of gcc clang
+ * pretends to be, so the macro may or may not be defined.
+ */
+#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+#if __has_builtin(__builtin_mul_overflow) && \
+ __has_builtin(__builtin_add_overflow) && \
+ __has_builtin(__builtin_sub_overflow)
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index b4bf73f5e38f..f1a7492a5cc8 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -343,3 +343,7 @@
* code
*/
#define uninitialized_var(x) x = x
+
+#if GCC_VERSION >= 50100
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index bfa08160db3a..547cdc920a3c 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -44,3 +44,7 @@
#define __builtin_bswap16 _bswap16
#endif
+/*
+ * icc defines __GNUC__, but does not implement the builtin overflow checkers.
+ */
+#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
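Taken together, the three compiler headers gate COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW: clang advertises it via __has_builtin, gcc >= 5.1 sets it unconditionally, and icc undefines it. A stand-alone sketch of using that kind of gate around the type-generic overflow builtins follows; the mul_overflows() wrapper and its manual fallback are assumptions for illustration.

/* Hypothetical stand-alone use of the type-generic overflow builtins behind
 * a __has_builtin/compiler-version gate; the fallback branch is only a
 * sketch for compilers without them. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#ifndef __has_builtin
#define __has_builtin(x) 0      /* compilers without __has_builtin */
#endif

#if __has_builtin(__builtin_mul_overflow) || (defined(__GNUC__) && __GNUC__ >= 5)
static int mul_overflows(size_t a, size_t b, size_t *res)
{
    return __builtin_mul_overflow(a, b, res);
}
#else
static int mul_overflows(size_t a, size_t b, size_t *res)
{
    *res = a * b;                       /* unsigned wrap is well defined */
    return b && a > SIZE_MAX / b;       /* manual overflow check */
}
#endif

int main(void)
{
    size_t bytes;

    if (mul_overflows(SIZE_MAX, 2, &bytes))
        puts("overflow detected");
    else
        printf("%zu bytes\n", bytes);
    return 0;
}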
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index ab4711c63601..42506e4d1f53 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -21,7 +21,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect, is_constant) ({ \
- int ______r; \
+ long ______r; \
static struct ftrace_likely_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index edfeaba95429..a1a959ba24ff 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_CORESIGHT_PMU_H
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d950dad5056a..c265e0468414 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,13 +1,6 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_CORESIGHT_H
diff --git a/include/linux/cper.h b/include/linux/cper.h
index d14ef4e77c8a..9c703a0abe6e 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -381,7 +381,7 @@ struct cper_sec_proc_generic {
/* IA32/X64 Processor Error Section */
struct cper_sec_proc_ia {
__u64 validation_bits;
- __u8 lapic_id;
+ __u64 lapic_id;
__u8 cpuid[48];
};
@@ -551,5 +551,7 @@ const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc);
+void cper_print_proc_ia(const char *pfx,
+ const struct cper_sec_proc_ia *proc);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7b01bc11c692..a97a63eef59f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 87f48dd932eb..882a9b9e34bc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -571,7 +571,7 @@ struct governor_attr {
size_t count);
};
-static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
+static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
{
/*
* Allow remote callbacks if:
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 1eefabf1621f..4325d6fdde9b 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -258,6 +258,7 @@ struct cpuidle_governor {
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
+extern int cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index f7ac2aa93269..3e4ba9d753c8 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,6 +5,7 @@
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <uapi/linux/vmcore.h>
#include <asm/pgtable.h> /* for pgprot_t */
@@ -93,4 +94,21 @@ static inline bool is_kdump_kernel(void) { return 0; }
#endif /* CONFIG_CRASH_DUMP */
extern unsigned long saved_max_pfn;
+
+/* Device Dump information to be filled by drivers */
+struct vmcoredd_data {
+ char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
+ unsigned int size; /* Size of the dump */
+ /* Driver's registered callback to be invoked to collect dump */
+ int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf);
+};
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
+int vmcore_add_device_dump(struct vmcoredd_data *data);
+#else
+static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#endif /* LINUX_CRASHDUMP_H */
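vmcoredd_data above bundles a dump name, a size and a driver callback that fills the buffer when the dump is collected. The following is a hypothetical user-space sketch of that registration-plus-callback pattern; dd_data, add_device_dump() and my_driver_fill() are invented stand-ins, not the kernel API.

/* Hypothetical sketch of the vmcoredd-style registration pattern: a driver
 * hands the core a name, a size and a callback that fills the dump buffer
 * on demand. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dd_data {
    char name[32];
    unsigned int size;
    int (*fill)(struct dd_data *data, void *buf);
};

static int my_driver_fill(struct dd_data *data, void *buf)
{
    memset(buf, 0xab, data->size);      /* pretend to snapshot device state */
    return 0;
}

/* stand-in for vmcore_add_device_dump(): allocate and invoke the callback */
static int add_device_dump(struct dd_data *data)
{
    void *buf = malloc(data->size);

    if (!buf)
        return -1;
    if (data->fill(data, buf)) {
        free(buf);
        return -1;
    }
    printf("collected %u bytes for \"%s\"\n", data->size, data->name);
    free(buf);
    return 0;
}

int main(void)
{
    struct dd_data dd = { .name = "mydev_dump", .size = 64, .fill = my_driver_fill };

    return add_device_dump(&dd);
}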
diff --git a/include/linux/dax.h b/include/linux/dax.h
index f9eb22ad341e..3855e3800f48 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -20,6 +20,9 @@ struct dax_operations {
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
+ /* copy_to_iter: required operation for fs-dax direct-i/o */
+ size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
+ struct iov_iter *);
};
extern struct attribute_group dax_attribute_group;
@@ -64,10 +67,10 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
-int __bdev_dax_supported(struct super_block *sb, int blocksize);
-static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
+static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
- return __bdev_dax_supported(sb, blocksize);
+ return __bdev_dax_supported(bdev, blocksize);
}
static inline struct dax_device *fs_dax_get_by_host(const char *host)
@@ -83,10 +86,13 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
+
+struct page *dax_layout_busy_page(struct address_space *mapping);
#else
-static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+static inline bool bdev_dax_supported(struct block_device *bdev,
+ int blocksize)
{
- return -EOPNOTSUPP;
+ return false;
}
static inline struct dax_device *fs_dax_get_by_host(const char *host)
@@ -103,6 +109,11 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return NULL;
}
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
+{
+ return NULL;
+}
+
static inline int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc)
{
@@ -118,14 +129,16 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
+size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t pfn);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 94acbde17bb1..66c6e17e61e5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -224,6 +224,7 @@ extern seqlock_t rename_lock;
* These are the low-level FS interfaces to the dcache..
*/
extern void d_instantiate(struct dentry *, struct inode *);
+extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 5e335b6203f4..e6c0448ebcc7 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,7 +29,7 @@
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned int flags; /* Private per-task flags */
/* For each stat XXX, add following, aligned appropriately
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 31fef7c34185..6fb0808e87c8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -133,7 +133,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)
@@ -184,7 +184,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_from_iter_fn dax_copy_from_iter;
+ dm_dax_copy_iter_fn dax_copy_from_iter;
+ dm_dax_copy_iter_fn dax_copy_to_iter;
/* For internal device-mapper use. */
struct list_head list;
diff --git a/include/linux/device.h b/include/linux/device.h
index 0059b99e1f25..055a69dbcd18 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -25,6 +25,7 @@
#include <linux/ratelimit.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
+#include <linux/overflow.h>
#include <asm/device.h>
struct device;
@@ -88,6 +89,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @resume: Called to bring a device on this bus out of sleep mode.
* @num_vf: Called to find out how many virtual functions a device on this
* bus supports.
+ * @dma_configure: Called to setup DMA configuration on a device on
+ this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -96,8 +99,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @p: The private data of the driver core, only the driver core can
* touch this.
* @lock_key: Lock class key for use by the lock validator
- * @force_dma: Assume devices on this bus should be set up by dma_configure()
- * even if DMA capability is not explicitly described by firmware.
+ * @need_parent_lock: When probing or removing a device on this bus, the
+ * device core should lock the device's parent.
*
* A bus is a channel between the processor and one or more devices. For the
* purposes of the device model, all devices are connected via a bus, even if
@@ -130,6 +133,8 @@ struct bus_type {
int (*num_vf)(struct device *dev);
+ int (*dma_configure)(struct device *dev);
+
const struct dev_pm_ops *pm;
const struct iommu_ops *iommu_ops;
@@ -137,7 +142,7 @@ struct bus_type {
struct subsys_private *p;
struct lock_class_key lock_key;
- bool force_dma;
+ bool need_parent_lock;
};
extern int __must_check bus_register(struct bus_type *bus);
@@ -256,7 +261,9 @@ enum probe_type {
* automatically.
* @pm: Power management operations of the device which matched
* this driver.
- * @coredump: Called through sysfs to initiate a device coredump.
+ * @coredump: Called when sysfs entry is written to. The device driver
+ * is expected to call the dev_coredump API resulting in a
+ * uevent.
* @p: Driver core's private data, no one other than the driver
* core can touch this.
*
@@ -288,7 +295,7 @@ struct device_driver {
const struct attribute_group **groups;
const struct dev_pm_ops *pm;
- int (*coredump) (struct device *dev);
+ void (*coredump) (struct device *dev);
struct driver_private *p;
};
@@ -666,9 +673,12 @@ static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
static inline void *devm_kmalloc_array(struct device *dev,
size_t n, size_t size, gfp_t flags)
{
- if (size != 0 && n > SIZE_MAX / size)
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return devm_kmalloc(dev, n * size, flags);
+
+ return devm_kmalloc(dev, bytes, flags);
}
static inline void *devm_kcalloc(struct device *dev,
size_t n, size_t size, gfp_t flags)
@@ -902,6 +912,8 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
+ * indicates support for a higher limit in the dma_mask field.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -990,6 +1002,7 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+ bool dma_32bit_limit:1;
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
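devm_kmalloc_array() above now rejects n * size overflow through check_mul_overflow() instead of the open-coded SIZE_MAX division. A hypothetical user-space analogue is sketched below, using __builtin_mul_overflow (assumed available in the compiler) as a stand-in for the kernel helper.

/* Hypothetical user-space analogue of the overflow-checked array allocation:
 * the element-count multiplication is checked and the allocation fails
 * cleanly on overflow instead of silently wrapping. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *alloc_array(size_t n, size_t size)
{
    size_t bytes;

    if (__builtin_mul_overflow(n, size, &bytes))
        return NULL;                    /* n * size would wrap */

    return malloc(bytes);
}

int main(void)
{
    void *ok  = alloc_array(16, sizeof(long));
    void *bad = alloc_array(SIZE_MAX, 8);   /* wraps -> rejected */

    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}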
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index b67bf6ac907d..3c5a4cb3eb95 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -48,7 +48,7 @@
* CMA should not be used by the device drivers directly. It is
* only a helper framework for dma-mapping subsystem.
*
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ * For more information, see kernel-docs in kernel/dma/contiguous.c
*/
#ifdef __KERNEL__
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index c7d844f09c3a..a785f2507159 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -30,8 +30,6 @@ struct bus_type;
extern void dma_debug_add_bus(struct bus_type *bus);
-extern void dma_debug_init(u32 num_entries);
-
extern int dma_debug_resize_entries(u32 num_entries);
extern void debug_dma_map_page(struct device *dev, struct page *page,
@@ -100,10 +98,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
-static inline void dma_debug_init(u32 num_entries)
-{
-}
-
static inline int dma_debug_resize_entries(u32 num_entries)
{
return 0;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 53ad6a47f513..8d9f33febde5 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
-
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 4c008170fe65..eb9b05aa5aea 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -94,11 +94,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
struct dma_fence_cb *cb);
/**
- * struct dma_fence_cb - callback for dma_fence_add_callback
- * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * struct dma_fence_cb - callback for dma_fence_add_callback()
+ * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
* @func: dma_fence_func_t to call
*
- * This struct will be initialized by dma_fence_add_callback, additional
+ * This struct will be initialized by dma_fence_add_callback(), additional
* data can be passed along by embedding dma_fence_cb in another struct.
*/
struct dma_fence_cb {
@@ -108,75 +108,143 @@ struct dma_fence_cb {
/**
* struct dma_fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or dma_fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
*
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->error may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling dma_fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after dma_fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean dma_fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->error if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
*/
-
struct dma_fence_ops {
+ /**
+ * @get_driver_name:
+ *
+ * Returns the driver name. This is a callback to allow drivers to
+ * compute the name at runtime, without having it to store permanently
+ * for each fence, or build a cache of some sort.
+ *
+ * This callback is mandatory.
+ */
const char * (*get_driver_name)(struct dma_fence *fence);
+
+ /**
+ * @get_timeline_name:
+ *
+ * Return the name of the context this fence belongs to. This is a
+ * callback to allow drivers to compute the name at runtime, without
+ * having it to store permanently for each fence, or build a cache of
+ * some sort.
+ *
+ * This callback is mandatory.
+ */
const char * (*get_timeline_name)(struct dma_fence *fence);
+
+ /**
+ * @enable_signaling:
+ *
+ * Enable software signaling of fence.
+ *
+ * Fence implementations that have the capability for hw->hw
+ * signaling can implement this op to enable the necessary
+ * interrupts, insert commands into the cmdstream, etc., to avoid these
+ * costly operations for the common case where only hw->hw
+ * synchronization is required. This is called in the first
+ * dma_fence_wait() or dma_fence_add_callback() path to let the fence
+ * implementation know that there is another driver waiting on the
+ * signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * &dma_fence.error may be set in enable_signaling, but only when false
+ * is returned.
+ *
+ * Since many implementations can call dma_fence_signal() even before
+ * @enable_signaling has been called, there's a race window where the
+ * dma_fence_signal() might result in the final fence reference being
+ * released and its memory freed. To avoid this, implementations of this
+ * callback should grab their own reference using dma_fence_get(), to be
+ * released when the fence is signalled (through e.g. the interrupt
+ * handler).
+ *
+ * This callback is mandatory.
+ */
bool (*enable_signaling)(struct dma_fence *fence);
+
+ /**
+ * @signaled:
+ *
+ * Peek whether the fence is signaled, as a fastpath optimization for
+ * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
+ * callback does not need to make any guarantees beyond that a fence
+ * which has once been reported as signalled must always return true from
+ * this callback. This callback may return false even if the fence has
+ * completed already; in this case the information simply hasn't
+ * propagated through the system yet. See also dma_fence_is_signaled().
+ *
+ * May set &dma_fence.error if returning true.
+ *
+ * This callback is optional.
+ */
bool (*signaled)(struct dma_fence *fence);
+
+ /**
+ * @wait:
+ *
+ * Custom wait implementation, or dma_fence_default_wait.
+ *
+ * Must not be NULL; set it to dma_fence_default_wait for the default
+ * implementation. The dma_fence_default_wait implementation should work for
+ * any fence, as long as @enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if @intr is true and the wait was interrupted,
+ * the remaining jiffies if the fence has signaled, or 0 if the wait
+ * timed out. Custom implementations can also return other error values,
+ * which should be treated as if the fence is signaled. A hardware
+ * lockup, for example, could be reported that way.
+ *
+ * This callback is mandatory.
+ */
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
+
+ /**
+ * @release:
+ *
+ * Called on destruction of fence to release additional resources.
+ * Can be called from irq context. This callback is optional. If it is
+ * NULL, then dma_fence_free() is instead called as the default
+ * implementation.
+ */
void (*release)(struct dma_fence *fence);
+ /**
+ * @fill_driver_data:
+ *
+ * Callback to fill in free-form debug info.
+ *
+ * Returns the number of bytes filled, or a negative error on failure.
+ *
+ * This callback is optional.
+ */
int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+
+ /**
+ * @fence_value_str:
+ *
+ * Callback to fill in free-form debug info specific to this fence, like
+ * the sequence number.
+ *
+ * This callback is optional.
+ */
void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+
+ /**
+ * @timeline_value_str:
+ *
+ * Fills in the current value of the timeline as a string, like the
+ * sequence number. This should match what @fence_value_str prints for
+ * the most recently signalled fence (assuming no delayed signalling).
+ */
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
};
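To make the callback contract above concrete, here is a minimal, illustrative dma_fence_ops sketch; the foo_ names are invented for the example, @wait reuses dma_fence_default_wait() as the documentation suggests, and the optional hooks are simply left out:

static const char *foo_fence_get_driver_name(struct dma_fence *fence)
{
	return "foo";
}

static const char *foo_fence_get_timeline_name(struct dma_fence *fence)
{
	return "foo-timeline";
}

static bool foo_fence_enable_signaling(struct dma_fence *fence)
{
	/*
	 * A real driver would enable an interrupt or insert commands here
	 * and, as recommended above, take an extra dma_fence_get() reference
	 * that is dropped once the fence is signalled.
	 */
	return true;
}

static const struct dma_fence_ops foo_fence_ops = {
	.get_driver_name   = foo_fence_get_driver_name,
	.get_timeline_name = foo_fence_get_timeline_name,
	.enable_signaling  = foo_fence_enable_signaling,
	.wait              = dma_fence_default_wait,
	/* .signaled, .release and the *_str hooks are optional */
};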
@@ -189,7 +257,7 @@ void dma_fence_free(struct dma_fence *fence);
/**
* dma_fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
+ * @fence: fence to reduce refcount of
*/
static inline void dma_fence_put(struct dma_fence *fence)
{
@@ -199,7 +267,7 @@ static inline void dma_fence_put(struct dma_fence *fence)
/**
* dma_fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
*
* Returns the same fence, with refcount increased by 1.
*/
@@ -213,7 +281,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
/**
* dma_fence_get_rcu - get a fence from a reservation_object_list with
* rcu read lock
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
*/
@@ -227,7 +295,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
/**
* dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
- * @fencep: [in] pointer to fence to increase refcount of
+ * @fencep: pointer to fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
@@ -289,14 +357,16 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
*
- * This function requires fence->lock to be held.
+ * This function requires &dma_fence.lock to be held.
+ *
+ * See also dma_fence_is_signaled().
*/
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
@@ -314,17 +384,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
/**
* dma_fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
*
* It's recommended for seqno fences to call dma_fence_signal when the
* operation is complete, it makes it possible to prevent issues from
* wraparound between time of issue and time of use by checking the return
* value of this function before calling hardware-specific wait instructions.
+ *
+ * See also dma_fence_is_signaled_locked().
*/
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
@@ -342,8 +414,8 @@ dma_fence_is_signaled(struct dma_fence *fence)
/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence's seqno
- * @f2: [in] the second fence's seqno from the same context
+ * @f1: the first fence's seqno
+ * @f2: the second fence's seqno from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
@@ -355,8 +427,8 @@ static inline bool __dma_fence_is_later(u32 f1, u32 f2)
/**
* dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
@@ -372,8 +444,8 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
/**
* dma_fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
*
* Returns NULL if both fences are signaled, otherwise the fence that would be
* signaled last. Both fences must be from the same context, since a seqno is
@@ -398,7 +470,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
/**
* dma_fence_get_status_locked - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
*
* Drivers can supply an optional error status condition before they signal
* the fence (to indicate whether the fence was completed due to an error
@@ -422,8 +494,8 @@ int dma_fence_get_status(struct dma_fence *fence);
/**
* dma_fence_set_error - flag an error condition on the fence
- * @fence: [in] the dma_fence
- * @error: [in] the error to store
+ * @fence: the dma_fence
+ * @error: the error to store
*
* Drivers can supply an optional error status condition before they signal
* the fence, to indicate that the fence was completed due to an error
@@ -449,8 +521,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
/**
* dma_fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
*
* This function will return -ERESTARTSYS if interrupted by a signal,
* or 0 if the fence was signaled. Other error values may be
@@ -459,6 +531,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
* Performs a synchronous wait on this fence. It is assumed the caller
* directly or indirectly holds a reference to the fence, otherwise the
* fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
*/
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 92f20832fd28..e8ca5e654277 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -17,6 +17,7 @@
#define __DMA_IOMMU_H
#ifdef __KERNEL__
+#include <linux/types.h>
#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f8ab1c0f589e..f9cc309507d9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -133,10 +133,10 @@ struct dma_map_ops {
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
#endif
- int is_phys;
};
extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -502,7 +502,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev, flag) (true)
+#define arch_dma_alloc_attrs(dev) (true)
#endif
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
@@ -521,7 +521,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
/* let the implementation decide on the zone to allocate from: */
flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
- if (!arch_dma_alloc_attrs(&dev, &flag))
+ if (!arch_dma_alloc_attrs(&dev))
return NULL;
if (!ops->alloc)
return NULL;
@@ -572,14 +572,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
-/*
- * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
- * don't use this in new code.
- */
-#ifndef arch_dma_supported
-#define arch_dma_supported(dev, mask) (1)
-#endif
-
static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -592,9 +584,6 @@ static inline int dma_supported(struct device *dev, u64 mask)
if (!ops)
return 0;
- if (!arch_dma_supported(dev, mask))
- return 0;
-
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
@@ -839,7 +828,7 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif
-#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
+#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
new file mode 100644
index 000000000000..10b2654d549b
--- /dev/null
+++ b/include/linux/dma-noncoherent.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_NONCOHERENT_H
+#define _LINUX_DMA_NONCOHERENT_H 1
+
+#include <linux/dma-mapping.h>
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_DMA_NONCOHERENT_MMAP
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+#else
+#define arch_dma_mmap NULL
+#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
+
+#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+#else
+#define arch_dma_cache_sync NULL
+#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(struct device *dev,
+ phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(struct device *dev,
+ phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#endif /* _LINUX_DMA_NONCOHERENT_H */
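For an architecture that selects CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE, the hook declared above is the only per-arch piece the generic noncoherent code needs. A hedged sketch of one possible implementation, where foo_cache_wback(), foo_cache_inv() and foo_cache_wback_inv() stand in for the architecture's real cache-maintenance primitives:

#include <linux/dma-noncoherent.h>

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		foo_cache_wback(paddr, size);		/* write dirty lines out */
		break;
	case DMA_FROM_DEVICE:
		foo_cache_inv(paddr, size);		/* discard stale lines */
		break;
	case DMA_BIDIRECTIONAL:
		foo_cache_wback_inv(paddr, size);
		break;
	default:
		break;
	}
}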
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
new file mode 100644
index 000000000000..b0115e340fbc
--- /dev/null
+++ b/include/linux/dma/sprd-dma.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SPRD_DMA_H_
+#define _SPRD_DMA_H_
+
+#define SPRD_DMA_REQ_SHIFT 16
+#define SPRD_DMA_FLAGS(req_mode, int_type) \
+ ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * We have 4 request modes: fragment mode, block mode, transaction mode
+ * and link-list mode. One transaction can contain several blocks, and one
+ * block can contain several fragments. Link-list mode means several DMA
+ * configurations can be saved in one reserved memory region, and the DMA
+ * controller then fetches each configuration automatically to start the transfer.
+ */
+enum sprd_dma_req_mode {
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_BLK_REQ,
+ SPRD_DMA_TRANS_REQ,
+ SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: do not generate any DMA interrupt.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configuration error interrupt when the configuration is
+ * incorrect.
+ */
+enum sprd_dma_int_type {
+ SPRD_DMA_NO_INT,
+ SPRD_DMA_FRAG_INT,
+ SPRD_DMA_BLK_INT,
+ SPRD_DMA_BLK_FRAG_INT,
+ SPRD_DMA_TRANS_INT,
+ SPRD_DMA_TRANS_FRAG_INT,
+ SPRD_DMA_TRANS_BLK_INT,
+ SPRD_DMA_LIST_INT,
+ SPRD_DMA_CFGERR_INT,
+};
+
+#endif
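As a quick illustration, a client builds the flags word by combining one request mode with one interrupt type through the SPRD_DMA_FLAGS() helper defined above; the controller driver can split it again with SPRD_DMA_REQ_SHIFT. The foo_ wrapper below is only for the example:

#include <linux/dma/sprd-dma.h>

static unsigned long foo_build_dma_flags(void)
{
	/* whole-transaction transfer, interrupt once the transaction is done */
	return SPRD_DMA_FLAGS(SPRD_DMA_TRANS_REQ, SPRD_DMA_TRANS_INT);
}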
diff --git a/include/linux/efi.h b/include/linux/efi.h
index f1b7d68ac460..56add823f190 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -395,9 +395,9 @@ typedef struct {
u32 attributes;
u32 get_bar_attributes;
u32 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_32;
+ u64 romsize;
+ u32 romimage;
+} efi_pci_io_protocol_32_t;
typedef struct {
u64 poll_mem;
@@ -415,9 +415,9 @@ typedef struct {
u64 attributes;
u64 get_bar_attributes;
u64 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_64;
+ u64 romsize;
+ u64 romimage;
+} efi_pci_io_protocol_64_t;
typedef struct {
void *poll_mem;
@@ -437,7 +437,7 @@ typedef struct {
void *set_bar_attributes;
uint64_t romsize;
void *romimage;
-} efi_pci_io_protocol;
+} efi_pci_io_protocol_t;
#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 6d9e230dffd2..a02deea30185 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -218,8 +218,6 @@ extern void elv_unregister(struct elevator_type *);
extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct request_queue *, struct elevator_queue *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
struct elevator_type *);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index ebe41811ed34..f8a2245b70ac 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -310,6 +310,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
* instead of the latter), any change to them will be overwritten
* by kernel. Returns a negative error code or zero.
+ * @get_fecparam: Get the network device Forward Error Correction parameters.
+ * @set_fecparam: Set the network device Forward Error Correction parameters.
+ * @get_ethtool_phy_stats: Return extended statistics about the PHY device.
+ * This is only useful if the device maintains PHY statistics and
+ * cannot use the standard PHY library helpers.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -405,5 +410,7 @@ struct ethtool_ops {
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
struct ethtool_fecparam *);
+ void (*get_ethtool_phy_stats)(struct net_device *,
+ struct ethtool_stats *, u64 *);
};
#endif /* _LINUX_ETHTOOL_H */
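A hedged sketch of how a driver that keeps its own PHY counters might implement the new get_ethtool_phy_stats hook; struct foo_priv, the single phy_crc_errors counter and the one-entry layout are assumptions for the example and must line up with what the driver reports via get_sset_count()/get_strings():

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	u64 phy_crc_errors;	/* hypothetical counter maintained by the driver */
};

static void foo_get_ethtool_phy_stats(struct net_device *ndev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* one u64 per string advertised for the PHY statistics set */
	data[0] = priv->phy_crc_errors;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_ethtool_phy_stats = foo_get_ethtool_phy_stats,
};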
diff --git a/include/linux/export.h b/include/linux/export.h
index 1a1dfdb2a5c6..b768d6dd3c90 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -10,14 +10,8 @@
* hackers place grumpy comments in header files.
*/
-/* Some toolchains use a `_' prefix for all user symbols. */
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
-#define __VMLINUX_SYMBOL(x) _##x
-#define __VMLINUX_SYMBOL_STR(x) "_" #x
-#else
#define __VMLINUX_SYMBOL(x) x
#define __VMLINUX_SYMBOL_STR(x) #x
-#endif
/* Indirect, so macros are expanded before pasting. */
#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
@@ -46,14 +40,14 @@ extern struct module __this_module;
#if defined(CONFIG_MODULE_REL_CRCS)
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
+ " .weak __crc_" #sym " \n" \
+ " .long __crc_" #sym " - . \n" \
" .previous \n");
#else
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
+ " .weak __crc_" #sym " \n" \
+ " .long __crc_" #sym " \n" \
" .previous \n");
#endif
#else
@@ -66,7 +60,7 @@ extern struct module __this_module;
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), aligned(1))) \
- = VMLINUX_SYMBOL_STR(sym); \
+ = #sym; \
static const struct kernel_symbol __ksymtab_##sym \
__used \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fc4e8f91b03d..b615df57b7d5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
@@ -30,6 +31,7 @@ struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
+struct xdp_buff;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -46,7 +48,9 @@ struct xdp_rxq_info;
/* Additional register mappings for converted user programs. */
#define BPF_REG_A BPF_REG_0
#define BPF_REG_X BPF_REG_7
-#define BPF_REG_TMP BPF_REG_8
+#define BPF_REG_TMP BPF_REG_2 /* scratch reg */
+#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
+#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
/* Kernel hidden auxiliary/helper register for hardening step.
* Only used by eBPF JITs. It's nothing more than a temporary
@@ -286,8 +290,21 @@ struct xdp_rxq_info;
.off = OFF, \
.imm = 0 })
+/* Relative call */
+
+#define BPF_CALL_REL(TGT) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = BPF_PSEUDO_CALL, \
+ .off = 0, \
+ .imm = TGT })
+
/* Function call */
+#define BPF_CAST_CALL(x) \
+ ((u64 (*)(u64, u64, u64, u64, u64))(x))
+
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
@@ -453,7 +470,8 @@ struct sock_fprog_kern {
};
struct bpf_binary_header {
- unsigned int pages;
+ u16 pages;
+ u16 locked:1;
u8 image[];
};
@@ -467,7 +485,8 @@ struct bpf_prog {
dst_needed:1, /* Do we need dst entry? */
blinded:1, /* Was blinded */
is_func:1, /* program is a bpf function */
- kprobe_override:1; /* Do we override a kprobe? */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1; /* callchain buffer allocated? */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@@ -500,14 +519,6 @@ struct bpf_skb_data_end {
void *data_end;
};
-struct xdp_buff {
- void *data;
- void *data_end;
- void *data_meta;
- void *data_hard_start;
- struct xdp_rxq_info *rxq;
-};
-
struct sk_msg_buff {
void *data;
void *data_end;
@@ -519,9 +530,9 @@ struct sk_msg_buff {
int sg_end;
struct scatterlist sg_data[MAX_SKB_FRAGS];
bool sg_copy[MAX_SKB_FRAGS];
- __u32 key;
__u32 flags;
- struct bpf_map *map;
+ struct sock *sk_redir;
+ struct sock *sk;
struct sk_buff *skb;
struct list_head list;
};
@@ -630,29 +641,50 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
return prog->type == BPF_PROG_TYPE_UNSPEC;
}
-static inline bool
-bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
+static inline u32 bpf_ctx_off_adjust_machine(u32 size)
+{
+ const u32 size_machine = sizeof(unsigned long);
+
+ if (size > size_machine && size % size_machine == 0)
+ size = size_machine;
+
+ return size;
+}
+
+static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
+ u32 size_default)
{
- bool off_ok;
+ size_default = bpf_ctx_off_adjust_machine(size_default);
+ size_access = bpf_ctx_off_adjust_machine(size_access);
+
#ifdef __LITTLE_ENDIAN
- off_ok = (off & (size_default - 1)) == 0;
+ return (off & (size_default - 1)) == 0;
#else
- off_ok = (off & (size_default - 1)) + size == size_default;
+ return (off & (size_default - 1)) + size_access == size_default;
#endif
- return off_ok && size <= size_default && (size & (size - 1)) == 0;
+}
+
+static inline bool
+bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
+{
+ return bpf_ctx_narrow_align_ok(off, size, size_default) &&
+ size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+ if (set_memory_ro((unsigned long)fp, fp->pages))
+ fp->locked = 0;
+#endif
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
if (fp->locked) {
WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
/* In case set_memory_rw() fails, we want to be the first
@@ -660,34 +692,30 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
*/
fp->locked = 0;
}
+#endif
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ hdr->locked = 1;
+ if (set_memory_ro((unsigned long)hdr, hdr->pages))
+ hdr->locked = 0;
+#endif
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ if (hdr->locked) {
+ WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+ /* In case set_memory_rw() fails, we want to be the first
+ * to crash here instead of some random place later on.
+ */
+ hdr->locked = 0;
+ }
+#endif
}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -698,6 +726,22 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
return (void *)addr;
}
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
+{
+ if (!fp->locked)
+ return -ENOLCK;
+ if (fp->jited) {
+ const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+ if (!hdr->locked)
+ return -ENOLCK;
+ }
+
+ return 0;
+}
+#endif
+
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
@@ -759,6 +803,21 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
+ struct net_device *fwd)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (skb->len > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
@@ -766,27 +825,12 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *prog);
+ struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
void xdp_do_flush_map(void);
-/* Drivers not supporting XDP metadata can use this helper, which
- * rejects any room expansion for metadata as a result.
- */
-static __always_inline void
-xdp_set_data_meta_invalid(struct xdp_buff *xdp)
-{
- xdp->data_meta = xdp->data + 1;
-}
-
-static __always_inline bool
-xdp_data_meta_unsupported(const struct xdp_buff *xdp)
-{
- return unlikely(xdp->data_meta > xdp->data);
-}
-
void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
@@ -949,6 +993,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
}
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
@@ -1029,6 +1076,7 @@ struct bpf_sock_addr_kern {
* only two (src and dst) are available at convert_ctx_access time
*/
u64 tmp_reg;
+ void *t_ctx; /* Attach type specific context. */
};
struct bpf_sock_ops_kern {
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 41050417cafb..2dd566c91d44 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -42,6 +42,8 @@ struct builtin_fw {
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_nowarn(const struct firmware **fw, const char *name,
+ struct device *device);
int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -59,6 +61,14 @@ static inline int request_firmware(const struct firmware **fw,
{
return -EINVAL;
}
+
+static inline int firmware_request_nowarn(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
static inline int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
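firmware_request_nowarn() behaves like request_firmware() but does not log the usual warning when the file is absent, which suits genuinely optional blobs. A small sketch, with the "foo/optional-cal.bin" name made up for the example:

#include <linux/firmware.h>

static void foo_load_optional_calibration(struct device *dev)
{
	const struct firmware *fw;

	/* no warning is logged if the calibration file simply isn't there */
	if (firmware_request_nowarn(&fw, "foo/optional-cal.bin", dev))
		return;		/* fall back to built-in defaults */

	/* ... parse fw->data / fw->size ... */
	release_firmware(fw);
}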
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 3810a9033f49..7d4664730d60 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for Altera Partial Reconfiguration IP Core
*
@@ -5,18 +6,6 @@
*
* Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
* by Alan Tull <atull@opensource.altera.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ALT_PR_IP_CORE_H
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 3694821a6d2d..ce550fcf6360 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -62,8 +62,11 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);
-int fpga_bridge_register(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-void fpga_bridge_unregister(struct device *dev);
+struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv);
+void fpga_bridge_free(struct fpga_bridge *br);
+int fpga_bridge_register(struct fpga_bridge *br);
+void fpga_bridge_unregister(struct fpga_bridge *br);
#endif /* _LINUX_FPGA_BRIDGE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 3c6de23aabdf..eec7c2478b0d 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* FPGA Framework
*
* Copyright (C) 2013-2016 Altera Corporation
* Copyright (C) 2017 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_FPGA_MGR_H
#define _LINUX_FPGA_MGR_H
@@ -170,9 +159,11 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-int fpga_mgr_register(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops, void *priv);
-
-void fpga_mgr_unregister(struct device *dev);
+struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
+ const struct fpga_manager_ops *mops,
+ void *priv);
+void fpga_mgr_free(struct fpga_manager *mgr);
+int fpga_mgr_register(struct fpga_manager *mgr);
+void fpga_mgr_unregister(struct fpga_manager *mgr);
#endif /*_LINUX_FPGA_MGR_H */
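With the reworked API above, registration becomes an explicit create + register pair, and the remove path calls fpga_mgr_unregister() on the manager it stashed earlier. A hedged probe sketch, where foo_fpga_ops stands in for the driver's existing fpga_manager_ops:

static int foo_fpga_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_manager *mgr;
	int ret;

	mgr = fpga_mgr_create(dev, "Foo FPGA Manager", &foo_fpga_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	platform_set_drvdata(pdev, mgr);

	ret = fpga_mgr_register(mgr);
	if (ret)
		fpga_mgr_free(mgr);	/* only needed while still unregistered */

	return ret;
}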
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index b6520318ab9c..d7071cddd727 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _FPGA_REGION_H
#define _FPGA_REGION_H
@@ -14,7 +16,6 @@
* @info: FPGA image info
* @priv: private data
* @get_bridges: optional function to get bridges to a list
- * @groups: optional attribute groups.
*/
struct fpga_region {
struct device dev;
@@ -24,7 +25,6 @@ struct fpga_region {
struct fpga_image_info *info;
void *priv;
int (*get_bridges)(struct fpga_region *region);
- const struct attribute_group **groups;
};
#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
@@ -34,7 +34,12 @@ struct fpga_region *fpga_region_class_find(
int (*match)(struct device *, const void *));
int fpga_region_program_fpga(struct fpga_region *region);
-int fpga_region_register(struct device *dev, struct fpga_region *region);
-int fpga_region_unregister(struct fpga_region *region);
+
+struct fpga_region
+*fpga_region_create(struct device *dev, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+void fpga_region_free(struct fpga_region *region);
+int fpga_region_register(struct fpga_region *region);
+void fpga_region_unregister(struct fpga_region *region);
#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 760d8da1b6c7..5c91108846db 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -36,6 +36,7 @@
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <linux/errseq.h>
+#include <linux/ioprio.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -94,7 +95,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/*
* flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
- * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
+ * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open()
*/
/* file is open for reading */
@@ -206,9 +207,9 @@ struct iattr {
kuid_t ia_uid;
kgid_t ia_gid;
loff_t ia_size;
- struct timespec ia_atime;
- struct timespec ia_mtime;
- struct timespec ia_ctime;
+ struct timespec64 ia_atime;
+ struct timespec64 ia_mtime;
+ struct timespec64 ia_ctime;
/*
* Not an attribute, but an auxiliary info for filesystems wanting to
@@ -299,7 +300,8 @@ struct kiocb {
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
void *private;
int ki_flags;
- enum rw_hint ki_hint;
+ u16 ki_hint;
+ u16 ki_ioprio; /* See linux/ioprio.h */
} __randomize_layout;
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -602,9 +604,9 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec i_atime;
- struct timespec i_mtime;
- struct timespec i_ctime;
+ struct timespec64 i_atime;
+ struct timespec64 i_mtime;
+ struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
unsigned int i_blkbits;
@@ -1091,7 +1093,7 @@ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
-extern void lease_get_mtime(struct inode *, struct timespec *time);
+extern void lease_get_mtime(struct inode *, struct timespec64 *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
@@ -1206,7 +1208,8 @@ static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned
return 0;
}
-static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
+static inline void lease_get_mtime(struct inode *inode,
+ struct timespec64 *time)
{
return;
}
@@ -1250,7 +1253,7 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
}
struct fasync_struct {
- spinlock_t fa_lock;
+ rwlock_t fa_lock;
int magic;
int fa_fd;
struct fasync_struct *fa_next; /* singly linked list */
@@ -1364,9 +1367,9 @@ struct super_block {
void *s_security;
#endif
const struct xattr_handler **s_xattr;
-
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
const struct fscrypt_operations *s_cop;
-
+#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1476,7 +1479,8 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
}
-extern struct timespec current_time(struct inode *inode);
+extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran);
+extern struct timespec64 current_time(struct inode *inode);
/*
* Snapshotting support.
@@ -1597,6 +1601,11 @@ static inline void sb_start_intwrite(struct super_block *sb)
__sb_start_write(sb, SB_FREEZE_FS, true);
}
+static inline int sb_start_intwrite_trylock(struct super_block *sb)
+{
+ return __sb_start_write(sb, SB_FREEZE_FS, false);
+}
+
extern bool inode_owner_or_capable(const struct inode *inode);
@@ -1711,6 +1720,8 @@ struct file_operations {
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
+ struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
+ __poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1764,7 +1775,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec *, int);
+ int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode, int *opened);
@@ -1927,12 +1938,22 @@ static inline enum rw_hint file_write_hint(struct file *file)
static inline int iocb_flags(struct file *file);
+static inline u16 ki_hint_validate(enum rw_hint hint)
+{
+ typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
+
+ if (hint <= max_hint)
+ return hint;
+ return 0;
+}
+
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
- .ki_hint = file_write_hint(filp),
+ .ki_hint = ki_hint_validate(file_write_hint(filp)),
+ .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0),
};
}
@@ -2198,7 +2219,7 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec *, int);
+extern int generic_update_time(struct inode *, struct timespec64 *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2570,7 +2591,7 @@ extern bool is_bad_inode(struct inode *);
#ifdef CONFIG_BLOCK
extern void check_disk_size_change(struct gendisk *disk,
- struct block_device *bdev);
+ struct block_device *bdev, bool verbose);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
@@ -2879,6 +2900,10 @@ extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *),
+ void *data);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 44b50c04bae9..ee8b43e4c15a 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -25,6 +25,10 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
}
/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
{
@@ -64,16 +68,6 @@ static inline void fscrypt_restore_control_page(struct page *page)
return;
}
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
- return;
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
- return;
-}
-
/* policy.c */
static inline int fscrypt_ioctl_set_policy(struct file *filp,
const void __user *arg)
@@ -160,10 +154,13 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}
/* bio.c */
-static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
- struct bio *bio)
+static inline void fscrypt_decrypt_bio(struct bio *bio)
+{
+}
+
+static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+ struct bio *bio)
{
- return;
}
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 477a7a6504d2..6456c6b2005f 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -29,7 +29,7 @@ struct fscrypt_operations {
int (*set_context)(struct inode *, const void *, size_t, void *);
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
+ unsigned int max_namelen;
};
struct fscrypt_ctx {
@@ -59,6 +59,7 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
}
/* crypto.c */
+extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -74,20 +75,6 @@ static inline struct page *fscrypt_control_page(struct page *page)
extern void fscrypt_restore_control_page(struct page *);
-extern const struct dentry_operations fscrypt_d_ops;
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
- d_set_d_op(dentry, &fscrypt_d_ops);
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
- spin_unlock(&dentry->d_lock);
-}
-
/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
@@ -188,7 +175,9 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}
/* bio.c */
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_decrypt_bio(struct bio *);
+extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+ struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
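Since max_namelen is now a plain integer instead of a callback, a filesystem fills it in directly when declaring its fscrypt_operations. A minimal sketch; the value 255 is only an example and the remaining hooks are elided:

static const struct fscrypt_operations foo_cryptops = {
	/* ... the filesystem's get_context/set_context/empty_dir hooks ... */
	.max_namelen	= 255,	/* longest name the on-disk format can hold */
};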
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
new file mode 100644
index 000000000000..b462d9ea8007
--- /dev/null
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright 2018 NXP
+ */
+#ifndef __PTP_QORIQ_H__
+#define __PTP_QORIQ_H__
+
+#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
+
+/*
+ * qoriq ptp registers
+ * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
+ */
+struct qoriq_ptp_registers {
+ u32 tmr_ctrl; /* Timer control register */
+ u32 tmr_tevent; /* Timestamp event register */
+ u32 tmr_temask; /* Timer event mask register */
+ u32 tmr_pevent; /* Timestamp event register */
+ u32 tmr_pemask; /* Timer event mask register */
+ u32 tmr_stat; /* Timestamp status register */
+ u32 tmr_cnt_h; /* Timer counter high register */
+ u32 tmr_cnt_l; /* Timer counter low register */
+ u32 tmr_add; /* Timer drift compensation addend register */
+ u32 tmr_acc; /* Timer accumulator register */
+ u32 tmr_prsc; /* Timer prescale */
+ u8 res1[4];
+ u32 tmroff_h; /* Timer offset high */
+ u32 tmroff_l; /* Timer offset low */
+ u8 res2[8];
+ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
+ u32 tmr_alarm1_l; /* Timer alarm 1 low register */
+ u32 tmr_alarm2_h; /* Timer alarm 2 high register */
+ u32 tmr_alarm2_l; /* Timer alarm 2 low register */
+ u8 res3[48];
+ u32 tmr_fiper1; /* Timer fixed period interval */
+ u32 tmr_fiper2; /* Timer fixed period interval */
+ u32 tmr_fiper3; /* Timer fixed period interval */
+ u8 res4[20];
+ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
+};
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FIPERST (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+#define PRSC_OCK_MASK (0xffff)
+
+
+#define DRIVER "ptp_qoriq"
+#define DEFAULT_CKSEL 1
+#define N_EXT_TS 2
+#define REG_SIZE sizeof(struct qoriq_ptp_registers)
+
+struct qoriq_ptp {
+ struct qoriq_ptp_registers __iomem *regs;
+ spinlock_t lock; /* protects regs */
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ struct resource *rsrc;
+ int irq;
+ int phc_index;
+ u64 alarm_interval; /* for periodic alarm */
+ u64 alarm_value;
+ u32 tclk_period; /* nanoseconds */
+ u32 tmr_prsc;
+ u32 tmr_add;
+ u32 cksel;
+ u32 tmr_fiper1;
+ u32 tmr_fiper2;
+};
+
+static inline u32 qoriq_read(unsigned __iomem *addr)
+{
+ u32 val;
+
+ val = ioread32be(addr);
+ return val;
+}
+
+static inline void qoriq_write(unsigned __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+#endif
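
The bit definitions and the big-endian accessors above are what the new ptp_qoriq driver uses to program the 1588 timer block. A minimal sketch of how they fit together, assuming struct qoriq_ptp_registers exposes a tmr_ctrl member (the register layout itself is not part of this hunk):

/* Sketch only: select the reference clock and enable the 1588 timer. */
static void qoriq_ptp_enable_sketch(struct qoriq_ptp *qoriq_ptp)
{
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&qoriq_ptp->lock, flags);

	ctrl = qoriq_read(&qoriq_ptp->regs->tmr_ctrl);	/* assumed member */
	ctrl &= ~(CKSEL_MASK << CKSEL_SHIFT);
	ctrl |= (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT;
	ctrl |= TE;					/* 1588 timer enable */
	qoriq_write(&qoriq_ptp->regs->tmr_ctrl, ctrl);

	spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
}
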
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 9f1edb92c97e..b38964a7a521 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -98,8 +98,6 @@ struct fsnotify_iter_info;
struct fsnotify_ops {
int (*handle_event)(struct fsnotify_group *group,
struct inode *inode,
- struct fsnotify_mark *inode_mark,
- struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
const unsigned char *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
@@ -201,6 +199,57 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
+enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_INODE,
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT,
+ FSNOTIFY_OBJ_TYPE_COUNT,
+ FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
+};
+
+#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
+#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
+
+struct fsnotify_iter_info {
+ struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
+ unsigned int report_mask;
+ int srcu_idx;
+};
+
+static inline bool fsnotify_iter_should_report_type(
+ struct fsnotify_iter_info *iter_info, int type)
+{
+ return (iter_info->report_mask & (1U << type));
+}
+
+static inline void fsnotify_iter_set_report_type(
+ struct fsnotify_iter_info *iter_info, int type)
+{
+ iter_info->report_mask |= (1U << type);
+}
+
+static inline void fsnotify_iter_set_report_type_mark(
+ struct fsnotify_iter_info *iter_info, int type,
+ struct fsnotify_mark *mark)
+{
+ iter_info->marks[type] = mark;
+ iter_info->report_mask |= (1U << type);
+}
+
+#define FSNOTIFY_ITER_FUNCS(name, NAME) \
+static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
+ struct fsnotify_iter_info *iter_info) \
+{ \
+ return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
+ iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
+}
+
+FSNOTIFY_ITER_FUNCS(inode, INODE)
+FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
+
+#define fsnotify_foreach_obj_type(type) \
+ for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
+
/*
* Inode / vfsmount point to this structure which tracks all marks attached to
* the inode / vfsmount. The reference to inode / vfsmount is held by this
@@ -209,20 +258,14 @@ struct fsnotify_group {
*/
struct fsnotify_mark_connector {
spinlock_t lock;
-#define FSNOTIFY_OBJ_TYPE_INODE 0x01
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02
-#define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \
- FSNOTIFY_OBJ_TYPE_VFSMOUNT)
- unsigned int flags; /* Type of object [lock] */
+ unsigned int type; /* Type of object [lock] */
union { /* Object pointer [lock] */
struct inode *inode;
struct vfsmount *mnt;
- };
- union {
- struct hlist_head list;
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
+ struct hlist_head list;
};
/*
@@ -248,7 +291,7 @@ struct fsnotify_mark {
/* Group this mark is for. Set on mark creation, stable until last ref
* is dropped */
struct fsnotify_group *group;
- /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+ /* List of marks by group->marks_list. Also reused for queueing
* mark into destroy_list when it's waiting for the end of SRCU period
* before it can be freed. [group->mark_mutex] */
struct list_head g_list;
@@ -358,7 +401,21 @@ extern struct fsnotify_mark *fsnotify_find_mark(
extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
struct vfsmount *mnt, int allow_dups);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- struct inode *inode, struct vfsmount *mnt, int allow_dups);
+ struct inode *inode, struct vfsmount *mnt,
+ int allow_dups);
+/* attach the mark to the inode */
+static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct inode *inode,
+ int allow_dups)
+{
+ return fsnotify_add_mark(mark, inode, NULL, allow_dups);
+}
+static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
+ struct inode *inode,
+ int allow_dups)
+{
+ return fsnotify_add_mark_locked(mark, inode, NULL, allow_dups);
+}
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
@@ -371,12 +428,12 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned
/* run all the marks in a group, and clear all of the vfsmount marks */
static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
}
/* run all the marks in a group, and clear all of the inode marks */
static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
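
The object-type enum, the per-type flag macros and the iterator helpers added to fsnotify_backend.h above replace the old per-type mark arguments of handle_event(). A hedged sketch of how a backend could walk the marks reported for one event using these helpers (the function and its use of the mark mask are illustrative, not taken from this patch):

/* Illustrative only: OR together the masks of all marks reported for an event. */
static __u32 sketch_collect_event_mask(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	__u32 marks_mask = 0;
	int type;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		if (mark)
			marks_mask |= mark->mask;
	}

	return marks_mask;
}
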
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9c3c9a319e48..8154f4920fcb 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Ftrace header. For implementation details beyond the random comments
- * scattered below, see: Documentation/trace/ftrace-design.txt
+ * scattered below, see: Documentation/trace/ftrace-design.rst
*/
#ifndef _LINUX_FTRACE_H
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c826b0b5232a..6cb8a5789668 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part)
part_stat_add(cpu, gendiskp, field, -subnd)
void part_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+ unsigned int inflight[2]);
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+ unsigned int inflight[2]);
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
int rw);
void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1a4582b44d32..a6afcec53795 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -24,6 +24,7 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
+#define ___GFP_WRITE 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
@@ -36,11 +37,10 @@ struct vm_area_struct;
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_WRITE 0x800000u
-#define ___GFP_KSWAPD_RECLAIM 0x1000000u
+#define ___GFP_DIRECT_RECLAIM 0x200000u
+#define ___GFP_KSWAPD_RECLAIM 0x400000u
#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x2000000u
+#define ___GFP_NOLOCKDEP 0x800000u
#else
#define ___GFP_NOLOCKDEP 0
#endif
@@ -205,7 +205,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -343,7 +343,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
- * 0x4 => DMA32 or DMA or NORMAL
+ * 0x4 => DMA32 or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
@@ -351,7 +351,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
- * 0xc => DMA32 (MOVABLE+DMA32)
+ * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
@@ -464,7 +464,7 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON(!node_online(nid));
+ VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
return __alloc_pages(gfp_mask, order, nid);
}
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index dbd065963296..243112c7fa7d 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -116,7 +116,7 @@ int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value(unsigned int array_size,
+int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
@@ -134,7 +134,7 @@ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
@@ -369,12 +369,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_value(unsigned int array_size,
+static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
@@ -423,12 +424,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index b2f2dc638463..daa44eac9241 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -39,6 +39,23 @@ struct gpiod_lookup_table {
struct gpiod_lookup table[];
};
+/**
+ * struct gpiod_hog - GPIO line hog table
+ * @chip_label: name of the chip the GPIO belongs to
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @line_name: consumer name for the hogged line
+ * @lflags: mask of GPIO lookup flags
+ * @dflags: GPIO flags used to specify the direction and value
+ */
+struct gpiod_hog {
+ struct list_head list;
+ const char *chip_label;
+ u16 chip_hwnum;
+ const char *line_name;
+ enum gpio_lookup_flags lflags;
+ int dflags;
+};
+
/*
* Simple definition of a single GPIO under a con_id
*/
@@ -59,10 +76,23 @@ struct gpiod_lookup_table {
.flags = _flags, \
}
+/*
+ * Simple definition of a single GPIO hog in an array.
+ */
+#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
+{ \
+ .chip_label = _chip_label, \
+ .chip_hwnum = _chip_hwnum, \
+ .line_name = _line_name, \
+ .lflags = _lflags, \
+ .dflags = _dflags, \
+}
+
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_add_hogs(struct gpiod_hog *hogs);
#else
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -70,6 +100,7 @@ static inline
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
#endif
#endif /* __LINUX_GPIO_MACHINE_H */
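
The GPIO_HOG() initializer and gpiod_add_hogs() added to gpio/machine.h above are aimed at board files that want lines claimed as soon as the matching chip registers. A hypothetical board-file sketch (chip label, offsets and line names are made up, and the empty terminating entry is an assumption since gpiod_add_hogs() takes no count argument):

/* Hypothetical board code: hog two lines on a chip labelled "gpiochip0". */
static struct gpiod_hog board_gpio_hogs[] = {
	GPIO_HOG("gpiochip0", 3, "usb-hub-reset", GPIO_ACTIVE_LOW, GPIOD_OUT_HIGH),
	GPIO_HOG("gpiochip0", 7, "mux-select", GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW),
	{ }	/* assumed sentinel: no count is passed to gpiod_add_hogs() */
};

static void __init board_register_gpio_hogs(void)
{
	gpiod_add_hogs(board_gpio_hogs);
}
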
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8da3e1f48195..41a3d5775394 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -292,9 +292,12 @@ struct hid_item {
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
#define HID_DG_SCANTIME 0x000d0056
+#define HID_DG_SURFACESWITCH 0x000d0057
+#define HID_DG_BUTTONSWITCH 0x000d0058
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+#define HID_DG_LATENCYMODE 0x000d0060
#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
@@ -341,10 +344,12 @@ struct hid_item {
/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL BIT(10)
+#define HID_QUIRK_INPUT_PER_APP BIT(11)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
+#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -367,6 +372,7 @@ struct hid_item {
#define HID_GROUP_RMI 0x0100
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
+#define HID_GROUP_STEAM 0x0103
/*
* HID protocol status
@@ -463,8 +469,10 @@ struct hid_field {
struct hid_report {
struct list_head list;
- unsigned id; /* id of this report */
- unsigned type; /* report type */
+ struct list_head hidinput_list;
+ unsigned int id; /* id of this report */
+ unsigned int type; /* report type */
+ unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
@@ -502,12 +510,15 @@ struct hid_output_fifo {
#define HID_STAT_ADDED BIT(0)
#define HID_STAT_PARSED BIT(1)
+#define HID_STAT_DUP_DETECTED BIT(2)
struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
+ const char *name;
bool registered;
+ struct list_head reports; /* the list of reports */
};
enum hid_type {
@@ -516,6 +527,12 @@ enum hid_type {
HID_TYPE_USBNONE
};
+enum hid_battery_status {
+ HID_BATTERY_UNKNOWN = 0,
+ HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */
+ HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
+};
+
struct hid_driver;
struct hid_ll_driver;
@@ -558,7 +575,8 @@ struct hid_device { /* device report descriptor */
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
- bool battery_reported;
+ enum hid_battery_status battery_status;
+ bool battery_avoid_query;
#endif
unsigned int status; /* see STAT flags above */
@@ -857,7 +875,9 @@ void hid_output_report(struct hid_report *report, __u8 *data);
void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
-struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+struct hid_report *hid_register_report(struct hid_device *device,
+ unsigned int type, unsigned int id,
+ unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
unsigned int type, unsigned int id,
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 39988924de3a..4c92e3ba3e16 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -16,7 +16,7 @@
/*
* Heterogeneous Memory Management (HMM)
*
- * See Documentation/vm/hmm.txt for reasons and overview of what HMM is and it
+ * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it
* is for. Here we focus on the HMM API description, with some explanation of
* the underlying implementation.
*
@@ -522,9 +522,7 @@ void hmm_devmem_remove(struct hmm_devmem *devmem);
static inline void hmm_devmem_page_set_drvdata(struct page *page,
unsigned long data)
{
- unsigned long *drvdata = (unsigned long *)&page->pgmap;
-
- drvdata[1] = data;
+ page->hmm_data = data;
}
/*
@@ -535,9 +533,7 @@ static inline void hmm_devmem_page_set_drvdata(struct page *page,
*/
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
- const unsigned long *drvdata = (const unsigned long *)&page->pgmap;
-
- return drvdata[1];
+ return page->hmm_data;
}
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index ddf7f9ca86cc..89110d896d72 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -192,13 +192,6 @@ struct host1x_reloc {
unsigned long shift;
};
-struct host1x_waitchk {
- struct host1x_bo *bo;
- u32 offset;
- u32 syncpt_id;
- u32 thresh;
-};
-
struct host1x_job {
/* When refcount goes to zero, job can be freed */
struct kref ref;
@@ -209,19 +202,15 @@ struct host1x_job {
/* Channel where job is submitted to */
struct host1x_channel *channel;
- u32 client;
+ /* client where the job originated */
+ struct host1x_client *client;
/* Gathers and their memory */
struct host1x_job_gather *gathers;
unsigned int num_gathers;
- /* Wait checks to be processed at submit time */
- struct host1x_waitchk *waitchk;
- unsigned int num_waitchk;
- u32 waitchk_mask;
-
/* Array of handles to be pinned & unpinned */
- struct host1x_reloc *relocarray;
+ struct host1x_reloc *relocs;
unsigned int num_relocs;
struct host1x_job_unpin_data *unpins;
unsigned int num_unpins;
@@ -261,10 +250,9 @@ struct host1x_job {
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs,
- u32 num_waitchks);
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
- u32 words, u32 offset);
+ u32 num_cmdbufs, u32 num_relocs);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+ unsigned int words, unsigned int offset);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a2656c3ebe81..3892e9c8b2de 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -161,9 +161,11 @@ struct hrtimer_clock_base {
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
HRTIMER_BASE_MONOTONIC_SOFT,
HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 859d673d98c8..57537e67b468 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Hardware spinlock public header
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_HWSPINLOCK_H
@@ -24,6 +16,7 @@
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_RAW 0x03
struct device;
struct device_node;
@@ -176,6 +169,25 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
}
/**
+ * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Caution: the user must protect the code that takes the hardware lock
+ * with a mutex or spinlock to avoid deadlock; this lets the user perform
+ * time-consuming or sleepable operations while holding the hardware lock.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
@@ -243,6 +255,29 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
}
/**
+ * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Caution: the user must protect the code that takes the hardware lock
+ * with a mutex or spinlock to avoid deadlock; this lets the user perform
+ * time-consuming or sleepable operations while holding the hardware lock.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
+}
+
+/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
@@ -302,6 +337,21 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
}
/**
+ * hwspin_unlock_raw() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
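
The _raw lock/unlock variants added above do not touch interrupt or preemption state, which is why their comments insist on an external mutex or spinlock. A sketch of that calling pattern, with the mutex and the 100 ms timeout as placeholders:

/* Sketch of the documented HWLOCK_RAW usage: the caller supplies its own lock. */
static int sketch_update_shared_area(struct hwspinlock *hwlock, struct mutex *lock)
{
	int ret;

	mutex_lock(lock);				/* software-side protection */

	ret = hwspin_lock_timeout_raw(hwlock, 100);	/* wait up to 100 msecs */
	if (ret) {
		mutex_unlock(lock);
		return ret;
	}

	/* ... time-consuming or even sleeping work on the shared resource ... */

	hwspin_unlock_raw(hwlock);
	mutex_unlock(lock);

	return 0;
}
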
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 192ed8fbc403..3a3012f57be4 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -35,6 +35,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <linux/reciprocal_div.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -120,6 +121,7 @@ struct hv_ring_buffer {
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
+ struct reciprocal_value ring_size_div10_reciprocal;
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
@@ -154,6 +156,16 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
+static inline u32 hv_get_avail_to_write_percent(
+ const struct hv_ring_buffer_info *rbi)
+{
+ u32 avail_write = hv_get_bytes_to_write(rbi);
+
+ return reciprocal_divide(
+ (avail_write << 3) + (avail_write << 1),
+ rbi->ring_size_div10_reciprocal);
+}
+
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -163,6 +175,7 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
* 4 . 0 (Windows 10)
+ * 5 . 0 (Newer Windows 10)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
@@ -170,10 +183,11 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_INVAL -1
-#define VERSION_CURRENT VERSION_WIN10
+#define VERSION_CURRENT VERSION_WIN10_V5
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -570,7 +584,14 @@ struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
u32 target_vcpu; /* The VCPU the host should respond to */
- u64 interrupt_page;
+ union {
+ u64 interrupt_page;
+ struct {
+ u8 msg_sint;
+ u8 padding1[3];
+ u32 padding2;
+ };
+ };
u64 monitor_page1;
u64 monitor_page2;
} __packed;
@@ -585,6 +606,19 @@ struct vmbus_channel_tl_connect_request {
struct vmbus_channel_version_response {
struct vmbus_channel_message_header header;
u8 version_supported;
+
+ u8 connection_state;
+ u16 padding;
+
+ /*
+ * On new hosts that support VMBus protocol 5.0, we must use
+ * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
+ * and for subsequent messages, we must use the Message Connection ID
+ * field in the host-returned Version Response Message.
+ *
+ * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
+ */
+ u32 msg_conn_id;
} __packed;
enum vmbus_channel_state {
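
hv_get_avail_to_write_percent() above avoids a runtime division: (x << 3) + (x << 1) is x * 10, and dividing that by the precomputed reciprocal of ring_size / 10 yields the free space as a percentage. A stand-alone (non-kernel) illustration of the arithmetic, with example numbers chosen here:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_size = 64 * 1024;			/* example ring size  */
	uint32_t avail = 40 * 1024;			/* example free bytes */
	uint32_t times_ten = (avail << 3) + (avail << 1);

	assert(times_ten == avail * 10);		/* the shift trick */

	/* reciprocal_divide() approximates this integer division: */
	printf("%u%% available\n", times_ten / (ring_size / 10));	/* "62% available" */
	return 0;
}
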
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
deleted file mode 100644
index 5388326fbbff..000000000000
--- a/include/linux/i2c-pnx.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Header file for I2C support on PNX010x/4008.
- *
- * Author: Dennis Kovalev <dkovalev@ru.mvista.com>
- *
- * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#ifndef __I2C_PNX_H__
-#define __I2C_PNX_H__
-
-struct platform_device;
-struct clk;
-
-struct i2c_pnx_mif {
- int ret; /* Return value */
- int mode; /* Interface mode */
- struct completion complete; /* I/O completion */
- struct timer_list timer; /* Timeout */
- u8 * buf; /* Data buffer */
- int len; /* Length of data buffer */
- int order; /* RX Bytes to order via TX */
-};
-
-struct i2c_pnx_algo_data {
- void __iomem *ioaddr;
- struct i2c_pnx_mif mif;
- int last;
- struct clk *clk;
- struct i2c_adapter adapter;
- int irq;
- u32 timeout;
-};
-
-#endif /* __I2C_PNX_H__ */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 44ad14e016b5..254cd34eeae2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -394,7 +394,6 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @addr: stored in i2c_client.addr
* @dev_name: Overrides the default <busnr>-<addr> dev_name if set
* @platform_data: stored in i2c_client.dev.platform_data
- * @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
* @properties: additional device properties for the device
@@ -419,7 +418,6 @@ struct i2c_board_info {
unsigned short addr;
const char *dev_name;
void *platform_data;
- struct dev_archdata *archdata;
struct device_node *of_node;
struct fwnode_handle *fwnode;
const struct property_entry *properties;
@@ -903,6 +901,9 @@ extern const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
struct i2c_client *client);
+int of_i2c_get_board_info(struct device *dev, struct device_node *node,
+ struct i2c_board_info *info);
+
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -927,6 +928,13 @@ static inline const struct of_device_id
return NULL;
}
+static inline int of_i2c_get_board_info(struct device *dev,
+ struct device_node *node,
+ struct i2c_board_info *info)
+{
+ return -ENOTSUPP;
+}
+
#endif /* CONFIG_OF */
#if IS_ENABLED(CONFIG_ACPI)
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ca9d34feb572..c74b0321922a 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -961,7 +961,7 @@ __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
typedef struct {
const char *name;
umode_t mode;
- const struct file_operations *proc_fops;
+ int (*show)(struct seq_file *, void *);
} ide_proc_entry_t;
void proc_ide_create(void);
@@ -973,8 +973,8 @@ void ide_proc_unregister_port(ide_hwif_t *);
void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-extern const struct file_operations ide_capacity_proc_fops;
-extern const struct file_operations ide_geometry_proc_fops;
+int ide_capacity_proc_show(struct seq_file *m, void *v);
+int ide_geometry_proc_show(struct seq_file *m, void *v);
#else
static inline void proc_ide_create(void) { ; }
static inline void proc_ide_destroy(void) { ; }
@@ -1508,8 +1508,6 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
hwif->hwif_data = data;
}
-extern void ide_toggle_bounce(ide_drive_t *drive, int on);
-
u64 ide_get_lba_addr(struct ide_cmd *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 02639ebea2f0..7843b98e1c6e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -50,6 +50,7 @@ struct br_ip_list {
#define BR_VLAN_TUNNEL BIT(13)
#define BR_BCAST_FLOOD BIT(14)
#define BR_NEIGH_SUPPRESS BIT(15)
+#define BR_ISOLATED BIT(16)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
@@ -93,11 +94,39 @@ static inline bool br_multicast_router(const struct net_device *dev)
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
bool br_vlan_enabled(const struct net_device *dev);
+int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid);
+int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
return false;
}
+
+static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+{
+ return -1;
+}
+
+static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -1;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BRIDGE)
+struct net_device *br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid);
+#else
+static inline struct net_device *
+br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ return NULL;
+}
#endif
#endif
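
The br_vlan_get_pvid(), br_vlan_get_info() and br_fdb_find_port() declarations added above give drivers a way to query bridge state without reaching into bridge internals; the stubs return -1 or NULL when the bridge or VLAN filtering code is not built in. A hedged sketch of a caller (locking requirements such as RTNL are omitted, and the names are illustrative):

/* Illustrative only: report whether a MAC is already known on the bridge's PVID. */
static void sketch_query_bridge(struct net_device *br_dev, const unsigned char *addr)
{
	struct net_device *port;
	u16 pvid;

	if (!br_vlan_enabled(br_dev))
		return;

	if (br_vlan_get_pvid(br_dev, &pvid))
		return;			/* no PVID, or bridge support not built in */

	port = br_fdb_find_port(br_dev, addr, pvid);
	if (port)
		netdev_info(br_dev, "%pM is behind port %s\n", addr, port->name);
}
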
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 4cb7aeeafce0..2e55e4cdbd8a 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -21,7 +21,7 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
- void *fwd_priv;
+ void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
@@ -61,10 +61,6 @@ extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack);
-extern void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast);
-
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
extern int macvlan_link_register(struct rtnl_link_ops *ops);
@@ -86,4 +82,27 @@ macvlan_dev_real_dev(const struct net_device *dev)
}
#endif
+static inline void *macvlan_accel_priv(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->accel_priv;
+}
+
+static inline bool macvlan_supports_dest_filter(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+ macvlan->mode == MACVLAN_MODE_VEPA ||
+ macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+
+static inline int macvlan_release_l2fw_offload(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->accel_priv = NULL;
+ return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index fd00170b494f..3d2996dc7d85 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -22,7 +22,7 @@
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
-bool tun_is_xdp_buff(void *ptr);
+bool tun_is_xdp_frame(void *ptr);
void *tun_xdp_to_ptr(void *ptr);
void *tun_ptr_to_xdp(void *ptr);
void tun_ptr_free(void *ptr);
@@ -39,7 +39,7 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline bool tun_is_xdp_buff(void *ptr)
+static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d11f41d5269f..83ea4df6ab81 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -331,7 +331,7 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns error if skb_cow_head failes.
+ * Returns error if skb_cow_head fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
@@ -379,7 +379,7 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns error if skb_cow_head failes.
+ * Returns error if skb_cow_head fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
@@ -663,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -673,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (likely(!eth_type_vlan(protocol)))
return false;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -690,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 1fc7abd28b0b..730ead1a46df 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -127,7 +127,7 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type) \
+ _storagebits, _shift, _extend_name, _type, _mask_all) \
{ \
.type = (_type), \
.differential = (_channel2 == -1 ? 0 : 1), \
@@ -139,7 +139,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all = _mask_all, \
.scan_index = (_si), \
.scan_type = { \
.sign = 'u', \
@@ -153,25 +153,35 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
_storagebits, _shift) \
__AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
_storagebits, _shift) \
__AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE)
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
_storagebits, _shift) \
__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, 0)
#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
__AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP)
+ _storagebits, _shift, NULL, IIO_TEMP, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
_shift) \
__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE)
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#endif
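
The extra _mask_all argument to __AD_SD_CHANNEL() lets a channel opt out of the shared sampling_frequency attribute, and AD_SD_CHANNEL_NO_SAMP_FREQ() is the shorthand for that case. A hypothetical channel table mixing the variants (scan indices, addresses and bit widths are made up):

/* Hypothetical channel map: channel 1 does not expose sampling_frequency. */
static const struct iio_chan_spec sketch_channels[] = {
	AD_SD_CHANNEL(0, 0, 0x0, 24, 32, 0),
	AD_SD_CHANNEL_NO_SAMP_FREQ(1, 1, 0x1, 24, 32, 0),
	AD_SD_TEMP_CHANNEL(2, 0x2, 24, 32, 0),
	IIO_CHAN_SOFT_TIMESTAMP(3),
};
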
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
index e7dc7a542a4e..0da298b41737 100644
--- a/include/linux/iio/adc/stm32-dfsdm-adc.h
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -9,6 +9,8 @@
#ifndef STM32_DFSDM_ADC_H
#define STM32_DFSDM_ADC_H
+#include <linux/iio/iio.h>
+
int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
int (*cb)(const void *data, size_t size,
void *private),
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index b9e22b7e2f28..d1171db23742 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*set_length)(struct iio_buffer *buffer, int length);
+ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
*/
struct iio_buffer {
/** @length: Number of datums in buffer. */
- int length;
+ unsigned int length;
/** @bytes_per_datum: Size of individual datum including timestamp. */
- int bytes_per_datum;
+ size_t bytes_per_datum;
/**
* @access: Buffer access functions associated with the
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..ce16445411ac
--- /dev/null
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -0,0 +1,180 @@
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/iio/iio.h>
+#include <linux/irqreturn.h>
+#include <linux/mfd/cros_ec.h>
+
+enum {
+ CROS_EC_SENSOR_X,
+ CROS_EC_SENSOR_Y,
+ CROS_EC_SENSOR_Z,
+ CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * 4 16 bit channels are allowed.
+ * Good enough for current sensors; they use up to 3 16 bit vectors.
+ */
+#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
+
+/* Minimum sampling period to use when device is suspending */
+#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec: cros EC device structure
+ * @cmd_lock: lock used to prevent simultaneous access to the
+ * commands.
+ * @msg: cros EC command structure
+ * @param: motion sensor parameters structure
+ * @resp: motion sensor response structure
+ * @type: type of motion sensor
+ * @loc: location where the motion sensor is placed
+ * @calib: calibration parameters. Note that trigger
+ * captured data will always provide the calibrated
+ * data
+ * @samples: static array to hold data from a single capture.
+ * For each channel we need 2 bytes, except for
+ * the timestamp. The timestamp is always last and
+ * is always 8-byte aligned.
+ * @read_ec_sensors_data: function used for accessing sensors values
+ * @curr_sampl_freq: current sampling period
+ */
+struct cros_ec_sensors_core_state {
+ struct cros_ec_device *ec;
+ struct mutex cmd_lock;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense param;
+ struct ec_response_motion_sense *resp;
+
+ enum motionsensor_type type;
+ enum motionsensor_location loc;
+
+ s16 calib[CROS_EC_SENSOR_MAX_AXIS];
+
+ u8 samples[CROS_EC_SAMPLE_SIZE];
+
+ int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data);
+
+ int curr_sampl_freq;
+};
+
+/**
+ * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the safe function for reading the EC data. It guarantees that the
+ * data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+struct platform_device;
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev: platform device created for the sensors
+ * @indio_dev: iio device structure of the device
+ * @physical_device: true if the device refers to a physical device
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev, bool physical_device);
+
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq: the interrupt number.
+ * @p: a pointer to the poll function.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @st: pointer to state information for device
+ * @opt_length: optional length to reduce the response size, useful on the data
+ * path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+ u16 opt_length);
+
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: will contain one element making up the returned value
+ * @val2: will contain another element making up the returned value
+ * @mask: specifies which values to be requested
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: first part of value to write
+ * @val2: second part of value to write
+ * @mask: specifies which values to write
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+
+extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
+
+/* List of extended channel specification for all sensors */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+
+#endif /* __CROS_EC_SENSORS_CORE_H */
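
The new cros_ec_sensors_core.h is meant to be embedded by the individual ChromeOS EC sensor drivers. A hedged skeleton of how a driver might build on it (names are invented, the iio_info and channel wiring is omitted, and the core state is assumed to sit at the start of the IIO private area):

/* Illustrative skeleton built on the core helpers above. */
struct sketch_ec_sensor_state {
	struct cros_ec_sensors_core_state core;	/* assumed to come first */
};

static int sketch_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val, int *val2, long mask)
{
	struct sketch_ec_sensor_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->core.cmd_lock);
	ret = cros_ec_sensors_core_read(&st->core, chan, val, val2, mask);
	mutex_unlock(&st->core.cmd_lock);

	return ret;
}

static int sketch_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;

	indio_dev = devm_iio_device_alloc(&pdev->dev,
					  sizeof(struct sketch_ec_sensor_state));
	if (!indio_dev)
		return -ENOMEM;

	/* true: this IIO device corresponds to a physical EC sensor */
	return cros_ec_sensors_core_init(pdev, indio_dev, true);
}
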
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 11579fd4126e..a74cb177dc6f 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -183,18 +183,18 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: sign: 's' or 'u' to specify signed or unsigned
- * realbits: Number of valid bits of data
- * storagebits: Realbits + padding
- * shift: Shift right by this before masking out
- * realbits.
- * repeat: Number of times real/storage bits
- * repeats. When the repeat element is
- * more than 1, then the type element in
- * sysfs will show a repeat value.
- * Otherwise, the number of repetitions is
- * omitted.
- * endianness: little or big endian
+ * @scan_type: struct describing the scan type
+ * @scan_type.sign: 's' or 'u' to specify signed or unsigned
+ * @scan_type.realbits: Number of valid bits of data
+ * @scan_type.storagebits: Realbits + padding
+ * @scan_type.shift: Shift right by this before masking out
+ * realbits.
+ * @scan_type.repeat: Number of times real/storage bits repeats.
+ * When the repeat element is more than 1, then
+ * the type element in sysfs will show a repeat
+ * value. Otherwise, the number of repetitions
+ * is omitted.
+ * @scan_type.endianness: little or big endian
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index e16fe7d44a71..27650f1bff3d 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -139,6 +139,7 @@ struct in_ifaddr {
__be32 ifa_local;
__be32 ifa_address;
__be32 ifa_mask;
+ __u32 ifa_rt_priority;
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_prefixlen;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index ef169d67df92..1df940196ab2 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -121,7 +121,6 @@
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5426627f9c55..eeceac3376fc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -432,11 +432,18 @@ extern bool force_irqthreads;
#define force_irqthreads (0)
#endif
-#ifndef __ARCH_SET_SOFTIRQ_PENDING
-#define set_softirq_pending(x) (local_softirq_pending() = (x))
-#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#ifndef local_softirq_pending
+
+#ifndef local_softirq_pending_ref
+#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif
+#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
+#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
+#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))
+
+#endif /* local_softirq_pending */
+
/* Some architectures might implement lazy enabling/disabling of
* interrupts. In some cases, such as stop_machine, we might want
* to ensure that after a local_irq_disable(), interrupts have
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 19a07de28212..a044a824da85 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
+struct address_space;
struct fiemap_extent_info;
struct inode;
struct iov_iter;
@@ -18,6 +19,7 @@ struct vm_fault;
#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
#define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */
+#define IOMAP_INLINE 0x05 /* data inline in the inode */
/*
* Flags for all iomap mappings:
@@ -26,15 +28,19 @@ struct vm_fault;
* written data and requires fdatasync to commit them to persistent storage.
*/
#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
-#define IOMAP_F_BOUNDARY 0x02 /* mapping ends at metadata boundary */
-#define IOMAP_F_DIRTY 0x04 /* uncommitted metadata */
+#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
/*
* Flags that only need to be reported for IOMAP_REPORT requests:
*/
#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
#define IOMAP_F_SHARED 0x20 /* block shared with another file */
-#define IOMAP_F_DATA_INLINE 0x40 /* data inline in the inode */
+
+/*
+ * Flags from 0x1000 up are for file system specific usage:
+ */
+#define IOMAP_F_PRIVATE 0x1000
+
/*
* Magic value for addr:
@@ -59,7 +65,7 @@ struct iomap {
#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
-#define IOMAP_NOWAIT (1 << 5) /* Don't wait for writeback */
+#define IOMAP_NOWAIT (1 << 5) /* do not block */
struct iomap_ops {
/*
@@ -95,6 +101,8 @@ loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
+sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
+ const struct iomap_ops *ops);
/*
* Flags for direct I/O ->end_io:
@@ -106,4 +114,15 @@ typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+#ifdef CONFIG_SWAP
+struct file;
+struct swap_info_struct;
+
+int iomap_swapfile_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *pagespan,
+ const struct iomap_ops *ops);
+#else
+# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
+#endif /* CONFIG_SWAP */
+
#endif /* LINUX_IOMAP_H */
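
iomap_bmap() above lets a filesystem serve the legacy ->bmap() address_space operation from its iomap_ops instead of the buffer_head based generic helper. A short sketch, with sketch_iomap_ops standing in for the filesystem's real iomap_ops:

extern const struct iomap_ops sketch_iomap_ops;	/* placeholder */

static sector_t sketch_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &sketch_iomap_ops);
}
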
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
deleted file mode 100644
index 802c90c79d1f..000000000000
--- a/include/linux/iommu-common.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_IOMMU_COMMON_H
-#define _LINUX_IOMMU_COMMON_H
-
-#include <linux/spinlock_types.h>
-#include <linux/device.h>
-#include <asm/page.h>
-
-#define IOMMU_POOL_HASHBITS 4
-#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
-#define IOMMU_ERROR_CODE (~(unsigned long) 0)
-
-struct iommu_pool {
- unsigned long start;
- unsigned long end;
- unsigned long hint;
- spinlock_t lock;
-};
-
-struct iommu_map_table {
- unsigned long table_map_base;
- unsigned long table_shift;
- unsigned long nr_pools;
- void (*lazy_flush)(struct iommu_map_table *);
- unsigned long poolsize;
- struct iommu_pool pools[IOMMU_NR_POOLS];
- u32 flags;
-#define IOMMU_HAS_LARGE_POOL 0x00000001
-#define IOMMU_NO_SPAN_BOUND 0x00000002
-#define IOMMU_NEED_FLUSH 0x00000004
- struct iommu_pool large_pool;
- unsigned long *map;
-};
-
-extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
- unsigned long num_entries,
- u32 table_shift,
- void (*lazy_flush)(struct iommu_map_table *),
- bool large_pool, u32 npools,
- bool skip_span_boundary_check);
-
-extern unsigned long iommu_tbl_range_alloc(struct device *dev,
- struct iommu_map_table *iommu,
- unsigned long npages,
- unsigned long *handle,
- unsigned long mask,
- unsigned int align_order);
-
-extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
- u64 dma_addr, unsigned long npages,
- unsigned long entry);
-
-#endif
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index cb9a9248c8c0..70d01edcbf8b 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_IOMMU_HELPER_H
#define _LINUX_IOMMU_HELPER_H
+#include <linux/bug.h>
#include <linux/kernel.h>
static inline unsigned long iommu_device_max_index(unsigned long size,
@@ -14,9 +15,15 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
return size;
}
-extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
- unsigned long shift,
- unsigned long boundary_size);
+static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+ unsigned long shift, unsigned long boundary_size)
+{
+ BUG_ON(!is_power_of_2(boundary_size));
+
+ shift = (shift + index) & (boundary_size - 1);
+ return shift + nr > boundary_size;
+}
+
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
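
Turning iommu_is_span_boundary() into a static inline makes its check easy to read: an allocation of nr entries, starting at index offset by shift, must not cross a power-of-two boundary_size. A stand-alone (non-kernel) illustration of the same arithmetic with example numbers:

#include <stdbool.h>
#include <stdio.h>

/* Same arithmetic as iommu_is_span_boundary(), minus the BUG_ON(). */
static bool crosses_boundary(unsigned int index, unsigned int nr,
			     unsigned long shift, unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	printf("%d\n", crosses_boundary(12, 8, 0, 16));	/* 1: 12 + 8 crosses 16 */
	printf("%d\n", crosses_boundary(12, 4, 0, 16));	/* 0: ends exactly at 16 */
	return 0;
}
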
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 627efac73e6d..9e30ed6443db 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -77,4 +77,13 @@ extern int ioprio_best(unsigned short aprio, unsigned short bprio);
extern int set_task_ioprio(struct task_struct *task, int ioprio);
+#ifdef CONFIG_BLOCK
+extern int ioprio_check_cap(int ioprio);
+#else
+static inline int ioprio_check_cap(int ioprio)
+{
+ return -ENOTBLK;
+}
+#endif /* CONFIG_BLOCK */
+
#endif
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 8b0626cec980..41f5c086f670 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -23,8 +23,10 @@
struct module;
struct device;
-/* Opaque type for a IPMI message user. One of these is needed to
- send and receive messages. */
+/*
+ * Opaque type for an IPMI message user. One of these is needed to
+ * send and receive messages.
+ */
typedef struct ipmi_user *ipmi_user_t;
/*
@@ -37,28 +39,36 @@ typedef struct ipmi_user *ipmi_user_t;
struct ipmi_recv_msg {
struct list_head link;
- /* The type of message as defined in the "Receive Types"
- defines above. */
+ /*
+ * The type of message as defined in the "Receive Types"
+ * defines above.
+ */
int recv_type;
- ipmi_user_t user;
+ struct ipmi_user *user;
struct ipmi_addr addr;
long msgid;
struct kernel_ipmi_msg msg;
- /* The user_msg_data is the data supplied when a message was
- sent, if this is a response to a sent message. If this is
- not a response to a sent message, then user_msg_data will
- be NULL. If the user above is NULL, then this will be the
- intf. */
+ /*
+ * The user_msg_data is the data supplied when a message was
+ * sent, if this is a response to a sent message. If this is
+ * not a response to a sent message, then user_msg_data will
+ * be NULL. If the user above is NULL, then this will be the
+ * intf.
+ */
void *user_msg_data;
- /* Call this when done with the message. It will presumably free
- the message and do any other necessary cleanup. */
+ /*
+ * Call this when done with the message. It will presumably free
+ * the message and do any other necessary cleanup.
+ */
void (*done)(struct ipmi_recv_msg *msg);
- /* Place-holder for the data, don't make any assumptions about
- the size or existence of this, since it may change. */
+ /*
+ * Place-holder for the data; don't make any assumptions about
+ * the size or existence of this, since it may change.
+ */
unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
};
@@ -66,54 +76,77 @@ struct ipmi_recv_msg {
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
struct ipmi_user_hndl {
- /* Routine type to call when a message needs to be routed to
- the upper layer. This will be called with some locks held,
- the only IPMI routines that can be called are ipmi_request
- and the alloc/free operations. The handler_data is the
- variable supplied when the receive handler was registered. */
+ /*
+ * Routine type to call when a message needs to be routed to
+ * the upper layer. This will be called with some locks held,
+ * the only IPMI routines that can be called are ipmi_request
+ * and the alloc/free operations. The handler_data is the
+ * variable supplied when the receive handler was registered.
+ */
void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg,
void *user_msg_data);
- /* Called when the interface detects a watchdog pre-timeout. If
- this is NULL, it will be ignored for the user. */
+ /*
+ * Called when the interface detects a watchdog pre-timeout. If
+ * this is NULL, it will be ignored for the user.
+ */
void (*ipmi_watchdog_pretimeout)(void *handler_data);
+
+ /*
+ * If not NULL, called at panic time after the interface has
+ * been set up to handle run to completion.
+ */
+ void (*ipmi_panic_handler)(void *handler_data);
+
+ /*
+ * Called when the interface has been removed. After this returns
+ * the user handle will be invalid. The interface may or may
+ * not be usable when this is called, but it will return errors
+ * if it is not usable.
+ */
+ void (*shutdown)(void *handler_data);
};
/* Create a new user of the IPMI layer on the given interface number. */
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
- ipmi_user_t *user);
+ struct ipmi_user **user);
-/* Destroy the given user of the IPMI layer. Note that after this
- function returns, the system is guaranteed to not call any
- callbacks for the user. Thus as long as you destroy all the users
- before you unload a module, you will be safe. And if you destroy
- the users before you destroy the callback structures, it should be
- safe, too. */
-int ipmi_destroy_user(ipmi_user_t user);
+/*
+ * Destroy the given user of the IPMI layer. Note that after this
+ * function returns, the system is guaranteed to not call any
+ * callbacks for the user. Thus as long as you destroy all the users
+ * before you unload a module, you will be safe. And if you destroy
+ * the users before you destroy the callback structures, it should be
+ * safe, too.
+ */
+int ipmi_destroy_user(struct ipmi_user *user);
/* Get the IPMI version of the BMC we are talking to. */
-int ipmi_get_version(ipmi_user_t user,
+int ipmi_get_version(struct ipmi_user *user,
unsigned char *major,
unsigned char *minor);
-/* Set and get the slave address and LUN that we will use for our
- source messages. Note that this affects the interface, not just
- this user, so it will affect all users of this interface. This is
- so some initialization code can come in and do the OEM-specific
- things it takes to determine your address (if not the BMC) and set
- it for everyone else. Note that each channel can have its own address. */
-int ipmi_set_my_address(ipmi_user_t user,
+/*
+ * Set and get the slave address and LUN that we will use for our
+ * source messages. Note that this affects the interface, not just
+ * this user, so it will affect all users of this interface. This is
+ * so some initialization code can come in and do the OEM-specific
+ * things it takes to determine your address (if not the BMC) and set
+ * it for everyone else. Note that each channel can have its own
+ * address.
+ */
+int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address);
-int ipmi_get_my_address(ipmi_user_t user,
+int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address);
-int ipmi_set_my_LUN(ipmi_user_t user,
+int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN);
-int ipmi_get_my_LUN(ipmi_user_t user,
+int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *LUN);
@@ -130,7 +163,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
* it makes no sense to do it here. However, this can be used if you
* have unusual requirements.
*/
-int ipmi_request_settime(ipmi_user_t user,
+int ipmi_request_settime(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
@@ -148,7 +181,7 @@ int ipmi_request_settime(ipmi_user_t user,
* change as the system changes, so don't use it unless you REALLY
* have to.
*/
-int ipmi_request_supply_msgs(ipmi_user_t user,
+int ipmi_request_supply_msgs(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
@@ -164,7 +197,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
* way. This is useful if you need to spin waiting for something to
* happen in the IPMI driver.
*/
-void ipmi_poll_interface(ipmi_user_t user);
+void ipmi_poll_interface(struct ipmi_user *user);
/*
* When commands come in to the SMS, the user can register to receive
@@ -175,11 +208,11 @@ void ipmi_poll_interface(ipmi_user_t user);
* error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to
* mean all channels.
*/
-int ipmi_register_for_cmd(ipmi_user_t user,
+int ipmi_register_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans);
-int ipmi_unregister_for_cmd(ipmi_user_t user,
+int ipmi_unregister_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans);
@@ -210,8 +243,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
*
* See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
*/
-int ipmi_get_maintenance_mode(ipmi_user_t user);
-int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+int ipmi_get_maintenance_mode(struct ipmi_user *user);
+int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode);
/*
* When the user is created, it will not receive IPMI events by
@@ -219,7 +252,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
* The first user that sets this to TRUE will receive all events that
* have been queued while no one was waiting for events.
*/
-int ipmi_set_gets_events(ipmi_user_t user, bool val);
+int ipmi_set_gets_events(struct ipmi_user *user, bool val);
/*
* Called when a new SMI is registered. This will also be called on
@@ -229,14 +262,18 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val);
struct ipmi_smi_watcher {
struct list_head link;
- /* You must set the owner to the current module, if you are in
- a module (generally just set it to "THIS_MODULE"). */
+ /*
+ * You must set the owner to the current module, if you are in
+ * a module (generally just set it to "THIS_MODULE").
+ */
struct module *owner;
- /* These two are called with read locks held for the interface
- the watcher list. So you can add and remove users from the
- IPMI interface, send messages, etc., but you cannot add
- or remove SMI watchers or SMI interfaces. */
+ /*
+ * These two are called with read locks held for the interface
+ * the watcher list. So you can add and remove users from the
+ * IPMI interface, send messages, etc., but you cannot add
+ * or remove SMI watchers or SMI interfaces.
+ */
void (*new_smi)(int if_num, struct device *dev);
void (*smi_gone)(int if_num);
};
@@ -244,8 +281,10 @@ struct ipmi_smi_watcher {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher);
-/* The following are various helper functions for dealing with IPMI
- addresses. */
+/*
+ * The following are various helper functions for dealing with IPMI
+ * addresses.
+ */
/* Return the maximum length of an IPMI address given it's type. */
unsigned int ipmi_addr_length(int addr_type);
@@ -291,7 +330,7 @@ struct ipmi_smi_info {
union ipmi_smi_info_union addr_info;
};
-/* This is to get the private info of ipmi_smi_t */
+/* This is to get the private info of struct ipmi_smi */
extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
#endif /* __LINUX_IPMI_H */
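
A minimal sketch of the converted user API: the opaque ipmi_user_t handle becomes a plain struct ipmi_user pointer, and the handler table gains an optional shutdown() callback. All my_* names below are placeholders.

	static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
	{
		/* process msg->msg / msg->msg_data, then hand the message back */
		ipmi_free_recv_msg(msg);
	}

	static void my_shutdown(void *handler_data)
	{
		/* the user handle is invalid once this returns */
	}

	static const struct ipmi_user_hndl my_hndl = {
		.ipmi_recv_hndl	= my_recv,
		.shutdown	= my_shutdown,
	};

	static struct ipmi_user *my_user;

	static int my_attach(void)
	{
		return ipmi_create_user(0, &my_hndl, NULL, &my_user);
	}

	static void my_detach(void)
	{
		ipmi_destroy_user(my_user);
	}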
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index af457b5a689e..7d5fd38d5282 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -22,8 +22,10 @@
struct device;
-/* This files describes the interface for IPMI system management interface
- drivers to bind into the IPMI message handler. */
+/*
+ * This file describes the interface for IPMI system management interface
+ * drivers to bind into the IPMI message handler.
+ */
/* Structure for the low-level drivers. */
typedef struct ipmi_smi *ipmi_smi_t;
@@ -61,12 +63,20 @@ struct ipmi_smi_msg {
struct ipmi_smi_handlers {
struct module *owner;
- /* The low-level interface cannot start sending messages to
- the upper layer until this function is called. This may
- not be NULL, the lower layer must take the interface from
- this call. */
- int (*start_processing)(void *send_info,
- ipmi_smi_t new_intf);
+ /*
+ * The low-level interface cannot start sending messages to
+ * the upper layer until this function is called. This may
+	 * not be NULL; the lower layer must take the interface from
+ * this call.
+ */
+ int (*start_processing)(void *send_info,
+ struct ipmi_smi *new_intf);
+
+ /*
+ * When called, the low-level interface should disable all
+	 * processing; it should be completely shut down when it returns.
+ */
+ void (*shutdown)(void *send_info);
/*
* Get the detailed private info of the low level interface and store
@@ -75,25 +85,31 @@ struct ipmi_smi_handlers {
*/
int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
- /* Called to enqueue an SMI message to be sent. This
- operation is not allowed to fail. If an error occurs, it
- should report back the error in a received message. It may
- do this in the current call context, since no write locks
- are held when this is run. Message are delivered one at
- a time by the message handler, a new message will not be
- delivered until the previous message is returned. */
+ /*
+ * Called to enqueue an SMI message to be sent. This
+ * operation is not allowed to fail. If an error occurs, it
+ * should report back the error in a received message. It may
+ * do this in the current call context, since no write locks
+	 * are held when this is run. Messages are delivered one at
+ * a time by the message handler, a new message will not be
+ * delivered until the previous message is returned.
+ */
void (*sender)(void *send_info,
struct ipmi_smi_msg *msg);
- /* Called by the upper layer to request that we try to get
- events from the BMC we are attached to. */
+ /*
+ * Called by the upper layer to request that we try to get
+ * events from the BMC we are attached to.
+ */
void (*request_events)(void *send_info);
- /* Called by the upper layer when some user requires that the
- interface watch for events, received messages, watchdog
- pretimeouts, or not. Used by the SMI to know if it should
- watch for these. This may be NULL if the SMI does not
- implement it. */
+ /*
+ * Called by the upper layer when some user requires that the
+ * interface watch for events, received messages, watchdog
+ * pretimeouts, or not. Used by the SMI to know if it should
+ * watch for these. This may be NULL if the SMI does not
+ * implement it.
+ */
void (*set_need_watch)(void *send_info, bool enable);
/*
@@ -101,30 +117,29 @@ struct ipmi_smi_handlers {
*/
void (*flush_messages)(void *send_info);
- /* Called when the interface should go into "run to
- completion" mode. If this call sets the value to true, the
- interface should make sure that all messages are flushed
- out and that none are pending, and any new requests are run
- to completion immediately. */
+ /*
+ * Called when the interface should go into "run to
+ * completion" mode. If this call sets the value to true, the
+ * interface should make sure that all messages are flushed
+ * out and that none are pending, and any new requests are run
+ * to completion immediately.
+ */
void (*set_run_to_completion)(void *send_info, bool run_to_completion);
- /* Called to poll for work to do. This is so upper layers can
- poll for operations during things like crash dumps. */
+ /*
+ * Called to poll for work to do. This is so upper layers can
+ * poll for operations during things like crash dumps.
+ */
void (*poll)(void *send_info);
- /* Enable/disable firmware maintenance mode. Note that this
- is *not* the modes defined, this is simply an on/off
- setting. The message handler does the mode handling. Note
- that this is called from interrupt context, so it cannot
- block. */
+ /*
+ * Enable/disable firmware maintenance mode. Note that this
+	 * is *not* the modes defined; this is simply an on/off
+ * setting. The message handler does the mode handling. Note
+ * that this is called from interrupt context, so it cannot
+ * block.
+ */
void (*set_maintenance_mode)(void *send_info, bool enable);
-
- /* Tell the handler that we are using it/not using it. The
- message handler get the modules that this handler belongs
- to; this function lets the SMI claim any modules that it
- uses. These may be NULL if this is not required. */
- int (*inc_usecount)(void *send_info);
- void (*dec_usecount)(void *send_info);
};
struct ipmi_device_id {
@@ -143,7 +158,8 @@ struct ipmi_device_id {
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
-/* Take a pointer to an IPMI response and extract device id information from
+/*
+ * Take a pointer to an IPMI response and extract device id information from
* it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from
* a SI response.
*/
@@ -187,12 +203,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
return 0;
}
-/* Add a low-level interface to the IPMI driver. Note that if the
- interface doesn't know its slave address, it should pass in zero.
- The low-level interface should not deliver any messages to the
- upper layer until the start_processing() function in the handlers
- is called, and the lower layer must get the interface from that
- call. */
+/*
+ * Add a low-level interface to the IPMI driver. Note that if the
+ * interface doesn't know its slave address, it should pass in zero.
+ * The low-level interface should not deliver any messages to the
+ * upper layer until the start_processing() function in the handlers
+ * is called, and the lower layer must get the interface from that
+ * call.
+ */
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct device *dev,
@@ -202,7 +220,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
* Remove a low-level interface from the IPMI driver. This will
* return an error if the interface is still in use by a user.
*/
-int ipmi_unregister_smi(ipmi_smi_t intf);
+void ipmi_unregister_smi(struct ipmi_smi *intf);
/*
* The lower layer reports received messages through this interface.
@@ -210,11 +228,11 @@ int ipmi_unregister_smi(ipmi_smi_t intf);
* the lower layer gets an error sending a message, it should format
* an error response in the message response.
*/
-void ipmi_smi_msg_received(ipmi_smi_t intf,
+void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
/* The lower layer received a watchdog pre-timeout on interface. */
-void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf);
+void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf);
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void);
static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
@@ -222,13 +240,4 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
msg->done(msg);
}
-#ifdef CONFIG_IPMI_PROC_INTERFACE
-/* Allow the lower layer to add things to the proc filesystem
- directory for this interface. Note that the entry will
- automatically be dstroyed when the interface is destroyed. */
-int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
- const struct file_operations *proc_ops,
- void *data);
-#endif
-
#endif /* __LINUX_IPMI_SMI_H */
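
Sketch of a low-level driver's handler table under the reworked interface (my_* names are placeholders): shutdown() replaces the old inc_usecount()/dec_usecount() pair, and the interface pointer passed to start_processing() must be kept by the lower layer.

	static int my_start_processing(void *send_info, struct ipmi_smi *new_intf)
	{
		struct my_smi *smi = send_info;	/* hypothetical driver state */

		smi->intf = new_intf;
		return 0;
	}

	static void my_smi_shutdown(void *send_info)
	{
		/* stop all processing; nothing may run after this returns */
	}

	static void my_sender(void *send_info, struct ipmi_smi_msg *msg)
	{
		/* queue msg to the hardware; errors are reported via a reply */
	}

	static const struct ipmi_smi_handlers my_handlers = {
		.owner			= THIS_MODULE,
		.start_processing	= my_start_processing,
		.shutdown		= my_smi_shutdown,
		.sender			= my_sender,
		/* remaining callbacks omitted from this sketch */
	};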
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 65916a305f3d..4bd2f34947f4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -512,6 +512,7 @@ enum {
IRQCHIP_SKIP_SET_WAKE = (1 << 4),
IRQCHIP_ONESHOT_SAFE = (1 << 5),
IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
};
#include <linux/irqdesc.h>
@@ -551,7 +552,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
-void irq_move_irq(struct irq_data *data);
+void __irq_move_irq(struct irq_data *data);
+static inline void irq_move_irq(struct irq_data *data)
+{
+ if (unlikely(irqd_is_setaffinity_pending(data)))
+ __irq_move_irq(data);
+}
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
index 4954948d1973..6e8895cd4d92 100644
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -18,15 +18,11 @@
*/
#ifndef __ARCH_IRQ_STAT
-extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
-#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */
+#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu))
#endif
- /* arch independent irq_stat fields */
-#define local_softirq_pending() \
- __IRQ_STAT(smp_processor_id(), __softirq_pending)
-
- /* arch dependent irq_stat fields */
+/* arch dependent irq_stat fields */
#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
#endif /* __irq_cpustat_h */
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index 0380d899b955..630a57e55db6 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -1,14 +1,11 @@
-#ifndef _LINUX_IRQ_SIM_H
-#define _LINUX_IRQ_SIM_H
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
*/
+#ifndef _LINUX_IRQ_SIM_H
+#define _LINUX_IRQ_SIM_H
+
#include <linux/irq_work.h>
#include <linux/device.h>
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index f5af3b594e6e..cbb872c1b607 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -587,6 +587,7 @@ struct fwnode_handle;
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
+int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
static inline bool gic_enable_sre(void)
{
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 48c7e86bb556..dccfa65aee96 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -301,7 +301,13 @@ static inline struct irq_domain *irq_find_matching_host(struct device_node *node
static inline struct irq_domain *irq_find_host(struct device_node *node)
{
- return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+ struct irq_domain *d;
+
+ d = irq_find_matching_host(node, DOMAIN_BUS_WIRED);
+ if (!d)
+ d = irq_find_matching_host(node, DOMAIN_BUS_ANY);
+
+ return d;
}
/**
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index 11b57c485854..d75e1ad72964 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -50,7 +50,7 @@ struct capi_ctr {
u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb);
char *(*procinfo)(struct capi_ctr *);
- const struct file_operations *proc_fops;
+ int (*proc_show)(struct seq_file *, void *);
/* filled in before calling ready callback */
u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 80db19d3a505..8de55e4b5ee9 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -28,6 +28,12 @@ struct vmcore {
loff_t offset;
};
+struct vmcoredd_node {
+ struct list_head list; /* List of dumps */
+ void *buf; /* Buffer containing device's dump */
+ unsigned int size; /* Size of the buffer */
+};
+
#ifdef CONFIG_PROC_KCORE
extern void kclist_add(struct kcore_list *, void *, size_t, int type);
#else
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 3ecf6f5e3a5f..b76a1807028d 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -22,13 +22,27 @@ enum kcov_mode {
KCOV_MODE_TRACE_CMP = 3,
};
+#define KCOV_IN_CTXSW (1 << 30)
+
void kcov_task_init(struct task_struct *t);
void kcov_task_exit(struct task_struct *t);
+#define kcov_prepare_switch(t) \
+do { \
+ (t)->kcov_mode |= KCOV_IN_CTXSW; \
+} while (0)
+
+#define kcov_finish_switch(t) \
+do { \
+ (t)->kcov_mode &= ~KCOV_IN_CTXSW; \
+} while (0)
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
static inline void kcov_task_exit(struct task_struct *t) {}
+static inline void kcov_prepare_switch(struct task_struct *t) {}
+static inline void kcov_finish_switch(struct task_struct *t) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6a1eb0b0aad9..d23123238534 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -29,6 +29,7 @@
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
+#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
@@ -542,6 +543,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
+ SYSTEM_SUSPEND,
} system_state;
/* This cannot be an enum because some may be used in assembly source. */
@@ -964,6 +966,22 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
"pointer type mismatch in container_of()"); \
((type *)(__mptr - offsetof(type, member))); })
+/**
+ * container_of_safe - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
+ */
+#define container_of_safe(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
+ ((type *)(__mptr - offsetof(type, member))); })
+
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
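
A usage sketch for the new container_of_safe() above (struct foo and foo_from_node() are hypothetical): an ERR_PTR() or NULL member pointer is propagated instead of being turned into a bogus container pointer.

	struct foo {
		int x;
		struct list_head node;
	};

	static struct foo *foo_from_node(struct list_head *node)
	{
		/* node may legitimately be NULL or ERR_PTR(-ENOENT) here */
		return container_of_safe(node, struct foo, node);
	}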
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index cb311798e0bc..0738389b42b6 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: ks0108.h
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _KS0108_H_
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 44368b19b27e..161e8164abcf 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -37,17 +37,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-static inline struct stable_node *page_stable_node(struct page *page)
-{
- return PageKsm(page) ? page_rmapping(page) : NULL;
-}
-
-static inline void set_page_stable_node(struct page *page,
- struct stable_node *stable_node)
-{
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
-}
-
/*
* When do_swap_page() first faults in from swap what used to be a KSM page,
* no problem, it will be assigned to this vma's anon_vma; but thereafter,
@@ -89,12 +78,6 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
-static inline int page_referenced_ksm(struct page *page,
- struct mem_cgroup *memcg, unsigned long *vm_flags)
-{
- return 0;
-}
-
static inline void rmap_walk_ksm(struct page *page,
struct rmap_walk_control *rwc)
{
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..2803264c512f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_park_complete(struct task_struct *k);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6930c63126c7..4ee7bc548a83 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -19,6 +19,7 @@
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
@@ -730,13 +731,16 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
+
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
@@ -808,6 +812,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+/*
+ * All architectures that want to use vzalloc currently also
+ * need their own kvm_arch_alloc_vm implementation.
+ */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
return kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -1045,13 +1053,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-#ifdef CONFIG_S390
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
-#elif defined(CONFIG_ARM64)
-#define KVM_MAX_IRQ_ROUTES 4096
-#else
-#define KVM_MAX_IRQ_ROUTES 1024
-#endif
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
@@ -1276,4 +1278,13 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end);
+#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+
#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1795fecdea17..8b8946dd63b9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -125,9 +125,8 @@ enum {
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */
ATA_DEF_QUEUE = 1,
- /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
- ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
+ ATA_TAG_INTERNAL = ATA_MAX_QUEUE,
ATA_SHORT_PAUSE = 16,
ATAPI_MAX_DRAIN = 16 << 10,
@@ -637,7 +636,8 @@ struct ata_queued_cmd {
u8 cdb[ATAPI_CDB_LEN];
unsigned long flags; /* ATA_QCFLAG_xxx */
- unsigned int tag;
+ unsigned int tag; /* libata core tag */
+ unsigned int hw_tag; /* driver tag */
unsigned int n_elem;
unsigned int orig_n_elem;
@@ -849,9 +849,9 @@ struct ata_port {
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
- struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
+ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
unsigned long sas_tag_allocated; /* for sas tag allocation only */
- unsigned int qc_active;
+ u64 qc_active;
int nr_active_links; /* #links with active qcs */
unsigned int sas_last_tag; /* track next tag hw expects */
@@ -1130,10 +1130,11 @@ extern void ata_sas_async_probe(struct ata_port *ap);
extern int ata_sas_sync_probe(struct ata_port *ap);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
-extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern int sata_scr_valid(struct ata_link *link);
extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -1184,7 +1185,7 @@ extern void ata_id_c_string(const u16 *id, unsigned char *s,
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
@@ -1359,7 +1360,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
- .eh_timed_out = ata_scsi_timed_out, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity, \
.sdev_attrs = ata_common_sdev_attrs
@@ -1485,14 +1485,14 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
-static inline unsigned int ata_tag_valid(unsigned int tag)
+static inline bool ata_tag_internal(unsigned int tag)
{
- return (tag < ATA_MAX_QUEUE) ? 1 : 0;
+ return tag == ATA_TAG_INTERNAL;
}
-static inline unsigned int ata_tag_internal(unsigned int tag)
+static inline bool ata_tag_valid(unsigned int tag)
{
- return tag == ATA_TAG_INTERNAL;
+ return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
}
/*
@@ -1655,7 +1655,7 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
unsigned int tag)
{
- if (likely(ata_tag_valid(tag)))
+ if (ata_tag_valid(tag))
return &ap->qcmd[tag];
return NULL;
}
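
Illustration of the new tag layout (qc_for_tag() is a hypothetical wrapper): hardware tags 0..ATA_MAX_QUEUE-1 map one-to-one to libata tags, and the internal command now occupies the extra slot qcmd[ATA_TAG_INTERNAL] rather than stealing the last hardware tag.

	static struct ata_queued_cmd *qc_for_tag(struct ata_port *ap, unsigned int tag)
	{
		if (!ata_tag_valid(tag))	/* 0..ATA_MAX_QUEUE-1 or ATA_TAG_INTERNAL */
			return NULL;

		return __ata_qc_from_tag(ap, tag);
	}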
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 6e0859b9d4d2..e9e0d1c7eaf5 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -489,7 +489,7 @@ typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
int flags);
-typedef void (nvm_tgt_exit_fn)(void *);
+typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index f68db9e450eb..d7618c41f74c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -24,16 +24,16 @@
#ifndef cond_syscall
#define cond_syscall(x) asm( \
- ".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \
- ".set " VMLINUX_SYMBOL_STR(x) "," \
- VMLINUX_SYMBOL_STR(sys_ni_syscall))
+ ".weak " __stringify(x) "\n\t" \
+ ".set " __stringify(x) "," \
+ __stringify(sys_ni_syscall))
#endif
#ifndef SYSCALL_ALIAS
#define SYSCALL_ALIAS(alias, name) asm( \
- ".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \
- ".set " VMLINUX_SYMBOL_STR(alias) "," \
- VMLINUX_SYMBOL_STR(name))
+ ".globl " __stringify(alias) "\n\t" \
+ ".set " __stringify(alias) "," \
+ __stringify(name))
#endif
#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 4754f01c1abb..aec44b1d9582 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void)
IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}
+typedef int (*klp_shadow_ctor_t)(void *obj,
+ void *shadow_data,
+ void *ctor_data);
+typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);
+
void *klp_shadow_get(void *obj, unsigned long id);
-void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
- size_t size, gfp_t gfp_flags);
-void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
- size_t size, gfp_t gfp_flags);
-void klp_shadow_free(void *obj, unsigned long id);
-void klp_shadow_free_all(unsigned long id);
+void *klp_shadow_alloc(void *obj, unsigned long id,
+ size_t size, gfp_t gfp_flags,
+ klp_shadow_ctor_t ctor, void *ctor_data);
+void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
+ size_t size, gfp_t gfp_flags,
+ klp_shadow_ctor_t ctor, void *ctor_data);
+void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
+void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
#else /* !CONFIG_LIVEPATCH */
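
A hedged sketch of the constructor-based shadow variable API above (the shadow layout and the id value are illustrative): the ctor lets the data be fully initialised before the variable becomes visible through klp_shadow_get().

	struct shadow_data {
		unsigned long count;	/* illustrative payload */
	};

	static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
	{
		struct shadow_data *sd = shadow_data;

		sd->count = 0;
		return 0;
	}

	static void shadow_dtor(void *obj, void *shadow_data)
	{
		/* release anything the ctor set up; nothing to do here */
	}

	/*
	 * attach: klp_shadow_alloc(obj, 1UL, sizeof(struct shadow_data),
	 *                          GFP_KERNEL, shadow_ctor, NULL);
	 * detach: klp_shadow_free(obj, 1UL, shadow_dtor);
	 */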
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 41a1ae010993..2af7f77866d0 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -72,16 +72,13 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value
* @n: parameter
*
- * constant-capable log of base 2 calculation
- * - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
- *
- * selects the appropriately-sized optimised version depending on sizeof(n)
+ * Use this where sparse expects a true constant expression, e.g. for array
+ * indices.
*/
-#define ilog2(n) \
+#define const_ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 2 ? 0 : \
@@ -147,10 +144,26 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- 1 ) : \
- (sizeof(n) <= 4) ? \
- __ilog2_u32(n) : \
- __ilog2_u64(n) \
+ 1) : \
+ -1)
+
+/**
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? \
+ const_ilog2(n) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
)
/**
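
Usage sketch: const_ilog2() is for places that need an integer constant expression, while ilog2() keeps the old constant-or-runtime behaviour (the names below are illustrative).

	#define MY_RING_SIZE	256
	#define MY_RING_SHIFT	const_ilog2(MY_RING_SIZE)	/* 8, usable as an array bound */

	static unsigned int my_order(unsigned long n)
	{
		return ilog2(n);	/* falls back to __ilog2_u32/u64 for non-constant n */
	}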
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9d0b286f3dba..8f1131c8dd54 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -757,6 +757,11 @@
* @type contains the requested communications type.
* @protocol contains the requested protocol.
* @kern set to 1 if a kernel socket.
+ * @socket_socketpair:
+ * Check permissions before creating a fresh pair of sockets.
+ * @socka contains the first socket structure.
+ * @sockb contains the second socket structure.
+ * Return 0 if permission is granted and the connection was established.
* @socket_bind:
* Check permission before socket protocol layer bind operation is
* performed and the socket @sock is bound to the address specified in the
@@ -1656,6 +1661,7 @@ union security_list_options {
int (*socket_create)(int family, int type, int protocol, int kern);
int (*socket_post_create)(struct socket *sock, int family, int type,
int protocol, int kern);
+ int (*socket_socketpair)(struct socket *socka, struct socket *sockb);
int (*socket_bind)(struct socket *sock, struct sockaddr *address,
int addrlen);
int (*socket_connect)(struct socket *sock, struct sockaddr *address,
@@ -1922,6 +1928,7 @@ struct security_hook_heads {
struct hlist_head unix_may_send;
struct hlist_head socket_create;
struct hlist_head socket_post_create;
+ struct hlist_head socket_socketpair;
struct hlist_head socket_bind;
struct hlist_head socket_connect;
struct hlist_head socket_listen;
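
A hypothetical LSM stub wiring up the new hook (mylsm_* names are placeholders), shown only to illustrate the hook signature:

	static int mylsm_socket_socketpair(struct socket *socka, struct socket *sockb)
	{
		return 0;	/* 0 means permission granted */
	}

	static struct security_hook_list mylsm_hooks[] = {
		LSM_HOOK_INIT(socket_socketpair, mylsm_socket_socketpair),
	};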
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index a8ac9cfa014c..5d71e8a8500f 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -33,8 +33,6 @@ struct mdiobb_ops {
struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
- /* reset callback */
- int (*reset)(struct mii_bus *bus);
};
/* The returned bus is not yet registered with the phy layer. */
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
new file mode 100644
index 000000000000..cea443a672cb
--- /dev/null
+++ b/include/linux/mdio-gpio.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MDIO_GPIO_H
+#define __LINUX_MDIO_GPIO_H
+
+#define MDIO_GPIO_MDC 0
+#define MDIO_GPIO_MDIO 1
+#define MDIO_GPIO_MDO 2
+
+#endif
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 2cfffe586885..bfa7114167d7 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -29,7 +29,6 @@ enum mdio_mutex_lock_class {
struct mdio_device {
struct device dev;
- const struct dev_pm_ops *pm_ops;
struct mii_bus *bus;
char modalias[MDIO_NAME_SIZE];
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d99b71bc2c66..6c6fb116e925 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,9 +53,18 @@ enum memcg_memory_event {
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
+ MEMCG_OOM_KILL,
+ MEMCG_SWAP_MAX,
+ MEMCG_SWAP_FAIL,
MEMCG_NR_MEMORY_EVENTS,
};
+enum mem_cgroup_protection {
+ MEMCG_PROT_NONE,
+ MEMCG_PROT_LOW,
+ MEMCG_PROT_MIN,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
@@ -158,6 +167,15 @@ enum memcg_kmem_state {
KMEM_ONLINE,
};
+#if defined(CONFIG_SMP)
+struct memcg_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define MEMCG_PADDING(name) struct memcg_padding name;
+#else
+#define MEMCG_PADDING(name)
+#endif
+
/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
@@ -179,8 +197,7 @@ struct mem_cgroup {
struct page_counter kmem;
struct page_counter tcpmem;
- /* Normal memory consumption range */
- unsigned long low;
+ /* Upper bound of normal memory consumption range */
unsigned long high;
/* Range enforcement for interrupt charges */
@@ -205,9 +222,11 @@ struct mem_cgroup {
int oom_kill_disable;
/* memory.events */
- atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
struct cgroup_file events_file;
+ /* handle for "memory.swap.events" */
+ struct cgroup_file swap_events_file;
+
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -225,19 +244,26 @@ struct mem_cgroup {
* mem_cgroup ? And what type of charges should we move ?
*/
unsigned long move_charge_at_immigrate;
+ /* taken only while moving_account > 0 */
+ spinlock_t move_lock;
+ unsigned long move_lock_flags;
+
+ MEMCG_PADDING(_pad1_);
+
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
atomic_t moving_account;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
struct task_struct *move_lock_task;
- unsigned long move_lock_flags;
/* memory.stat */
struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+
+ MEMCG_PADDING(_pad2_);
+
atomic_long_t stat[MEMCG_NR_STAT];
atomic_long_t events[NR_VM_EVENT_ITEMS];
+ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -285,7 +311,8 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
@@ -462,7 +489,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
void mem_cgroup_handle_over_high(void);
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -694,11 +721,8 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (likely(memcg)) {
+ if (likely(memcg))
count_memcg_events(memcg, idx, 1);
- if (idx == OOM_KILL)
- cgroup_file_notify(&memcg->events_file);
- }
rcu_read_unlock();
}
@@ -709,6 +733,21 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
cgroup_file_notify(&memcg->events_file);
}
+static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ enum memcg_memory_event event)
+{
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (likely(memcg))
+ memcg_memory_event(memcg, event);
+ rcu_read_unlock();
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
@@ -730,10 +769,15 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
{
}
-static inline bool mem_cgroup_low(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ enum memcg_memory_event event)
{
- return false;
+}
+
+static inline enum mem_cgroup_protection mem_cgroup_protected(
+ struct mem_cgroup *root, struct mem_cgroup *memcg)
+{
+ return MEMCG_PROT_NONE;
}
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -853,7 +897,7 @@ mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
return 0;
}
-static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
}
@@ -1093,7 +1137,6 @@ static inline void dec_lruvec_page_state(struct page *page,
#ifdef CONFIG_CGROUP_WRITEBACK
-struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
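
A hedged sketch of how a reclaim path might interpret the tri-state returned by mem_cgroup_protected() (skip_memcg() is illustrative, not the actual mm/vmscan.c code):

	static bool skip_memcg(struct mem_cgroup *root, struct mem_cgroup *memcg,
			       bool memcg_low_reclaim)
	{
		switch (mem_cgroup_protected(root, memcg)) {
		case MEMCG_PROT_MIN:
			return true;			/* hard protection, never reclaim */
		case MEMCG_PROT_LOW:
			return !memcg_low_reclaim;	/* best effort unless forced */
		case MEMCG_PROT_NONE:
		default:
			return false;			/* unprotected, reclaim normally */
		}
	}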
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
new file mode 100644
index 000000000000..4f1600413f91
--- /dev/null
+++ b/include/linux/memfd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MEMFD_H
+#define __LINUX_MEMFD_H
+
+#include <linux/file.h>
+
+#ifdef CONFIG_MEMFD_CREATE
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_MEMFD_H */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e0e49b5b1ee1..4e9828cda7a2 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -107,7 +107,6 @@ static inline bool movable_node_is_enabled(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size,
struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
@@ -216,6 +215,9 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
+extern void set_zone_contiguous(struct zone *zone);
+extern void clear_zone_contiguous(struct zone *zone);
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index b51f5c430c26..0c964ac107c2 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -25,6 +25,18 @@ typedef struct mempool_s {
wait_queue_head_t wait;
} mempool_t;
+static inline bool mempool_initialized(mempool_t *pool)
+{
+ return pool->elements != NULL;
+}
+
+void mempool_exit(mempool_t *pool);
+int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data,
+ gfp_t gfp_mask, int node_id);
+int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data);
+
extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
@@ -43,6 +55,14 @@ extern void mempool_free(void *element, mempool_t *pool);
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
+
+static inline int
+mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc)
+{
+ return mempool_init(pool, min_nr, mempool_alloc_slab,
+ mempool_free_slab, (void *) kc);
+}
+
static inline mempool_t *
mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
{
@@ -56,6 +76,13 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
void mempool_kfree(void *element, void *pool_data);
+
+static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size)
+{
+ return mempool_init(pool, min_nr, mempool_kmalloc,
+ mempool_kfree, (void *) size);
+}
+
static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
{
return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
@@ -68,6 +95,13 @@ static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
*/
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
void mempool_free_pages(void *element, void *pool_data);
+
+static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order)
+{
+ return mempool_init(pool, min_nr, mempool_alloc_pages,
+ mempool_free_pages, (void *)(long)order);
+}
+
static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
{
return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
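
Sketch of the new init-in-place mempool API (struct my_dev and my_cache are illustrative): the pool is embedded in a larger object instead of being allocated separately with mempool_create().

	struct my_dev {
		mempool_t pool;
	};

	static int my_dev_init(struct my_dev *dev, struct kmem_cache *my_cache)
	{
		return mempool_init_slab_pool(&dev->pool, 16, my_cache);
	}

	static void my_dev_exit(struct my_dev *dev)
	{
		if (mempool_initialized(&dev->pool))
			mempool_exit(&dev->pool);
	}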
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 7b4899c06f49..f91f9e763557 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
-#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -30,13 +29,6 @@ struct vmem_altmap {
* Specialize ZONE_DEVICE memory into multiple types each having differents
* usage.
*
- * MEMORY_DEVICE_HOST:
- * Persistent device memory (pmem): struct page might be allocated in different
- * memory and architecture might want to perform special actions. It is similar
- * to regular memory, in that the CPU can access it transparently. However,
- * it is likely to have different bandwidth and latency than regular memory.
- * See Documentation/nvdimm/nvdimm.txt for more information.
- *
* MEMORY_DEVICE_PRIVATE:
* Device memory that is not directly addressable by the CPU: CPU can neither
* read nor write private memory. In this case, we do still have struct pages
@@ -45,7 +37,7 @@ struct vmem_altmap {
* must be treated as an opaque object, rather than a "normal" struct page.
*
* A more complete discussion of unaddressable memory may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.txt.
+ * include/linux/hmm.h and Documentation/vm/hmm.rst.
*
* MEMORY_DEVICE_PUBLIC:
* Device memory that is cache coherent from device and CPU point of view. This
@@ -53,11 +45,19 @@ struct vmem_altmap {
* driver can hotplug the device memory using ZONE_DEVICE and with that memory
* type. Any page of a process can be migrated to such memory. However no one
* should be allow to pin such memory so that it can always be evicted.
+ *
+ * MEMORY_DEVICE_FS_DAX:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. In support of coordinating page
+ * pinning vs. other operations, MEMORY_DEVICE_FS_DAX arranges for a
+ * wakeup event whenever a page is unpinned and becomes idle. This
+ * wakeup is used to coordinate physical address space management (ex:
+ * fs truncate/hole punch) vs pinned pages (ex: device dma).
*/
enum memory_type {
- MEMORY_DEVICE_HOST = 0,
- MEMORY_DEVICE_PRIVATE,
+ MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
+ MEMORY_DEVICE_FS_DAX,
};
/*
@@ -67,7 +67,7 @@ enum memory_type {
* page_free()
*
* Additional notes about MEMORY_DEVICE_PRIVATE may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.txt. There is also a brief
+ * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief
* explanation in include/linux/memory_hotplug.h.
*
* The page_fault() callback must migrate page back, from device memory to
@@ -129,8 +129,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
-
-static inline bool is_zone_device_page(const struct page *page);
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
@@ -161,20 +159,6 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
}
#endif /* CONFIG_ZONE_DEVICE */
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-static inline bool is_device_private_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
-}
-
-static inline bool is_device_public_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PUBLIC;
-}
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 44412c9d26e1..aa09414756db 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -271,7 +271,6 @@ struct abx500_bm_data {
bool autopower_cfg;
bool ac_enabled;
bool usb_enabled;
- bool usb_power_path;
bool no_maintenance;
bool capacity_scaling;
bool chg_unknown_bat;
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index e63681eb6c62..c06daf3d490a 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -248,8 +248,6 @@ enum bup_vch_sel {
#define BAT_CTRL_20U_ENA 0x02
#define BAT_CTRL_18U_ENA 0x01
#define BAT_CTRL_16U_ENA 0x02
-#define BAT_CTRL_60U_ENA 0x01
-#define BAT_CTRL_120U_ENA 0x02
#define BAT_CTRL_CMP_ENA 0x04
#define FORCE_BAT_CTRL_CMP_HIGH 0x08
#define BAT_CTRL_PULL_UP_ENA 0x10
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
index 67703f23e7ba..669894f434f5 100644
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -25,8 +25,6 @@ struct ux500_charger_ops {
int (*check_enable) (struct ux500_charger *, int, int);
int (*kick_wd) (struct ux500_charger *);
int (*update_curr) (struct ux500_charger *, int);
- int (*pp_enable) (struct ux500_charger *, bool);
- int (*pre_chg_enable) (struct ux500_charger *, bool);
};
/**
@@ -37,7 +35,6 @@ struct ux500_charger_ops {
* @max_out_curr maximum output charger current in mA
* @enabled indicates if this charger is used or not
* @external external charger unit (pm2xxx)
- * @power_path USB power path support
*/
struct ux500_charger {
struct power_supply *psy;
@@ -47,7 +44,6 @@ struct ux500_charger {
int wdt_refresh;
bool enabled;
bool external;
- bool power_path;
};
extern struct blocking_notifier_head charger_notifier_list;
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index f72dc53848d7..0013075d4cda 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -56,6 +56,7 @@
#define ARIZONA_MAX_PDM_SPK 2
struct regulator_init_data;
+struct gpio_desc;
struct arizona_micbias {
int mV; /** Regulated voltage */
@@ -77,7 +78,7 @@ struct arizona_micd_range {
};
struct arizona_pdata {
- int reset; /** GPIO controlling /RESET, if any */
+ struct gpio_desc *reset; /** GPIO controlling /RESET, if any */
/** Regulator configuration for MICVDD */
struct arizona_micsupp_pdata micvdd;
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
index 34cc85864be5..ddd0b953323b 100644
--- a/include/linux/mfd/as3711.h
+++ b/include/linux/mfd/as3711.h
@@ -108,9 +108,9 @@ struct as3711_regulator_pdata {
};
struct as3711_bl_pdata {
- const char *su1_fb;
+ bool su1_fb;
int su1_max_uA;
- const char *su2_fb;
+ bool su2_fb;
int su2_max_uA;
enum as3711_su2_feedback su2_feedback;
enum as3711_su2_fbprot su2_fbprot;
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 82bf7747b312..517e60eecbcb 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -592,11 +592,11 @@ enum axp806_irqs {
AXP806_IRQ_DCDCC_V_LOW,
AXP806_IRQ_DCDCD_V_LOW,
AXP806_IRQ_DCDCE_V_LOW,
- AXP806_IRQ_PWROK_LONG,
- AXP806_IRQ_PWROK_SHORT,
+ AXP806_IRQ_POK_LONG,
+ AXP806_IRQ_POK_SHORT,
AXP806_IRQ_WAKEUP,
- AXP806_IRQ_PWROK_FALL,
- AXP806_IRQ_PWROK_RISE,
+ AXP806_IRQ_POK_FALL,
+ AXP806_IRQ_POK_RISE,
};
enum axp809_irqs {
@@ -642,7 +642,7 @@ struct axp20x_dev {
struct regmap_irq_chip_data *regmap_irqc;
long variant;
int nr_cells;
- struct mfd_cell *cells;
+ const struct mfd_cell *cells;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
index f0708ba4cbba..eb05569f752b 100644
--- a/include/linux/mfd/bd9571mwv.h
+++ b/include/linux/mfd/bd9571mwv.h
@@ -33,6 +33,11 @@
#define BD9571MWV_I2C_MD2_E1_BIT_2 0x12
#define BD9571MWV_BKUP_MODE_CNT 0x20
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK GENMASK(3, 0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0 BIT(0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1 BIT(1)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0C BIT(2)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1C BIT(3)
#define BD9571MWV_BKUP_MODE_STATUS 0x21
#define BD9571MWV_BKUP_RECOVERY_CNT 0x22
#define BD9571MWV_BKUP_CTRL_TIM_CNT 0x23
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 2d4e23c9ea0a..32421dfeb996 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -197,6 +197,8 @@ struct cros_ec_dev {
u32 features[2];
};
+#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
+
/**
* cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
*
@@ -327,23 +329,7 @@ extern struct attribute_group cros_ec_vbc_attr_group;
/* debugfs stuff */
int cros_ec_debugfs_init(struct cros_ec_dev *ec);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-/* ACPI GPE handler */
-#ifdef CONFIG_ACPI
-
-int cros_ec_acpi_install_gpe_handler(struct device *dev);
-void cros_ec_acpi_remove_gpe_handler(void);
-void cros_ec_acpi_clear_gpe(void);
-
-#else /* CONFIG_ACPI */
-
-static inline int cros_ec_acpi_install_gpe_handler(struct device *dev)
-{
- return -ENODEV;
-}
-static inline void cros_ec_acpi_remove_gpe_handler(void) {}
-static inline void cros_ec_acpi_clear_gpe(void) {}
-
-#endif /* CONFIG_ACPI */
+void cros_ec_debugfs_suspend(struct cros_ec_dev *ec);
+void cros_ec_debugfs_resume(struct cros_ec_dev *ec);
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 786bf6679a28..2010e0de3e34 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -182,20 +182,6 @@ struct lp8788_buck2_dvs {
};
/*
- * struct lp8788_ldo_enable_pin
- *
- * Basically, all LDOs are enabled through the I2C commands.
- * But ALDO 1 ~ 5, 7, DLDO 7, 9, 11 can be enabled by external gpio pins.
- *
- * @gpio : gpio number which is used for enabling ldos
- * @init_state : initial gpio state (ex. GPIOF_OUT_INIT_LOW)
- */
-struct lp8788_ldo_enable_pin {
- int gpio;
- int init_state;
-};
-
-/*
* struct lp8788_chg_param
* @addr : charging control register address (range : 0x11 ~ 0x1C)
* @val : charging parameter value
@@ -288,7 +274,6 @@ struct lp8788_vib_platform_data {
* @aldo_data : regulator initial data for analog ldo
* @buck1_dvs : gpio configurations for buck1 dvs
* @buck2_dvs : gpio configurations for buck2 dvs
- * @ldo_pin : gpio configurations for enabling LDOs
* @chg_pdata : platform data for charger driver
* @alarm_sel : rtc alarm selection (1 or 2)
* @bl_pdata : configurable data for backlight driver
@@ -306,7 +291,6 @@ struct lp8788_platform_data {
struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS];
struct lp8788_buck1_dvs *buck1_dvs;
struct lp8788_buck2_dvs *buck2_dvs;
- struct lp8788_ldo_enable_pin *ldo_pin[EN_LDOS_MAX];
/* charger */
struct lp8788_charger_platform_data *chg_pdata;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 638222e43e48..54a3cd808f9e 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -243,6 +243,8 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_LICELLCON (1 << 0)
#define MC13XXX_ADC0_CHRGICON (1 << 1)
#define MC13XXX_ADC0_BATICON (1 << 2)
+#define MC13XXX_ADC0_ADIN7SEL_DIE (1 << 4)
+#define MC13XXX_ADC0_ADIN7SEL_UID (2 << 4)
#define MC13XXX_ADC0_ADREFEN (1 << 10)
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
index 796fb9794c9e..fe0ce7bc59cf 100644
--- a/include/linux/mfd/rave-sp.h
+++ b/include/linux/mfd/rave-sp.h
@@ -21,6 +21,7 @@ enum rave_sp_command {
RAVE_SP_CMD_STATUS = 0xA0,
RAVE_SP_CMD_SW_WDT = 0xA1,
RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_SET_BACKLIGHT = 0xA6,
RAVE_SP_CMD_RESET = 0xA7,
RAVE_SP_CMD_RESET_REASON = 0xA8,
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 5a23dd4df432..28f4ae76271d 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,6 +39,8 @@
#define STEP_12_5_MV 12500
#define STEP_6_25_MV 6250
+struct gpio_desc;
+
enum sec_device_type {
S5M8751X,
S5M8763X,
@@ -151,7 +153,7 @@ struct sec_regulator_data {
int id;
struct regulator_init_data *initdata;
struct device_node *reg_node;
- int ext_control_gpio;
+ struct gpio_desc *ext_control_gpiod;
};
/*
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 2aadab6f34a1..067d14655c28 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -8,6 +8,8 @@
#define _LINUX_STM32_GPTIMER_H_
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/regmap.h>
#define TIM_CR1 0x00 /* Control Register 1 */
@@ -27,6 +29,8 @@
#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_DCR 0x48 /* DMA control register */
+#define TIM_DMAR 0x4C /* DMA register for transfer */
#define TIM_CR1_CEN BIT(0) /* Counter Enable */
#define TIM_CR1_DIR BIT(4) /* Counter Direction */
@@ -36,17 +40,35 @@
#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
#define TIM_EGR_UG BIT(0) /* Update Generation */
#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */
#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */
#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */
#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
#define TIM_BDTR_BKE BIT(12) /* Break input enable */
#define TIM_BDTR_BKP BIT(13) /* Break input polarity */
@@ -56,8 +78,11 @@
#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23))
#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */
#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
#define TIM_CR2_MMS_SHIFT 4
#define TIM_CR2_MMS2_SHIFT 20
#define TIM_SMCR_TS_SHIFT 4
@@ -65,9 +90,54 @@
#define TIM_BDTR_BKF_SHIFT 16
#define TIM_BDTR_BK2F_SHIFT 20
+enum stm32_timers_dmas {
+ STM32_TIMERS_DMA_CH1,
+ STM32_TIMERS_DMA_CH2,
+ STM32_TIMERS_DMA_CH3,
+ STM32_TIMERS_DMA_CH4,
+ STM32_TIMERS_DMA_UP,
+ STM32_TIMERS_DMA_TRIG,
+ STM32_TIMERS_DMA_COM,
+ STM32_TIMERS_MAX_DMAS,
+};
+
+/**
+ * struct stm32_timers_dma - STM32 timer DMA handling.
+ * @completion: end of DMA transfer completion
+ * @phys_base: control registers physical base address
+ * @lock: protect DMA access
+ * @chan: DMA channel in use
+ * @chans: DMA channels available for this timer instance
+ */
+struct stm32_timers_dma {
+ struct completion completion;
+ phys_addr_t phys_base;
+ struct mutex lock;
+ struct dma_chan *chan;
+ struct dma_chan *chans[STM32_TIMERS_MAX_DMAS];
+};
+
struct stm32_timers {
struct clk *clk;
struct regmap *regmap;
u32 max_arr;
+ struct stm32_timers_dma dma; /* Only to be used by the parent */
};
+
+#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
+int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
+ enum stm32_timers_dmas id, u32 reg,
+ unsigned int num_reg, unsigned int bursts,
+ unsigned long tmo_ms);
+#else
+static inline int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
+ enum stm32_timers_dmas id,
+ u32 reg,
+ unsigned int num_reg,
+ unsigned int bursts,
+ unsigned long tmo_ms)
+{
+ return -ENODEV;
+}
+#endif
#endif
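
The new stm32_timers_dma_burst_read() helper lets a child driver (PWM capture, for example) snapshot several capture/compare registers in one DMA burst while the MFD parent owns the DMA channels. A minimal sketch of a caller follows; the function name, the chosen DMA request and the two-register burst are illustrative assumptions.

#include <linux/mfd/stm32-timers.h>

/* Sketch: read TIM_CCR1 and TIM_CCR2 in one burst via the CH1 DMA request. */
static int demo_capture_read(struct device *mfd_parent, u32 ccr[2],
			     unsigned long timeout_ms)
{
	return stm32_timers_dma_burst_read(mfd_parent, ccr,
					   STM32_TIMERS_DMA_CH1, TIM_CCR1,
					   2 /* num_reg */, 1 /* bursts */,
					   timeout_ms);
}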
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
deleted file mode 100644
index 278b1b1549e9..000000000000
--- a/include/linux/mfd/syscon/exynos4-pmu.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2015 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
-#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
-
-/* Exynos4 PMU register definitions */
-
-/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
-#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
-#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
-#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
-#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
-#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
-
-#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
deleted file mode 100644
index b4942a32b81d..000000000000
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Exynos5 SoC series Power Management Unit (PMU) register offsets
- * and bit definitions.
- *
- * Copyright (C) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-
-#define EXYNOS5_PHY_ENABLE BIT(0)
-#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
-#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
-
-#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 67d144b3b8f9..f05bf4a146e2 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -83,6 +83,8 @@ enum {
#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2
#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1)
+struct gpio_desc;
+
struct tps65090 {
struct device *dev;
struct regmap *rmap;
@@ -95,8 +97,8 @@ struct tps65090 {
* @reg_init_data: The regulator init data.
* @enable_ext_control: Enable external control or not. Only available for
* DCDC1, DCDC2 and DCDC3.
- * @gpio: Gpio number if external control is enabled and controlled through
- * gpio.
+ * @gpiod: Gpio descriptor if external control is enabled and controlled through
+ * gpio
* @overcurrent_wait_valid: True if the overcurrent_wait should be applied.
* @overcurrent_wait: Value to set as the overcurrent wait time. This is the
* actual bitfield value, not a time in ms (valid values are 0 - 3).
@@ -104,7 +106,7 @@ struct tps65090 {
struct tps65090_regulator_plat_data {
struct regulator_init_data *reg_init_data;
bool enable_ext_control;
- int gpio;
+ struct gpio_desc *gpiod;
bool overcurrent_wait_valid;
int overcurrent_wait;
};
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index f069c518c0ed..c204d9a79436 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -205,10 +205,10 @@ enum tps65218_regulator_id {
TPS65218_DCDC_4,
TPS65218_DCDC_5,
TPS65218_DCDC_6,
- /* LS's */
- TPS65218_LS_3,
/* LDOs */
TPS65218_LDO_1,
+ /* LS's */
+ TPS65218_LS_3,
};
#define TPS65218_MAX_REG_ID TPS65218_LDO_1
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index 2fe68e481230..b19c2801a30e 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -18,6 +18,7 @@
#define TPS658621A 0x15
#define TPS658621CD 0x2c
#define TPS658623 0x1b
+#define TPS658624 0x0a
#define TPS658640 0x01
#define TPS658640v2 0x02
#define TPS658643 0x03
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
index 44f9d9f647ed..ffe81127d91c 100644
--- a/include/linux/mfd/tps68470.h
+++ b/include/linux/mfd/tps68470.h
@@ -1,17 +1,6 @@
-/*
- * Copyright (c) 2017 Intel Corporation
- *
- * Functions to access TPS68470 power management chip.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Intel Corporation */
+/* Functions to access TPS68470 power management chip. */
#ifndef __LINUX_MFD_TPS68470_H
#define __LINUX_MFD_TPS68470_H
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index bd581c6fa085..0bc41c4c0429 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -617,11 +617,8 @@ struct wm8350_audio_platform_data {
u32 codec_current_charge:2; /* codec current @ vmid charge */
};
-struct snd_soc_codec;
-
struct wm8350_codec {
struct platform_device *pdev;
- struct snd_soc_codec *codec;
struct wm8350_audio_platform_data *platform_data;
};
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
index eb492d47f717..8c40128af240 100644
--- a/include/linux/microchipphy.h
+++ b/include/linux/microchipphy.h
@@ -70,4 +70,15 @@
#define LAN88XX_MMD3_CHIP_ID (32877)
#define LAN88XX_MMD3_CHIP_REV (32878)
+/* Registers specific to the LAN7800/LAN7850 embedded phy */
+#define LAN78XX_PHY_LED_MODE_SELECT (0x1D)
+
+/* DSP registers */
+#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A)
+#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000)
+#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5)
+#define LAN88XX_EXT_PAGE_TR_CR 16
+#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17
+#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18
+
#endif /* _MICROCHIPPHY_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 81d0799b6091..122e7e9d3091 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -225,6 +225,7 @@ enum {
MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
+ MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
};
enum {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 5004ddc702e3..02f72ebf31a7 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -314,6 +314,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
+ MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
@@ -330,6 +331,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
+ MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
};
enum {
@@ -626,6 +628,11 @@ struct mlx5_eqe_dct {
__be32 dctn;
};
+struct mlx5_eqe_temp_warning {
+ __be64 sensor_warning_msb;
+ __be64 sensor_warning_lsb;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -642,6 +649,7 @@ union ev_data {
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
struct mlx5_eqe_dct dct;
+ struct mlx5_eqe_temp_warning temp_warning;
} __packed;
struct mlx5_eqe {
@@ -1164,6 +1172,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_PCAM_REG(mdev, reg) \
+ MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
+
#define MLX5_CAP_MCAM_REG(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 767d193c269a..80cbb7fdce4a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -124,6 +124,8 @@ enum {
MLX5_REG_PAOS = 0x5006,
MLX5_REG_PFCC = 0x5007,
MLX5_REG_PPCNT = 0x5008,
+ MLX5_REG_PPTB = 0x500b,
+ MLX5_REG_PBMC = 0x500c,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
@@ -981,16 +983,24 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
- void *cqc)
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+ struct mlx5_frag_buf_ctrl *fbc)
{
- fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
- fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ fbc->log_stride = log_stride;
+ fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
}
+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+ void *cqc)
+{
+ mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
+ MLX5_GET(cqc, cqc, log_cq_size),
+ fbc);
+}
+
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
u32 ix)
{
@@ -1284,25 +1294,9 @@ enum {
};
static inline const struct cpumask *
-mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
{
- const struct cpumask *mask;
- struct irq_desc *desc;
- unsigned int irq;
- int eqn;
- int err;
-
- err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
- if (err)
- return NULL;
-
- desc = irq_to_desc(irq);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
- mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
-#else
- mask = desc->irq_common_data.affinity;
-#endif
- return mask;
+ return dev->priv.irq_info[vector].mask;
}
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 16876fe1710b..27134c4fcb76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -60,6 +60,7 @@ enum {
MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb,
MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20,
+ MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21
};
enum {
@@ -362,22 +363,6 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 reserved_at_6[0x1a];
};
-struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_at_0[0x60];
-
- u8 ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
- u8 ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
- struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
- struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_at_0[0x80];
-};
-
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -569,7 +554,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
};
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_at_0[0x200];
+ u8 reserved_at_0[0x1c];
+ u8 fdb_multi_path_to_table[0x1];
+ u8 reserved_at_1d[0x1e3];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -955,7 +942,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
- u8 reserved_at_1d0[0x1];
+ u8 temp_warn_event[0x1];
u8 dcbx[0x1];
u8 general_notification_event[0x1];
u8 reserved_at_1d3[0x2];
@@ -8055,6 +8042,17 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
u8 ppcnt_statistical_group[0x1];
};
+struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
+ u8 port_access_reg_cap_mask_127_to_96[0x20];
+ u8 port_access_reg_cap_mask_95_to_64[0x20];
+ u8 port_access_reg_cap_mask_63_to_32[0x20];
+
+ u8 port_access_reg_cap_mask_31_to_13[0x13];
+ u8 pbmc[0x1];
+ u8 pptb[0x1];
+ u8 port_access_reg_cap_mask_10_to_0[0xb];
+};
+
struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -8064,6 +8062,7 @@ struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_20[0x20];
union {
+ struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f;
u8 reserved_at_0[0x80];
} port_access_reg_cap_mask;
@@ -8828,6 +8827,41 @@ struct mlx5_ifc_qpts_reg_bits {
u8 trust_state[0x3];
};
+struct mlx5_ifc_pptb_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 mm[0x2];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x6];
+ u8 cm[0x1];
+ u8 um[0x1];
+ u8 pm[0x8];
+
+ u8 prio_x_buff[0x20];
+
+ u8 pm_msb[0x8];
+ u8 reserved_at_48[0x10];
+ u8 ctrl_buff[0x4];
+ u8 untagged_buff[0x4];
+};
+
+struct mlx5_ifc_pbmc_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 xoff_timer_value[0x10];
+ u8 xoff_refresh[0x10];
+
+ u8 reserved_at_40[0x9];
+ u8 fullness_threshold[0x7];
+ u8 port_buffer_size[0x10];
+
+ struct mlx5_ifc_bufferx_reg_bits buffer[10];
+
+ u8 reserved_at_2e0[0x40];
+};
+
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index ec052491ba3d..64d0f40d4cc3 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,12 +32,29 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
enum {
MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
};
enum {
MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
+ MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
};
struct mlx5_ifc_fpga_shell_caps_bits {
@@ -370,6 +387,27 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_tls_extended_cap_bits {
+ u8 aes_gcm_128[0x1];
+ u8 aes_gcm_256[0x1];
+ u8 reserved_at_2[0x1e];
+ u8 reserved_at_20[0x20];
+ u8 context_capacity_total[0x20];
+ u8 context_capacity_rx[0x20];
+ u8 context_capacity_tx[0x20];
+ u8 reserved_at_a0[0x10];
+ u8 tls_counter_size[0x10];
+ u8 tls_counters_addr_low[0x20];
+ u8 tls_counters_addr_high[0x20];
+ u8 rx[0x1];
+ u8 tx[0x1];
+ u8 tls_v12[0x1];
+ u8 tls_v13[0x1];
+ u8 lro[0x1];
+ u8 ipv6[0x1];
+ u8 reserved_at_106[0x1a];
+};
+
struct mlx5_ifc_ipsec_extended_cap_bits {
u8 encapsulation[0x20];
@@ -432,6 +470,22 @@ struct mlx5_ifc_ipsec_counters_bits {
u8 dropped_cmd[0x40];
};
+enum {
+ MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
+ MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
+};
+
+struct mlx5_ifc_fpga_qp_error_event_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_at_60[0x60];
+
+ u8 reserved_at_c0[0x8];
+ u8 fpga_qpn[0x18];
+};
enum mlx5_ifc_fpga_ipsec_response_syndrome {
MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
@@ -519,4 +573,43 @@ struct mlx5_ifc_fpga_ipsec_sa {
__be16 reserved2;
} __packed;
+enum fpga_tls_cmds {
+ CMD_SETUP_STREAM = 0x1001,
+ CMD_TEARDOWN_STREAM = 0x1002,
+};
+
+#define MLX5_TLS_1_2 (0)
+
+#define MLX5_TLS_ALG_AES_GCM_128 (0)
+#define MLX5_TLS_ALG_AES_GCM_256 (1)
+
+struct mlx5_ifc_tls_cmd_bits {
+ u8 command_type[0x20];
+ u8 ipv6[0x1];
+ u8 direction_sx[0x1];
+ u8 tls_version[0x2];
+ u8 reserved[0x1c];
+ u8 swid[0x20];
+ u8 src_port[0x10];
+ u8 dst_port[0x10];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+ u8 tls_rcd_sn[0x40];
+ u8 tcp_sn[0x20];
+ u8 tls_implicit_iv[0x20];
+ u8 tls_xor_iv[0x40];
+ u8 encryption_key[0x100];
+ u8 alg[4];
+ u8 reserved2[0x1c];
+ u8 reserved3[0x4a0];
+};
+
+struct mlx5_ifc_tls_resp_bits {
+ u8 syndrome[0x20];
+ u8 stream_id[0x20];
+ u8 reserverd[0x40];
+};
+
+#define MLX5_TLS_COMMAND_SIZE (0x100)
+
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index edf44265c752..a0fbb9ffe380 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
+#include <linux/overflow.h>
struct mempolicy;
struct anon_vma;
@@ -228,15 +229,21 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
-#if defined(CONFIG_X86)
-# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
+#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
+# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
+#ifdef CONFIG_PPC
+# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
+#else
+# define VM_PKEY_BIT4 0
#endif
+#endif /* CONFIG_ARCH_HAS_PKEYS */
+
+#if defined(CONFIG_X86)
+# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
@@ -560,10 +567,17 @@ static inline void *kvzalloc(size_t size, gfp_t flags)
static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
- if (size != 0 && n > SIZE_MAX / size)
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return kvmalloc(n * size, flags);
+ return kvmalloc(bytes, flags);
+}
+
+static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kvmalloc_array(n, size, flags | __GFP_ZERO);
}
extern void kvfree(const void *addr);
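
kvmalloc_array() now fails cleanly when n * size overflows, thanks to check_mul_overflow(), and kvcalloc() is the zeroing counterpart. A short illustrative use; the element type and caller are hypothetical.

#include <linux/mm.h>

struct demo_entry {
	u64 key;
	u64 value;
};

static struct demo_entry *demo_alloc_table(size_t nr_entries)
{
	/* Returns NULL instead of wrapping if nr_entries * sizeof() overflows. */
	return kvcalloc(nr_entries, sizeof(struct demo_entry), GFP_KERNEL);
}

/* Free with kvfree(), which handles both kmalloc- and vmalloc-backed memory. */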
@@ -821,27 +835,65 @@ static inline bool is_zone_device_page(const struct page *page)
}
#endif
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-void put_zone_device_private_or_public_page(struct page *page);
-DECLARE_STATIC_KEY_FALSE(device_private_key);
-#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
-static inline bool is_device_private_page(const struct page *page);
-static inline bool is_device_public_page(const struct page *page);
-#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-static inline void put_zone_device_private_or_public_page(struct page *page)
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+void dev_pagemap_get_ops(void);
+void dev_pagemap_put_ops(void);
+void __put_devmap_managed_page(struct page *page);
+DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
+static inline bool put_devmap_managed_page(struct page *page)
{
+ if (!static_branch_unlikely(&devmap_managed_key))
+ return false;
+ if (!is_zone_device_page(page))
+ return false;
+ switch (page->pgmap->type) {
+ case MEMORY_DEVICE_PRIVATE:
+ case MEMORY_DEVICE_PUBLIC:
+ case MEMORY_DEVICE_FS_DAX:
+ __put_devmap_managed_page(page);
+ return true;
+ default:
+ break;
+ }
+ return false;
}
-#define IS_HMM_ENABLED 0
+
static inline bool is_device_private_page(const struct page *page)
{
- return false;
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
+
static inline bool is_device_public_page(const struct page *page)
{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PUBLIC;
+}
+
+#else /* CONFIG_DEV_PAGEMAP_OPS */
+static inline void dev_pagemap_get_ops(void)
+{
+}
+
+static inline void dev_pagemap_put_ops(void)
+{
+}
+
+static inline bool put_devmap_managed_page(struct page *page)
+{
return false;
}
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+static inline bool is_device_private_page(const struct page *page)
+{
+ return false;
+}
+
+static inline bool is_device_public_page(const struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline void get_page(struct page *page)
{
@@ -859,16 +911,13 @@ static inline void put_page(struct page *page)
page = compound_head(page);
/*
- * For private device pages we need to catch refcount transition from
- * 2 to 1, when refcount reach one it means the private device page is
- * free and we need to inform the device driver through callback. See
+ * For devmap managed pages we need to catch refcount transition from
+ * 2 to 1, when refcount reach one it means the page is free and we
+ * need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/
- if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
- unlikely(is_device_public_page(page)))) {
- put_zone_device_private_or_public_page(page);
+ if (put_devmap_managed_page(page))
return;
- }
if (put_page_testzero(page))
__put_page(page);
@@ -1842,6 +1891,7 @@ static inline bool pgtable_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
+ __SetPageTable(page);
inc_zone_page_state(page, NR_PAGETABLE);
return true;
}
@@ -1849,6 +1899,7 @@ static inline bool pgtable_page_ctor(struct page *page)
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
+ __ClearPageTable(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
@@ -2109,7 +2160,6 @@ extern void setup_per_cpu_pageset(void);
extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);
-extern void setup_zone_pageset(struct zone *zone);
/* page_alloc.c */
extern int min_free_kbytes;
@@ -2295,10 +2345,10 @@ extern void truncate_inode_pages_range(struct address_space *,
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_fault(struct vm_fault *vmf);
+extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
-extern int filemap_page_mkwrite(struct vm_fault *vmf);
+extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
@@ -2423,8 +2473,8 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
-int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -2466,6 +2516,13 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
+static inline vm_fault_t vmf_error(int err)
+{
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ return VM_FAULT_SIGBUS;
+}
+
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
unsigned int *page_mask);
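
vmf_error() gives fault handlers a one-liner for mapping an errno onto a vm_fault_t. A hedged sketch of a ->fault implementation; demo_lookup_page() is a hypothetical helper.

#include <linux/mm.h>

static int demo_lookup_page(pgoff_t index, struct page **page);	/* hypothetical */

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	struct page *page;
	int err;

	err = demo_lookup_page(vmf->pgoff, &page);
	if (err)
		return vmf_error(err);	/* -ENOMEM -> VM_FAULT_OOM, else VM_FAULT_SIGBUS */

	vmf->page = page;
	return 0;
}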
@@ -2493,6 +2550,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
#define FOLL_COW 0x4000 /* internal GUP flag */
+#define FOLL_ANON 0x8000 /* don't do file mappings */
static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
@@ -2514,12 +2572,10 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
-extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
-static inline bool page_is_poisoned(struct page *page) { return false; }
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 21612347d311..99ce070e7dcb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -33,29 +33,27 @@ struct hmm;
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it. If you allocate the page using alloc_pages(), you
- * can use some of the space in struct page for your own purposes.
+ * who is mapping it.
*
- * Pages that were once in the page cache may be found under the RCU lock
- * even after they have been recycled to a different purpose. The page
- * cache reads and writes some of the fields in struct page to pin the
- * page before checking that it's still in the page cache. It is vital
- * that all users of struct page:
- * 1. Use the first word as PageFlags.
- * 2. Clear or preserve bit 0 of page->compound_head. It is used as
- * PageTail for compound pages, and the page cache must not see false
- * positives. Some users put a pointer here (guaranteed to be at least
- * 4-byte aligned), other users avoid using the field altogether.
- * 3. page->_refcount must either not be used, or must be used in such a
- * way that other CPUs temporarily incrementing and then decrementing the
- * refcount does not cause problems. On receiving the page from
- * alloc_pages(), the refcount will be positive.
- * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
+ * If you allocate the page using alloc_pages(), you can use some of the
+ * space in struct page for your own purposes. The five words in the main
+ * union are available, except for bit 0 of the first word which must be
+ * kept clear. Many users use this word to store a pointer to an object
+ * which is guaranteed to be aligned. If you use the same storage as
+ * page->mapping, you must restore it to NULL before freeing the page.
*
- * If you allocate pages of order > 0, you can use the fields in the struct
- * page associated with each page, but bear in mind that the pages may have
- * been inserted individually into the page cache, so you must use the above
- * four fields in a compatible way for each struct page.
+ * If your page will not be mapped to userspace, you can also use the four
+ * bytes in the mapcount union, but you must call page_mapcount_reset()
+ * before freeing it.
+ *
+ * If you want to use the refcount field, it must be used in such a way
+ * that other CPUs temporarily incrementing and then decrementing the
+ * refcount does not cause problems. On receiving the page from
+ * alloc_pages(), the refcount will be positive.
+ *
+ * If you allocate pages of order > 0, you can use some of the fields
+ * in each subpage, but you may need to restore some of their values
+ * afterwards.
*
* SLUB uses cmpxchg_double() to atomically update its freelist and
* counters. That requires that freelist & counters be adjacent and
@@ -65,135 +63,122 @@ struct hmm;
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
-#define _slub_counter_t unsigned long
#else
-#define _slub_counter_t unsigned int
-#endif
-#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
#define _struct_page_alignment
-#define _slub_counter_t unsigned int
-#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#endif
struct page {
- /* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- union {
- /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
- struct address_space *mapping;
-
- void *s_mem; /* slab first object */
- atomic_t compound_mapcount; /* first tail page */
- /* page_deferred_list().next -- second tail page */
- };
-
- /* Second double word */
- union {
- pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* sl[aou]b first free object */
- /* page_deferred_list().prev -- second tail page */
- };
-
- union {
- _slub_counter_t counters;
- unsigned int active; /* SLAB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- int units; /* SLOB */
-
- struct { /* Page cache */
- /*
- * Count of ptes mapped in mms, to show when
- * page is mapped & limit reverse map searches.
- *
- * Extra information about page type may be
- * stored here for pages that are never mapped,
- * in which case the value MUST BE <= -2.
- * See page-flags.h for more details.
- */
- atomic_t _mapcount;
-
- /*
- * Usage count, *USE WRAPPER FUNCTION* when manual
- * accounting. See page_ref.h
- */
- atomic_t _refcount;
- };
- };
-
/*
- * WARNING: bit 0 of the first word encode PageTail(). That means
- * the rest users of the storage space MUST NOT use the bit to
+ * Five words (20/40 bytes) are available in this union.
+ * WARNING: bit 0 of the first word is used for PageTail(). That
+ * means the other users of this union MUST NOT use the bit to
* avoid collision and false-positive PageTail().
*/
union {
- struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone_lru_lock !
- * Can be used as a generic list
- * by the page owner.
- */
- struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
- * lru or handled by a slab
- * allocator, this points to the
- * hosting device page map.
- */
- struct { /* slub per cpu partial pages */
- struct page *next; /* Next partial slab */
+ struct { /* Page cache and anonymous pages */
+ /**
+ * @lru: Pageout list, eg. active_list protected by
+ * zone_lru_lock. Sometimes used as a generic list
+ * by the page owner.
+ */
+ struct list_head lru;
+ /* See page-flags.h for PAGE_MAPPING_FLAGS */
+ struct address_space *mapping;
+ pgoff_t index; /* Our offset within mapping. */
+ /**
+ * @private: Mapping-private opaque data.
+ * Usually used for buffer_heads if PagePrivate.
+ * Used for swp_entry_t if PageSwapCache.
+ * Indicates order in the buddy system if PageBuddy.
+ */
+ unsigned long private;
+ };
+ struct { /* slab, slob and slub */
+ union {
+ struct list_head slab_list; /* uses lru */
+ struct { /* Partial pages */
+ struct page *next;
#ifdef CONFIG_64BIT
- int pages; /* Nr of partial slabs left */
- int pobjects; /* Approximate # of objects */
+ int pages; /* Nr of pages left */
+ int pobjects; /* Approximate count */
#else
- short int pages;
- short int pobjects;
+ short int pages;
+ short int pobjects;
#endif
+ };
+ };
+ struct kmem_cache *slab_cache; /* not slob */
+ /* Double-word boundary */
+ void *freelist; /* first free object */
+ union {
+ void *s_mem; /* slab: first object */
+ unsigned long counters; /* SLUB */
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ };
};
-
- struct rcu_head rcu_head; /* Used by SLAB
- * when destroying via RCU
- */
- /* Tail pages of compound page */
- struct {
- unsigned long compound_head; /* If bit zero is set */
+ struct { /* Tail pages of compound page */
+ unsigned long compound_head; /* Bit zero is set */
/* First tail page only */
unsigned char compound_dtor;
unsigned char compound_order;
- /* two/six bytes available here */
+ atomic_t compound_mapcount;
};
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- struct {
- unsigned long __pad; /* do not overlay pmd_huge_pte
- * with compound_head to avoid
- * possible bit 0 collision.
- */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ struct { /* Second tail page of compound page */
+ unsigned long _compound_pad_1; /* compound_head */
+ unsigned long _compound_pad_2;
+ struct list_head deferred_list;
};
+ struct { /* Page table pages */
+ unsigned long _pt_pad_1; /* compound_head */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ unsigned long _pt_pad_2; /* mapping */
+ struct mm_struct *pt_mm; /* x86 pgds only */
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
#endif
+ };
+ struct { /* ZONE_DEVICE pages */
+ /** @pgmap: Points to the hosting device page map. */
+ struct dev_pagemap *pgmap;
+ unsigned long hmm_data;
+ unsigned long _zd_pad_1; /* uses mapping */
+ };
+
+ /** @rcu_head: You can use this to free a page by RCU. */
+ struct rcu_head rcu_head;
};
- union {
+ union { /* This union is 4 bytes in size. */
/*
- * Mapping-private opaque data:
- * Usually used for buffer_heads if PagePrivate
- * Used for swp_entry_t if PageSwapCache
- * Indicates order in the buddy system if PageBuddy
+ * If the page can be mapped to userspace, encodes the number
+ * of times this page is referenced by a page table.
*/
- unsigned long private;
-#if USE_SPLIT_PTE_PTLOCKS
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
-#endif
- struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
+ atomic_t _mapcount;
+
+ /*
+ * If the page is neither PageSlab nor mappable to userspace,
+ * the value stored here may help determine what this page
+ * is used for. See page-flags.h for a list of page types
+ * which are currently stored here.
+ */
+ unsigned int page_type;
+
+ unsigned int active; /* SLAB */
+ int units; /* SLOB */
};
+ /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
+ atomic_t _refcount;
+
#ifdef CONFIG_MEMCG
struct mem_cgroup *mem_cgroup;
#endif
@@ -413,6 +398,8 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+
+ spinlock_t arg_lock; /* protect the below fields */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
@@ -627,9 +614,9 @@ struct vm_special_mapping {
* If non-NULL, then this is called to resolve page faults
* on the special mapping. If used, .pages is not checked.
*/
- int (*fault)(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma,
- struct vm_fault *vmf);
+ vm_fault_t (*fault)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf);
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 279b39008a33..de7377815b6b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -156,6 +156,7 @@ struct sd_switch_caps {
#define UHS_DDR50_MAX_DTR 50000000
#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
#define UHS_SDR12_MAX_DTR 25000000
+#define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR
unsigned int sd3_bus_mode;
#define UHS_SDR12_BUS_SPEED 0
#define HIGH_SPEED_BUS_SPEED 1
@@ -252,6 +253,7 @@ struct mmc_card {
#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */
unsigned int state; /* (our) card state */
unsigned int quirks; /* card quirks */
+ unsigned int quirk_max_rate; /* max rate set by quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
/* for byte mode */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 927519385482..134a6483347a 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -177,6 +177,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
int mmc_hw_reset(struct mmc_host *host);
+int mmc_sw_reset(struct mmc_host *host);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
#endif /* LINUX_MMC_CORE_H */
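
mmc_sw_reset() complements mmc_hw_reset(): it re-initializes the card with commands only, which helps when no reset line or power cycle is available. A hedged sketch of an SDIO function driver recovering a wedged card; the fallback policy shown is illustrative.

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>

/* Sketch: try a soft re-initialization first, then fall back to a full reset. */
static int demo_recover_card(struct sdio_func *func)
{
	int ret = mmc_sw_reset(func->card->host);

	if (ret)
		ret = mmc_hw_reset(func->card->host);
	return ret;
}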
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 85146235231e..64300a48dcce 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -22,6 +22,7 @@
struct mmc_ios {
unsigned int clock; /* clock rate */
unsigned short vdd;
+ unsigned int power_delay_ms; /* waiting for stable power */
/* vdd stores the bit number of the selected voltage range from below. */
@@ -320,6 +321,9 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
+#define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
+ MMC_CAP_UHS_DDR50)
/* (1 << 21) is free for reuse */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
@@ -345,6 +349,7 @@ struct mmc_host {
#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \
MMC_CAP2_HS400_1_2V)
+#define MMC_CAP2_HSX00_1_8V (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
@@ -354,6 +359,7 @@ struct mmc_host {
#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */
#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
+#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
int fixed_drv_type; /* fixed driver type for non-removable media */
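
MMC_CAP_UHS bundles every UHS speed-mode bit, and MMC_CAP2_AVOID_3_3V asks the core to negotiate the signalling voltage down from 3.3V. A small illustrative helper for a host driver; the function name is hypothetical.

#include <linux/mmc/host.h>

static void demo_host_set_caps(struct mmc_host *host)
{
	/* Advertise all UHS speed modes in one go... */
	host->caps |= MMC_CAP_UHS;
	/* ...but have the core move the bus away from 3.3V signalling. */
	host->caps2 |= MMC_CAP2_AVOID_3_3V;
}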
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index cdd66a5fbd5e..4224902a8e22 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -35,6 +35,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
@@ -55,6 +56,7 @@
#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
+#define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 2d07a1ed5a31..392e6af82701 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -174,7 +174,7 @@ struct mmu_notifier_ops {
* invalidate_range_start()/end() notifiers, as
* invalidate_range() already catches the points in time when an
* external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/vm/mmu_notifier.txt
+ * discussion on this see Documentation/vm/mmu_notifier.rst
*
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 7d361be2e24f..96a71a648eed 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -471,6 +471,17 @@ struct slim_device_id {
kernel_ulong_t driver_data;
};
+#define APR_NAME_SIZE 32
+#define APR_MODULE_PREFIX "apr:"
+
+struct apr_device_id {
+ char name[APR_NAME_SIZE];
+ __u32 domain_id;
+ __u32 svc_id;
+ __u32 svc_version;
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
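
apr_device_id gives drivers a match table for Qualcomm APR services. A hypothetical example follows; the service name, the numeric domain/service IDs and the MODULE_DEVICE_TABLE() usage are assumptions for illustration only.

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Hypothetical: match an audio front-end service exposed by the ADSP. */
static const struct apr_device_id demo_apr_ids[] = {
	{ .name = "demo-afe", .domain_id = 4, .svc_id = 4, .svc_version = 1 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(apr, demo_apr_ids);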
@@ -490,6 +501,7 @@ enum dmi_field {
DMI_PRODUCT_VERSION,
DMI_PRODUCT_SERIAL,
DMI_PRODUCT_UUID,
+ DMI_PRODUCT_SKU,
DMI_PRODUCT_FAMILY,
DMI_BOARD_VENDOR,
DMI_BOARD_NAME,
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 1cc5ffb769af..7cd1473c64a4 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -53,93 +53,32 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_is_neg(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-MPI mpi_alloc_secure(unsigned nlimbs);
-MPI mpi_alloc_like(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-int mpi_copy(MPI *copy, const MPI a);
-void mpi_clear(MPI a);
-int mpi_set(MPI w, MPI u);
-int mpi_set_ui(MPI w, ulong u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_m_check(MPI a);
-void mpi_swap(MPI a, MPI b);
/*-- mpicoder.c --*/
-MPI do_encode_md(const void *sha_buffer, unsigned nbits);
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
-int mpi_fromstr(MPI val, const char *str);
-u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
-void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-#define log_mpidump g10_log_mpidump
-
-/*-- mpi-add.c --*/
-int mpi_add_ui(MPI w, MPI u, ulong v);
-int mpi_add(MPI w, MPI u, MPI v);
-int mpi_addm(MPI w, MPI u, MPI v, MPI m);
-int mpi_sub_ui(MPI w, MPI u, ulong v);
-int mpi_sub(MPI w, MPI u, MPI v);
-int mpi_subm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-mul.c --*/
-int mpi_mul_ui(MPI w, MPI u, ulong v);
-int mpi_mul_2exp(MPI w, MPI u, ulong cnt);
-int mpi_mul(MPI w, MPI u, MPI v);
-int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-div.c --*/
-ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor);
-int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
-int mpi_tdiv_r(MPI rem, MPI num, MPI den);
-int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
-int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count);
-int mpi_divisible_ui(const MPI dividend, ulong divisor);
-
-/*-- mpi-gcd.c --*/
-int mpi_gcd(MPI g, const MPI a, const MPI b);
-
/*-- mpi-pow.c --*/
-int mpi_pow(MPI w, MPI u, MPI v);
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
-/*-- mpi-mpow.c --*/
-int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod);
-
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-/*-- mpi-scan.c --*/
-int mpi_getbyte(MPI a, unsigned idx);
-void mpi_putbyte(MPI a, unsigned idx, int value);
-unsigned mpi_trailing_zeros(MPI a);
-
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
-int mpi_test_bit(MPI a, unsigned n);
-int mpi_set_bit(MPI a, unsigned n);
-int mpi_set_highbit(MPI a, unsigned n);
-void mpi_clear_highbit(MPI a, unsigned n);
-void mpi_clear_bit(MPI a, unsigned n);
-int mpi_rshift(MPI x, MPI a, unsigned n);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI u, MPI v);
/* inline functions */
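
What survives of the MPI API after this trim is essentially the import / modular-exponentiation path used by the RSA code. A condensed sketch, with hypothetical buffers and most error handling trimmed:

#include <linux/errno.h>
#include <linux/mpi.h>

/* Sketch: compute res = m^e mod n, inputs given as raw buffers. */
static int demo_modexp(const void *m, size_t m_len, const void *e, size_t e_len,
		       const void *n, size_t n_len)
{
	MPI base = mpi_read_raw_data(m, m_len);
	MPI exp = mpi_read_raw_data(e, e_len);
	MPI mod = mpi_read_raw_data(n, n_len);
	MPI res = NULL;
	int ret = -ENOMEM;

	if (!base || !exp || !mod)
		goto out;

	res = mpi_alloc(mpi_get_nlimbs(mod));
	if (!res)
		goto out;

	ret = mpi_powm(res, base, exp, mod);
out:
	mpi_free(res);		/* mpi_free() ignores NULL */
	mpi_free(mod);
	mpi_free(exp);
	mpi_free(base);
	return ret;
}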
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index d617fe45543e..d633f737b3c6 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -307,16 +307,6 @@ static inline void vif_device_init(struct vif_device *v,
{
}
-static inline void *
-mr_table_alloc(struct net *net, u32 id,
- struct mr_table_ops *ops,
- void (*expire_func)(struct timer_list *t),
- void (*table_set)(struct mr_table *mrt,
- struct net *net))
-{
- return NULL;
-}
-
static inline void *mr_mfc_find_parent(struct mr_table *mrt,
void *hasharg, int parent)
{
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 1f1bbb5b4679..5839d8062dfc 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -289,6 +289,8 @@ enum {
* MSI_FLAG_ACTIVATE_EARLY has been set.
*/
MSI_FLAG_MUST_REACTIVATE = (1 << 5),
+ /* Is level-triggered capable, using two messages */
+ MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index b5b43f94f311..01b990e4b228 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
({ \
int i, ret = 1; \
for (i = 0; i < map_words(map); i++) { \
- if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
ret = 0; \
break; \
} \
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 792ea5c26329..abe975c87b90 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -86,6 +86,7 @@ struct nand_pos {
* @ooboffs: the OOB offset within the page
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX modes
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information is already formatted in a useful way and
@@ -106,6 +107,7 @@ struct nand_page_io_req {
const void *out;
void *in;
} oobbuf;
+ int mode;
};
/**
@@ -599,6 +601,7 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
+ iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = req->ooboffs;
iter->oobbytes_per_page = mtd_oobavail(mtd, req);
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 5dad59b31244..3e8ec3b8a39c 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -28,7 +28,14 @@ struct nand_flash_dev;
struct device_node;
/* Scan and identify a NAND device */
-int nand_scan(struct mtd_info *mtd, int max_chips);
+int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct mtd_info *mtd, int max_chips)
+{
+ return nand_scan_with_ids(mtd, max_chips, NULL);
+}
+
/*
* Separate phases of nand_scan(), allowing board driver to intervene
* and override command or ECC setup according to flash type.
@@ -740,8 +747,9 @@ enum nand_data_interface_type {
/**
* struct nand_data_interface - NAND interface timing
- * @type: type of the timing
- * @timings: The timing, type according to @type
+ * @type: type of the timing
+ * @timings: The timing, type according to @type
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
struct nand_data_interface {
enum nand_data_interface_type type;
@@ -798,8 +806,9 @@ struct nand_op_addr_instr {
/**
* struct nand_op_data_instr - Definition of a data instruction
* @len: number of data bytes to move
- * @in: buffer to fill when reading from the NAND chip
- * @out: buffer to read from when writing to the NAND chip
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
* @force_8bit: force 8-bit access
*
* Please note that "in" and "out" are inverted from the ONFI specification
@@ -842,9 +851,13 @@ enum nand_op_instr_type {
/**
* struct nand_op_instr - Instruction object
* @type: the instruction type
- * @cmd/@addr/@data/@waitrdy: extra data associated to the instruction.
- * You'll have to use the appropriate element
- * depending on @type
+ * @ctx: extra data associated to the instruction. You'll have to use the
+ * appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ * or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
* @delay_ns: delay the controller should apply after the instruction has been
* issued on the bus. Most modern controllers have internal timings
* control logic, and in this case, the controller driver can ignore
@@ -867,12 +880,18 @@ struct nand_op_instr {
* tBERS (during an erase) which all of them are u64 values that cannot be
* divided by usual kernel macros and must be handled with the special
* DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
*/
-#define __DIVIDE(dividend, divisor) ({ \
- sizeof(dividend) == sizeof(u32) ? \
- DIV_ROUND_UP(dividend, divisor) : \
- DIV_ROUND_UP_ULL(dividend, divisor); \
- })
+#define __DIVIDE(dividend, divisor) ({ \
+ (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor)); \
+ })
#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
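
The reworked __DIVIDE() keeps the quotient in the dividend's own type and only sends dividends wider than unsigned long through DIV_ROUND_UP_ULL(). A tiny worked example with hypothetical timing values:

#include <linux/mtd/rawnand.h>

static u64 demo_timing_conversion(void)
{
	u32 twb_max_ps = 100000;		/* hypothetical tWB_max: 100 ns */
	u64 tbers_max_ps = 200000000000ULL;	/* hypothetical tBERS_max: 200 ms */
	u32 twb_max_ns = PSEC_TO_NSEC(twb_max_ps);	/* stays a u32, == 100 */
	u64 tbers_max_ms = PSEC_TO_MSEC(tbers_max_ps);	/* stays a u64, == 200 */

	return twb_max_ns + tbers_max_ms;	/* 300, just to use the results */
}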
@@ -997,7 +1016,9 @@ struct nand_op_parser_data_constraints {
* struct nand_op_parser_pattern_elem - One element of a pattern
* @type: the instruction type
* @optional: whether this element of the pattern is optional or mandatory
- * @addr/@data: address or data constraint (number of cycles or data length)
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
*/
struct nand_op_parser_pattern_elem {
enum nand_op_instr_type type;
@@ -1224,6 +1245,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* devices.
* @priv: [OPTIONAL] pointer to private chip data
* @manufacturer: [INTERN] Contains manufacturer information
+ * @manufacturer.desc: [INTERN] Contains manufacturer's description
+ * @manufacturer.priv: [INTERN] Contains manufacturer private information
*/
struct nand_chip {
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index de36969eb359..e60da0d34cc1 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -62,6 +62,8 @@
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
+#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
+#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 14bc0d5d0ee5..3093dd162424 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -146,9 +146,6 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
static inline bool mutex_is_locked(struct mutex *lock)
{
- /*
- * XXX think about spin_is_locked
- */
return __mutex_owner(lock) != NULL;
}
diff --git a/include/linux/namei.h b/include/linux/namei.h
index a982bb7cd480..a78606e8e3df 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -81,6 +81,7 @@ extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
+extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
diff --git a/include/linux/net.h b/include/linux/net.h
index 2248a052061d..08b6eb964dd6 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,6 +147,7 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
+ __poll_t (*poll_mask) (struct socket *sock, __poll_t events);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
@@ -197,6 +198,7 @@ struct proto_ops {
int offset, size_t size, int flags);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
+ int (*set_rcvlowat)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index 29ed8fd6379a..db99240d00bd 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -103,11 +103,12 @@ enum {
#define NET_DIM_PARAMS_NUM_PROFILES 5
/* Adaptive moderation profiles */
#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
-#define NET_DIM_EQE_PROFILES { \
+#define NET_DIM_RX_EQE_PROFILES { \
{1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
@@ -115,7 +116,7 @@ enum {
{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
}
-#define NET_DIM_CQE_PROFILES { \
+#define NET_DIM_RX_CQE_PROFILES { \
{2, 256}, \
{8, 128}, \
{16, 64}, \
@@ -123,32 +124,68 @@ enum {
{64, 64} \
}
+#define NET_DIM_TX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+}
+
+#define NET_DIM_TX_CQE_PROFILES { \
+ {5, 128}, \
+ {8, 64}, \
+ {16, 32}, \
+ {32, 32}, \
+ {64, 32} \
+}
+
static const struct net_dim_cq_moder
-profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
- NET_DIM_EQE_PROFILES,
- NET_DIM_CQE_PROFILES,
+rx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_RX_EQE_PROFILES,
+ NET_DIM_RX_CQE_PROFILES,
};
-static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
- int ix)
+static const struct net_dim_cq_moder
+tx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_TX_EQE_PROFILES,
+ NET_DIM_TX_CQE_PROFILES,
+};
+
+static inline struct net_dim_cq_moder
+net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
- struct net_dim_cq_moder cq_moder;
+ struct net_dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
- cq_moder = profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
-static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
+static inline struct net_dim_cq_moder
+net_dim_get_def_rx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
+}
+
+static inline struct net_dim_cq_moder
+net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
{
- int default_profile_ix;
+ struct net_dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
- if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
- else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
+static inline struct net_dim_cq_moder
+net_dim_get_def_tx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
- return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
+ return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
}
static inline bool net_dim_on_top(struct net_dim *dim)
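To make the split RX/TX tables concrete, below is a reduced user-space stand-in (struct and enum names are illustrative, not the kernel definitions; values are taken from the RX profiles above) of the lookup net_dim_get_def_rx_moderation() performs: CQE mode with default index 1 resolves to 8 usec / 128 packets.

#include <stdio.h>

enum { START_FROM_EQE, START_FROM_CQE, NUM_MODES };

#define DEF_PROFILE_EQE 1
#define DEF_PROFILE_CQE 1

struct cq_moder { unsigned short usec, pkts; };

static const struct cq_moder rx_profile[NUM_MODES][5] = {
        /* EQE: fixed packet budget, increasing usec */
        { {1, 256}, {8, 256}, {64, 256}, {128, 256}, {256, 256} },
        /* CQE */
        { {2, 256}, {8, 128}, {16, 64}, {32, 64}, {64, 64} },
};

int main(void)
{
        int mode = START_FROM_CQE;
        int ix = (mode == START_FROM_CQE) ? DEF_PROFILE_CQE : DEF_PROFILE_EQE;
        struct cq_moder m = rx_profile[mode][ix];

        printf("default rx moderation (CQE mode): %u usec / %u pkts\n",
               m.usec, m.pkts);
        return 0;
}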
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 35b79f47a13d..623bb8ced060 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -55,8 +55,9 @@ enum {
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
+ NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_UDP_BIT,
+ NETIF_F_GSO_UDP_L4_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -77,6 +78,7 @@ enum {
NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
+ NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -147,6 +149,8 @@ enum {
#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
+#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
+#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cf44503ea81a..3ec9850c7936 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -791,6 +791,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_CBS,
TC_SETUP_QDISC_RED,
TC_SETUP_QDISC_PRIO,
+ TC_SETUP_QDISC_MQ,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -817,10 +818,13 @@ enum bpf_netdev_command {
BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
+ XDP_QUERY_XSK_UMEM,
+ XDP_SETUP_XSK_UMEM,
};
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
+struct xdp_umem;
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -851,6 +855,11 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
+ /* XDP_SETUP_XSK_UMEM */
+ struct {
+ struct xdp_umem *umem;
+ u16 queue_id;
+ } xsk;
};
};
@@ -865,6 +874,26 @@ struct xfrmdev_ops {
};
#endif
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+enum tls_offload_ctx_dir {
+ TLS_OFFLOAD_CTX_DIR_RX,
+ TLS_OFFLOAD_CTX_DIR_TX,
+};
+
+struct tls_crypto_info;
+struct tls_context;
+
+struct tlsdev_ops {
+ int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+ void (*tls_dev_del)(struct net_device *netdev,
+ struct tls_context *ctx,
+ enum tls_offload_ctx_dir direction);
+};
+#endif
+
struct dev_ifalias {
struct rcu_head rcuhead;
char ifalias[];
@@ -1165,12 +1194,13 @@ struct dev_ifalias {
* This function is used to set or query state related to XDP on the
* netdevice and manage BPF offload. See definition of
* enum bpf_netdev_command for details.
- * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
- * This function is used to submit a XDP packet for transmit on a
- * netdevice.
- * void (*ndo_xdp_flush)(struct net_device *dev);
- * This function is used to inform the driver to flush a particular
- * xdp tx queue. Must be called on same CPU as xdp_xmit.
+ * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
+ * u32 flags);
+ * This function is used to submit @n XDP packets for transmit on a
+ * netdevice. Returns the number of frames successfully transmitted;
+ * frames that got dropped are freed/returned via xdp_return_frame().
+ * A negative return value means a general error invoking the ndo, in
+ * which case no frames were transmitted and the core caller frees all
+ * frames.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1355,9 +1385,11 @@ struct net_device_ops {
int needed_headroom);
int (*ndo_bpf)(struct net_device *dev,
struct netdev_bpf *bpf);
- int (*ndo_xdp_xmit)(struct net_device *dev,
- struct xdp_buff *xdp);
- void (*ndo_xdp_flush)(struct net_device *dev);
+ int (*ndo_xdp_xmit)(struct net_device *dev, int n,
+ struct xdp_frame **xdp,
+ u32 flags);
+ int (*ndo_xsk_async_xmit)(struct net_device *dev,
+ u32 queue_id);
};
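A hedged user-space sketch of that return convention (the demo_* names, the two-frame limit and the plain structs are stand-ins, not kernel types): a negative return means nothing was consumed and the caller frees everything, while on partial success the driver has already freed/returned the frames it dropped.

#include <stdio.h>

struct demo_frame { int id; };

/* Pretend driver with room for only two frames in its TX ring. */
static int demo_xdp_xmit(struct demo_frame **frames, int n, unsigned int flags)
{
        int sent = (n < 2) ? n : 2;

        (void)flags;
        /* frames[sent..n-1] would be freed here, e.g. via xdp_return_frame() */
        return sent;
}

int main(void)
{
        struct demo_frame f[3] = { {0}, {1}, {2} };
        struct demo_frame *bulk[3] = { &f[0], &f[1], &f[2] };
        int sent = demo_xdp_xmit(bulk, 3, 0);

        if (sent < 0)
                printf("general error: caller frees all 3 frames\n");
        else
                printf("driver sent %d of 3 frames, dropped %d itself\n",
                       sent, 3 - sent);
        return 0;
}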
/**
@@ -1401,6 +1433,8 @@ struct net_device_ops {
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
* @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
+ * @IFF_FAILOVER: device is a failover master device
+ * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1430,6 +1464,8 @@ enum netdev_priv_flags {
IFF_PHONY_HEADROOM = 1<<24,
IFF_MACSEC = 1<<25,
IFF_NO_RX_HANDLER = 1<<26,
+ IFF_FAILOVER = 1<<27,
+ IFF_FAILOVER_SLAVE = 1<<28,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1458,6 +1494,8 @@ enum netdev_priv_flags {
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
#define IFF_MACSEC IFF_MACSEC
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
+#define IFF_FAILOVER IFF_FAILOVER
+#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
/**
* struct net_device - The DEVICE structure.
@@ -1750,6 +1788,10 @@ struct net_device {
const struct xfrmdev_ops *xfrmdev_ops;
#endif
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct tlsdev_ops *tlsdev_ops;
+#endif
+
const struct header_ops *header_ops;
unsigned int flags;
@@ -2304,8 +2346,19 @@ enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_HASH,
};
+enum netdev_lag_hash {
+ NETDEV_LAG_HASH_NONE,
+ NETDEV_LAG_HASH_L2,
+ NETDEV_LAG_HASH_L34,
+ NETDEV_LAG_HASH_L23,
+ NETDEV_LAG_HASH_E23,
+ NETDEV_LAG_HASH_E34,
+ NETDEV_LAG_HASH_UNKNOWN,
+};
+
struct netdev_lag_upper_info {
enum netdev_lag_tx_type tx_type;
+ enum netdev_lag_hash hash_type;
};
struct netdev_lag_lower_state_info {
@@ -2486,6 +2539,7 @@ void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -3213,19 +3267,6 @@ static inline int netif_set_xps_queue(struct net_device *dev,
}
#endif
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues);
-
-/*
- * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
- * as a distribution range limit for the returned value.
- */
-static inline u16 skb_tx_hash(const struct net_device *dev,
- struct sk_buff *skb)
-{
- return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
-}
-
/**
* netif_is_multiqueue - test if device has multiple transmit queues
* @dev: network device
@@ -4186,6 +4227,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -4308,6 +4350,16 @@ static inline bool netif_is_rxfh_configured(const struct net_device *dev)
return dev->priv_flags & IFF_RXFH_CONFIGURED;
}
+static inline bool netif_is_failover(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_FAILOVER;
+}
+
+static inline bool netif_is_failover_slave(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_FAILOVER_SLAVE;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 85a1a0b32c66..dd2052f0efb7 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -67,7 +67,6 @@ struct nf_hook_ops {
struct net_device *dev;
void *priv;
u_int8_t pf;
- bool nat_hook;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
@@ -321,18 +320,33 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
-extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
+
+struct nf_conn;
+enum nf_nat_manip_type;
+struct nlattr;
+enum ip_conntrack_dir;
+
+struct nf_nat_hook {
+ int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
+ const struct nlattr *attr);
+ void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
+ unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
+ enum nf_nat_manip_type mtype,
+ enum ip_conntrack_dir dir);
+};
+
+extern struct nf_nat_hook __rcu *nf_nat_hook;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
- void (*decodefn)(struct sk_buff *, struct flowi *);
+ struct nf_nat_hook *nat_hook;
rcu_read_lock();
- decodefn = rcu_dereference(nf_nat_decode_session_hook);
- if (decodefn)
- decodefn(skb, fl);
+ nat_hook = rcu_dereference(nf_nat_hook);
+ if (nat_hook && nat_hook->decode_session)
+ nat_hook->decode_session(skb, fl);
rcu_read_unlock();
#endif
}
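A minimal user-space sketch of the pattern introduced here, with a plain pointer standing in for the RCU-protected nf_nat_hook and hypothetical demo_* names: callers test the hook struct once, then the individual method, instead of juggling one exported function pointer per hook.

#include <stdio.h>

struct demo_nat_hook {
        void (*decode_session)(const char *skb);
};

static void demo_decode_session(const char *skb)
{
        printf("decoding flow for %s\n", skb);
}

/* Points at the hook struct while the NAT module is "loaded", NULL otherwise. */
static const struct demo_nat_hook demo_hook = {
        .decode_session = demo_decode_session,
};
static const struct demo_nat_hook *nat_hook = &demo_hook;

int main(void)
{
        if (nat_hook && nat_hook->decode_session)
                nat_hook->decode_session("skb#1");
        return 0;
}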
@@ -374,13 +388,19 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
struct nf_conn;
enum ip_conntrack_info;
+
+struct nf_ct_hook {
+ int (*update)(struct net *net, struct sk_buff *skb);
+ void (*destroy)(struct nf_conntrack *);
+};
+extern struct nf_ct_hook __rcu *nf_ct_hook;
+
struct nlattr;
struct nfnl_ct_hook {
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index bfb3531fd88a..8ce271e187b6 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -23,6 +23,9 @@
/* Set is defined with timeout support: timeout value may be 0 */
#define IPSET_NO_TIMEOUT UINT_MAX
+/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
+#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC
+
#define ip_set_adt_opt_timeout(opt, set) \
((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
@@ -32,11 +35,10 @@ ip_set_timeout_uget(struct nlattr *tb)
unsigned int timeout = ip_set_get_h32(tb);
/* Normalize to fit into jiffies */
- if (timeout > UINT_MAX/MSEC_PER_SEC)
- timeout = UINT_MAX/MSEC_PER_SEC;
+ if (timeout > IPSET_MAX_TIMEOUT)
+ timeout = IPSET_MAX_TIMEOUT;
- /* Userspace supplied TIMEOUT parameter: adjust crazy size */
- return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+ return timeout;
}
static inline bool
@@ -65,8 +67,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
static inline u32
ip_set_timeout_get(const unsigned long *timeout)
{
- return *timeout == IPSET_ELEM_PERMANENT ? 0 :
- jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+ u32 t;
+
+ if (*timeout == IPSET_ELEM_PERMANENT)
+ return 0;
+
+ t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+ /* Zero value in userspace means no timeout */
+ return t == 0 ? 1 : t;
}
#endif /* __KERNEL__ */
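The clamp above keeps userspace-supplied timeouts (in seconds) small enough to survive msecs_to_jiffies(). A simplified user-space sketch of the arithmetic (MSEC_PER_SEC re-defined locally, jiffies handling omitted), including the new rule that a still-live entry reports at least 1 second, since 0 means "no timeout" to userspace:

#include <stdio.h>
#include <limits.h>

#define MSEC_PER_SEC      1000U
#define IPSET_MAX_TIMEOUT ((UINT_MAX >> 1) / MSEC_PER_SEC)

static unsigned int timeout_uget(unsigned int seconds)
{
        return seconds > IPSET_MAX_TIMEOUT ? IPSET_MAX_TIMEOUT : seconds;
}

static unsigned int timeout_get(unsigned int remaining_seconds)
{
        /* 0 would be read back as "no timeout", so report at least 1. */
        return remaining_seconds == 0 ? 1 : remaining_seconds;
}

int main(void)
{
        printf("IPSET_MAX_TIMEOUT = %u s (~%u days)\n",
               IPSET_MAX_TIMEOUT, IPSET_MAX_TIMEOUT / 86400);
        printf("uget(UINT_MAX)    = %u\n", timeout_uget(UINT_MAX));
        printf("get(0)            = %u\n", timeout_get(0));
        return 0;
}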
diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nf_osf.h
new file mode 100644
index 000000000000..0e114c492fb8
--- /dev/null
+++ b/include/linux/netfilter/nf_osf.h
@@ -0,0 +1,33 @@
+#include <uapi/linux/netfilter/nf_osf.h>
+
+/* Initial window size option state machine: multiple of mss, mtu or
+ * plain numeric value. It can also be a plain numeric value that is
+ * not a multiple of the specified value.
+ */
+enum nf_osf_window_size_options {
+ OSF_WSS_PLAIN = 0,
+ OSF_WSS_MSS,
+ OSF_WSS_MTU,
+ OSF_WSS_MODULO,
+ OSF_WSS_MAX,
+};
+
+enum osf_fmatch_states {
+ /* Packet does not match the fingerprint */
+ FMATCH_WRONG = 0,
+ /* Packet matches the fingerprint */
+ FMATCH_OK,
+ /* Options do not match the fingerprint, but header does */
+ FMATCH_OPT_WRONG,
+};
+
+struct nf_osf_finger {
+ struct rcu_head rcu_head;
+ struct list_head finger_entry;
+ struct nf_osf_user_finger finger;
+};
+
+bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
+ int hooknum, struct net_device *in, struct net_device *out,
+ const struct nf_osf_info *info, struct net *net,
+ const struct list_head *nf_osf_fingers);
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 34551f8aaf9d..3ecc3050be0e 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -31,6 +31,7 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb);
+ void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 0773b5a032f1..c6935be7c6ca 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -17,10 +17,6 @@
#include <linux/if_ether.h>
#include <uapi/linux/netfilter_bridge/ebtables.h>
-/* return values for match() functions */
-#define EBT_MATCH 0
-#define EBT_NOMATCH 1
-
struct ebt_match {
struct list_head list;
const char name[EBT_FUNCTION_MAXNAMELEN];
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 4e735be53e70..74ae3e1d19a0 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -28,6 +28,7 @@ struct nfs41_impl_id;
struct nfs_client {
refcount_t cl_count;
atomic_t cl_mds_count;
+ seqcount_t cl_callback_count;
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
@@ -235,6 +236,7 @@ struct nfs_server {
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
+#define NFS_CAP_LGOPEN (1U << 5)
#define NFS_CAP_FILEID (1U << 6)
#define NFS_CAP_MODE (1U << 7)
#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 34d28564ecf3..9dee3c23895d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -259,6 +259,7 @@ struct nfs4_layoutget_args {
struct nfs4_layoutget_res {
struct nfs4_sequence_res seq_res;
+ int status;
__u32 return_on_close;
struct pnfs_layout_range range;
__u32 type;
@@ -270,6 +271,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
+ unsigned callback_count;
gfp_t gfp_flags;
};
@@ -435,6 +437,7 @@ struct nfs_openargs {
enum createmode4 createmode;
const struct nfs4_label *label;
umode_t umask;
+ struct nfs4_layoutget_args *lg_args;
};
struct nfs_openres {
@@ -457,6 +460,7 @@ struct nfs_openres {
__u32 access_request;
__u32 access_supported;
__u32 access_result;
+ struct nfs4_layoutget_res *lg_res;
};
/*
@@ -1577,7 +1581,8 @@ struct nfs_rpc_ops {
struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *,
struct nfs_subversion *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *, struct nfs4_label *,
+ struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
int (*lookup) (struct inode *, const struct qstr *,
@@ -1591,7 +1596,7 @@ struct nfs_rpc_ops {
int (*create) (struct inode *, struct dentry *,
struct iattr *, int);
int (*remove) (struct inode *, struct dentry *);
- void (*unlink_setup) (struct rpc_message *, struct dentry *);
+ void (*unlink_setup) (struct rpc_message *, struct dentry *, struct inode *);
void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
int (*unlink_done) (struct rpc_task *, struct inode *);
void (*rename_setup) (struct rpc_message *msg,
@@ -1620,9 +1625,11 @@ struct nfs_rpc_ops {
struct nfs_pgio_header *);
void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
int (*read_done)(struct rpc_task *, struct nfs_pgio_header *);
- void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *);
+ void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *,
+ struct rpc_clnt **);
int (*write_done)(struct rpc_task *, struct nfs_pgio_header *);
- void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+ void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *,
+ struct rpc_clnt **);
void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
int (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
int (*lock)(struct file *, int, struct file_lock *);
diff --git a/include/linux/node.h b/include/linux/node.h
index 41f171861dcc..6d336e38d155 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -32,9 +32,11 @@ extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages);
+extern int link_mem_sections(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, bool check_nid);
#else
-static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+static inline int link_mem_sections(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, bool check_nid)
{
return 0;
}
@@ -57,7 +59,7 @@ static inline int register_one_node(int nid)
if (error)
return error;
/* link memory sections under this node */
- error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages);
+ error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
}
return error;
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index e791ebc65c9c..0c5ef54fd416 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
#define _LINUX_NOSPEC_H
#include <asm/barrier.h>
+struct task_struct;
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
@@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
\
(typeof(_i)) (_i & _mask); \
})
+
+/* Speculation control prctl */
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
+
#endif /* _LINUX_NOSPEC_H */
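A user-space sketch of what the mask is for, mirroring the generic fallback formula used when an architecture does not provide its own array_index_mask_nospec() (architecture-specific versions may differ): the mask is ~0UL when index < size and 0 otherwise, so AND-ing it with the index clamps out-of-bounds values even under speculation.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long mask_nospec(unsigned long index, unsigned long size)
{
        /* ~0UL when index < size, 0 otherwise (generic fallback formula). */
        return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
        unsigned long size = 16;

        printf("mask(3, 16)  = 0x%lx\n", mask_nospec(3, size));   /* all ones */
        printf("mask(16, 16) = 0x%lx\n", mask_nospec(16, size));  /* 0 */
        printf("index 20 clamped to %lu\n", 20 & mask_nospec(20, size));
        return 0;
}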
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 6d731110e0db..f35c7bf76143 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -43,9 +43,7 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
- * chains are slightly more difficult to use because they require special
- * runtime initialization.
+ * often but notifier_blocks will seldom be removed.
*/
struct notifier_block;
@@ -91,7 +89,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
+/* srcu_notifier_heads must be cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -104,7 +102,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-/* srcu_notifier_heads cannot be initialized statically */
+
+#define SRCU_NOTIFIER_INIT(name, pcpu) \
+ { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .head = NULL, \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
+ }
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -116,6 +120,26 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
+#ifdef CONFIG_TREE_SRCU
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
+ static DEFINE_PER_CPU(struct srcu_data, \
+ name##_head_srcu_data); \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
+
+#else
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name)
+
+#endif
+
+#define SRCU_NOTIFIER_HEAD(name) \
+ _SRCU_NOTIFIER_HEAD(name, /* not static */)
+
+#define SRCU_NOTIFIER_HEAD_STATIC(name) \
+ _SRCU_NOTIFIER_HEAD(name, static)
+
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 6e8200215321..eba50b057f6f 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -163,7 +163,7 @@ void nubus_seq_write_rsrc_mem(struct seq_file *m,
unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
/* Declarations relating to driver model objects */
-int nubus_bus_register(void);
+int nubus_parent_device_register(void);
int nubus_device_register(struct nubus_board *board);
int nubus_driver_register(struct nubus_driver *ndrv);
void nubus_driver_unregister(struct nubus_driver *ndrv);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 4112e2bd747f..2950ce957656 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -436,10 +436,19 @@ enum {
enum {
NVME_AER_ERROR = 0,
NVME_AER_SMART = 1,
+ NVME_AER_NOTICE = 2,
NVME_AER_CSS = 6,
NVME_AER_VS = 7,
- NVME_AER_NOTICE_NS_CHANGED = 0x0002,
- NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
+};
+
+enum {
+ NVME_AER_NOTICE_NS_CHANGED = 0x00,
+ NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+};
+
+enum {
+ NVME_AEN_CFG_NS_ATTR = 1 << 8,
+ NVME_AEN_CFG_FW_ACT = 1 << 9,
};
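The old combined constants (0x0002, 0x0102) packed the event type into bits 2:0 and the notice sub-type into bits 15:8 of the AER completion result; with the split enums a consumer decodes the two fields separately, as in this user-space sketch (the example result value is arbitrary).

#include <stdio.h>

enum { NVME_AER_ERROR = 0, NVME_AER_SMART = 1, NVME_AER_NOTICE = 2 };
enum { NVME_AER_NOTICE_NS_CHANGED = 0x00, NVME_AER_NOTICE_FW_ACT_STARTING = 0x01 };

int main(void)
{
        unsigned int result = 0x0102;   /* example AER completion dword 0 */
        unsigned int type = result & 0x7;
        unsigned int info = (result >> 8) & 0xff;

        if (type == NVME_AER_NOTICE && info == NVME_AER_NOTICE_FW_ACT_STARTING)
                printf("notice: firmware activation starting\n");
        else if (type == NVME_AER_NOTICE && info == NVME_AER_NOTICE_NS_CHANGED)
                printf("notice: namespace attributes changed\n");
        return 0;
}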
struct nvme_lba_range_type {
@@ -747,6 +756,7 @@ enum {
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
+ NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
@@ -755,6 +765,8 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+#define NVME_MAX_CHANGED_NAMESPACES 1024
+
struct nvme_identify {
__u8 opcode;
__u8 flags;
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index f89598bc4e1c..24def6ad09bb 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -77,6 +77,9 @@ struct nvmem_device *devm_nvmem_register(struct device *dev,
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
+int nvmem_add_cells(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ int ncells);
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
@@ -99,6 +102,14 @@ static inline int
devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
return nvmem_unregister(nvmem);
+
+}
+
+static inline int nvmem_add_cells(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ int ncells)
+{
+ return -ENOSYS;
}
#endif /* CONFIG_NVMEM */
diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h
new file mode 100644
index 000000000000..b27da9f164cb
--- /dev/null
+++ b/include/linux/of_clk.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * OF clock helpers
+ */
+
+#ifndef __LINUX_OF_CLK_H
+#define __LINUX_OF_CLK_H
+
+#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
+
+unsigned int of_clk_get_parent_count(struct device_node *np);
+const char *of_clk_get_parent_name(struct device_node *np, int index);
+void of_clk_init(const struct of_device_id *matches);
+
+#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+
+static inline unsigned int of_clk_get_parent_count(struct device_node *np)
+{
+ return 0;
+}
+static inline const char *of_clk_get_parent_name(struct device_node *np,
+ int index)
+{
+ return NULL;
+}
+static inline void of_clk_init(const struct of_device_id *matches) {}
+
+#endif /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+
+#endif /* __LINUX_OF_CLK_H */
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8da5a1b31ece..165fd302b442 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -55,7 +55,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-int of_dma_configure(struct device *dev, struct device_node *np);
+int of_dma_configure(struct device *dev,
+ struct device_node *np,
+ bool force_dma);
void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
@@ -105,7 +107,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return NULL;
}
-static inline int of_dma_configure(struct device *dev, struct device_node *np)
+static inline int of_dma_configure(struct device *dev,
+ struct device_node *np,
+ bool force_dma)
{
return 0;
}
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 091033a6b836..e83d87fc5673 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,9 +13,6 @@ struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
-int of_get_pci_domain_nr(struct device_node *node);
-int of_pci_get_max_link_speed(struct device_node *node);
void of_pci_check_probe_only(void);
int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
@@ -32,18 +29,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
return -EINVAL;
}
-static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
-of_get_pci_domain_nr(struct device_node *node)
-{
- return -1;
-}
-
static inline int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
@@ -51,12 +36,6 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid,
return -EINVAL;
}
-static inline int
-of_pci_get_max_link_speed(struct device_node *node)
-{
- return -EINVAL;
-}
-
static inline void of_pci_check_probe_only(void) { }
#endif
@@ -70,17 +49,4 @@ of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
}
#endif
-#if defined(CONFIG_OF_ADDRESS)
-int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
-#else
-static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
-{
- return -EINVAL;
-}
-#endif
-
#endif
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h
index c1aede46718b..ce1b7c6283ee 100644
--- a/include/linux/omap-iommu.h
+++ b/include/linux/omap-iommu.h
@@ -13,7 +13,12 @@
#ifndef _OMAP_IOMMU_H_
#define _OMAP_IOMMU_H_
+#ifdef CONFIG_OMAP_IOMMU
extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);
+#else
+static inline void omap_iommu_save_ctx(struct device *dev) {}
+static inline void omap_iommu_restore_ctx(struct device *dev) {}
+#endif
#endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5bad038ac012..6adac113e96d 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
return 0;
}
+void __oom_reap_task_mm(struct mm_struct *mm);
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
new file mode 100644
index 000000000000..8712ff70995f
--- /dev/null
+++ b/include/linux/overflow.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_add_overflow(__a, __b, __d); \
+})
+
+#define check_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_sub_overflow(__a, __b, __d); \
+})
+
+#define check_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_mul_overflow(__a, __b, __d); \
+})
+
+#else
+
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a + __b; \
+ *__d < __a; \
+})
+#define __unsigned_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a - __b; \
+ __a < __b; \
+})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a * __b; \
+ __builtin_constant_p(__b) ? \
+ __b > 0 && __a > type_max(typeof(__a)) / __b : \
+ __a > 0 && __b > type_max(typeof(__b)) / __a; \
+})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a + (u64)__b; \
+ (((~(__a ^ __b)) & (*__d ^ __a)) \
+ & type_min(typeof(__a))) != 0; \
+})
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a - (u64)__b; \
+ ((((__a ^ __b)) & (*__d ^ __a)) \
+ & type_min(typeof(__a))) != 0; \
+})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __b_c_e in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ typeof(a) __tmax = type_max(typeof(a)); \
+ typeof(a) __tmin = type_min(typeof(a)); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a * (u64)__b; \
+ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
+ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
+ (__b == (typeof(__b))-1 && __a == __tmin); \
+})
+
+
+#define check_add_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_add_overflow(a, b, d), \
+ __unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_sub_overflow(a, b, d), \
+ __unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_mul_overflow(a, b, d), \
+ __unsigned_mul_overflow(a, b, d))
+
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(a, b, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(a, b, &bytes))
+ return SIZE_MAX;
+ if (check_mul_overflow(bytes, c, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(n, size, &bytes))
+ return SIZE_MAX;
+ if (check_add_overflow(bytes, c, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n) \
+ __ab_c_size(n, \
+ sizeof(*(p)->member) + __must_be_array((p)->member),\
+ sizeof(*(p)))
+
+#endif /* __LINUX_OVERFLOW_H */
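Below is a user-space sketch of the intended call pattern, assuming a GCC/Clang compiler whose __builtin_*_overflow intrinsics stand in for check_*_overflow(); struct demo and its trailing elem[] array are hypothetical. The point is that the helpers report overflow instead of silently wrapping, and the size helpers saturate to SIZE_MAX so an oversized allocation request fails in the allocator rather than under-allocating.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo {
        unsigned int count;
        unsigned short elem[];          /* trailing array, as in struct_size() */
};

/* Mirrors __ab_c_size()/struct_size(): n * elem_size + header, saturating. */
static size_t demo_struct_size(size_t n)
{
        size_t bytes;

        if (__builtin_mul_overflow(n, sizeof(unsigned short), &bytes))
                return SIZE_MAX;
        if (__builtin_add_overflow(bytes, sizeof(struct demo), &bytes))
                return SIZE_MAX;
        return bytes;
}

int main(void)
{
        printf("struct demo with 8 elems: %zu bytes\n", demo_struct_size(8));
        printf("struct demo with SIZE_MAX elems: %zu (saturated, refused)\n",
               demo_struct_size(SIZE_MAX));
        return 0;
}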
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e34a27727b9a..901943e4754b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -642,49 +642,62 @@ PAGEFLAG_FALSE(DoubleMap)
#endif
/*
- * For pages that are never mapped to userspace, page->mapcount may be
- * used for storing extra information about page type. Any value used
- * for this purpose must be <= -2, but it's better start not too close
- * to -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a special page.
+ * For pages that are never mapped to userspace (and aren't PageSlab),
+ * page_type may be used. Because it is initialised to -1, we invert the
+ * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
+ * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
+ * low bits so that an underflow or overflow of page_mapcount() won't be
+ * mistaken for a page type value.
*/
-#define PAGE_MAPCOUNT_OPS(uname, lname) \
+
+#define PAGE_TYPE_BASE 0xf0000000
+/* Reserve 0x0000007f to catch underflows of page_mapcount */
+#define PG_buddy 0x00000080
+#define PG_balloon 0x00000100
+#define PG_kmemcg 0x00000200
+#define PG_table 0x00000400
+
+#define PageType(page, flag) \
+ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+
+#define PAGE_TYPE_OPS(uname, lname) \
static __always_inline int Page##uname(struct page *page) \
{ \
- return atomic_read(&page->_mapcount) == \
- PAGE_##lname##_MAPCOUNT_VALUE; \
+ return PageType(page, PG_##lname); \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \
- atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \
+ VM_BUG_ON_PAGE(!PageType(page, 0), page); \
+ page->page_type &= ~PG_##lname; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- atomic_set(&page->_mapcount, -1); \
+ page->page_type |= PG_##lname; \
}
/*
- * PageBuddy() indicate that the page is free and in the buddy system
+ * PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
+PAGE_TYPE_OPS(Buddy, buddy)
/*
- * PageBalloon() is set on pages that are on the balloon page list
+ * PageBalloon() is true for pages that are on the balloon page list
* (see mm/balloon_compaction.c).
*/
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
+PAGE_TYPE_OPS(Balloon, balloon)
/*
* If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
* pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
*/
-#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512)
-PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
+PAGE_TYPE_OPS(Kmemcg, kmemcg)
+
+/*
+ * Marks pages in use as page tables.
+ */
+PAGE_TYPE_OPS(Table, table)
extern bool is_free_buddy_page(struct page *page);
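Because page_type shares storage with a field initialised to -1, the generated helpers work with inverted bits. A user-space sketch with a reduced struct page stand-in shows the effect of the new PAGE_TYPE_OPS() accessors: "setting" a type clears its bit, "clearing" it sets the bit back.

#include <stdio.h>

#define PAGE_TYPE_BASE  0xf0000000
#define PG_buddy        0x00000080

struct page { unsigned int page_type; };

#define PageType(page, flag) \
        (((page)->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

int main(void)
{
        struct page p = { .page_type = (unsigned int)-1 }; /* fresh page */

        printf("PageBuddy initially      : %d\n", PageType(&p, PG_buddy));

        p.page_type &= ~PG_buddy;               /* __SetPageBuddy() */
        printf("after __SetPageBuddy()   : %d\n", PageType(&p, PG_buddy));

        p.page_type |= PG_buddy;                /* __ClearPageBuddy() */
        printf("after __ClearPageBuddy() : %d\n", PageType(&p, PG_buddy));
        return 0;
}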
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index c15ab80ad32d..bab7e57f659b 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -7,10 +7,22 @@
#include <asm/page.h>
struct page_counter {
- atomic_long_t count;
- unsigned long limit;
+ atomic_long_t usage;
+ unsigned long min;
+ unsigned long low;
+ unsigned long max;
struct page_counter *parent;
+ /* effective memory.min and memory.min usage tracking */
+ unsigned long emin;
+ atomic_long_t min_usage;
+ atomic_long_t children_min_usage;
+
+ /* effective memory.low and memory.low usage tracking */
+ unsigned long elow;
+ atomic_long_t low_usage;
+ atomic_long_t children_low_usage;
+
/* legacy */
unsigned long watermark;
unsigned long failcnt;
@@ -25,14 +37,14 @@ struct page_counter {
static inline void page_counter_init(struct page_counter *counter,
struct page_counter *parent)
{
- atomic_long_set(&counter->count, 0);
- counter->limit = PAGE_COUNTER_MAX;
+ atomic_long_set(&counter->usage, 0);
+ counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
{
- return atomic_long_read(&counter->count);
+ return atomic_long_read(&counter->usage);
}
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
@@ -41,7 +53,9 @@ bool page_counter_try_charge(struct page_counter *counter,
unsigned long nr_pages,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_limit(struct page_counter *counter, unsigned long limit);
+void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages);
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index baadad1aabbc..29efa09d686b 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -62,5 +62,6 @@ extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
+int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index af657ca58b70..243eaa5a66ff 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -90,8 +90,16 @@ struct pci_epc {
struct config_group *group;
/* spinlock to protect against concurrent access of EP controller */
spinlock_t lock;
+ unsigned int features;
};
+#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
+#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
+#define EPC_FEATURE_SET_BAR(features, bar) \
+ (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
+#define EPC_FEATURE_GET_BAR(features) \
+ ((features & EPC_FEATURE_BAR_MASK) >> 1)
+
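The new features word stores flag bits plus a BAR number packed into bits 1-3. A small user-space sketch of the encode/decode macros above (the chosen BAR value is arbitrary):

#include <stdio.h>

#define BIT(n)                          (1U << (n))
#define EPC_FEATURE_NO_LINKUP_NOTIFIER  BIT(0)
#define EPC_FEATURE_BAR_MASK            (BIT(1) | BIT(2) | BIT(3))
#define EPC_FEATURE_SET_BAR(features, bar) \
        (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
#define EPC_FEATURE_GET_BAR(features) \
        ((features & EPC_FEATURE_BAR_MASK) >> 1)

int main(void)
{
        unsigned int features = EPC_FEATURE_NO_LINKUP_NOTIFIER;

        EPC_FEATURE_SET_BAR(features, 2);       /* record BAR 2 */
        printf("features = 0x%x, BAR = %u\n",
               features, EPC_FEATURE_GET_BAR(features));
        return 0;
}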
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
#define pci_epc_create(dev, ops) \
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index f7d6f4883f8b..4e7764935fa8 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -72,7 +72,7 @@ struct pci_epf_ops {
* @driver: PCI EPF driver
* @ops: set of function pointers for performing EPF operations
* @owner: the owner of the module that registers the PCI EPF driver
- * @group: configfs group corresponding to the PCI EPF driver
+ * @epf_group: list of configfs groups corresponding to the PCI EPF driver
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
@@ -82,7 +82,7 @@ struct pci_epf_driver {
struct device_driver driver;
struct pci_epf_ops *ops;
struct module *owner;
- struct config_group *group;
+ struct list_head epf_group;
const struct pci_epf_device_id *id_table;
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 73178a2fcee0..340029b2fb38 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -217,6 +217,7 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
+ PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
};
/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
@@ -406,6 +407,9 @@ struct pci_dev {
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+ unsigned int broken_cmd_compl:1; /* No compl for some cmds */
+#endif
#ifdef CONFIG_PCIE_PTM
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
@@ -471,8 +475,10 @@ struct pci_host_bridge {
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
unsigned int native_aer:1; /* OS may use PCIe AER */
- unsigned int native_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
+ unsigned int native_ltr:1; /* OS may use PCIe LTR */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
@@ -670,7 +676,7 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val);
-#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
@@ -1079,8 +1085,6 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
@@ -1451,8 +1455,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
+extern bool pcie_ports_native;
#else
#define pcie_ports_disabled true
+#define pcie_ports_native false
#endif
#ifdef CONFIG_PCIEASPM
@@ -1479,6 +1485,8 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
+bool pci_ats_disabled(void);
+
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
@@ -1510,12 +1518,10 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
-int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
/*
@@ -1670,7 +1676,6 @@ static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
@@ -1954,6 +1959,7 @@ int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
@@ -1986,6 +1992,7 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+#define pci_sriov_configure_simple NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
@@ -2284,7 +2291,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 26213024e81b..cf5e22103f68 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -162,8 +162,9 @@ struct hotplug_params {
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
-bool pciehp_is_native(struct pci_dev *pdev);
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
+bool pciehp_is_native(struct pci_dev *bridge);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
+bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
@@ -172,6 +173,17 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
{
return -ENODEV;
}
-static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; }
+
+static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
+{
+ return 0;
+}
+static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; }
+static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
#endif
+
+static inline bool hotplug_is_native(struct pci_dev *bridge)
+{
+ return pciehp_is_native(bridge) || shpchp_is_native(bridge);
+}
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cc608fc55334..29502238e510 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -561,6 +561,7 @@
#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
@@ -2119,6 +2120,8 @@
#define PCI_VENDOR_ID_MYRICOM 0x14c1
+#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
#define PCI_DEVICE_ID_TITAN_100L 0x8010
@@ -2387,6 +2390,8 @@
#define PCI_VENDOR_ID_LENOVO 0x17aa
+#define PCI_VENDOR_ID_QCOM 0x17cb
+
#define PCI_VENDOR_ID_CDNS 0x17cd
#define PCI_VENDOR_ID_ARECA 0x17d3
@@ -2552,6 +2557,8 @@
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2672,6 +2679,7 @@
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
+#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2776,6 +2784,7 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
+#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index b1f37a89e368..79b99d653e03 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -133,7 +133,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
lock_release(&sem->rw_sem.dep_map, 1, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
if (!read)
- sem->rw_sem.owner = NULL;
+ sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN;
#endif
}
@@ -141,6 +141,10 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ if (!read)
+ sem->rw_sem.owner = current;
+#endif
}
#endif
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 40036a57d072..ad5444491975 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -78,7 +78,7 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t supported_cpus;
char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e71e99eb9a4e..1fa12887ec02 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -467,7 +467,7 @@ enum perf_addr_filter_action_t {
*/
struct perf_addr_filter {
struct list_head entry;
- struct inode *inode;
+ struct path path;
unsigned long offset;
unsigned long size;
enum perf_addr_filter_action_t action;
@@ -868,6 +868,7 @@ extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
+extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
@@ -1016,6 +1017,14 @@ static inline int is_software_event(struct perf_event *event)
return event->event_caps & PERF_EV_CAP_SOFTWARE;
}
+/*
+ * Return 1 for event in sw context, 0 for event in hw context
+ */
+static inline int in_software_context(struct perf_event *event)
+{
+ return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+}
+
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
@@ -1289,6 +1298,10 @@ static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event *perf_get_event(struct file *file)
+{
+ return ERR_PTR(-EINVAL);
+}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);
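
A sketch of how a PMU driver might use the new in_software_context() helper when validating event groups; the driver name and the exact grouping policy are illustrative, not taken from an in-tree user.

#include <linux/perf_event.h>

static int example_pmu_event_init(struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;

        /* Refuse grouping under a hardware context we do not control. */
        if (leader != event && !is_software_event(leader) &&
            !in_software_context(leader))
                return -EINVAL;

        return 0;
}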
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index a03c2642a87c..21713dc14ce2 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -122,7 +122,7 @@ pud_t pud_mkdevmap(pud_t pud);
#endif
#endif /* __HAVE_ARCH_PTE_DEVMAP */
-#ifdef __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline bool pfn_t_special(pfn_t pfn)
{
return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
@@ -132,5 +132,5 @@ static inline bool pfn_t_special(pfn_t pfn)
{
return false;
}
-#endif /* __HAVE_ARCH_PTE_SPECIAL */
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f0b5870a6d40..6cd09098427c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -406,13 +406,17 @@ struct phy_device {
u32 phy_id;
struct phy_c45_device_ids c45_ids;
- bool is_c45;
- bool is_internal;
- bool is_pseudo_fixed_link;
- bool has_fixups;
- bool suspended;
- bool sysfs_links;
- bool loopback_enabled;
+ unsigned is_c45:1;
+ unsigned is_internal:1;
+ unsigned is_pseudo_fixed_link:1;
+ unsigned has_fixups:1;
+ unsigned suspended:1;
+ unsigned sysfs_links:1;
+ unsigned loopback_enabled:1;
+
+ unsigned autoneg:1;
+ /* The most recently read link state */
+ unsigned link:1;
enum phy_state state;
@@ -429,9 +433,6 @@ struct phy_device {
int pause;
int asym_pause;
- /* The most recently read link state */
- int link;
-
/* Enabled Interrupts */
u32 interrupts;
@@ -444,8 +445,6 @@ struct phy_device {
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
- int autoneg;
-
int link_timeout;
#ifdef CONFIG_LED_TRIGGER_PHY
@@ -1068,6 +1067,52 @@ int __init mdio_bus_init(void);
void mdio_bus_exit(void);
#endif
+/* Inline functions for use within net/core/ethtool.c (built-in) */
+static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
+{
+ if (!phydev->drv)
+ return -EIO;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_strings(phydev, data);
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
+static inline int phy_ethtool_get_sset_count(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->drv)
+ return -EIO;
+
+ if (phydev->drv->get_sset_count &&
+ phydev->drv->get_strings &&
+ phydev->drv->get_stats) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_sset_count(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static inline int phy_ethtool_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ if (!phydev->drv)
+ return -EIO;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_stats(phydev, stats, data);
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
extern struct bus_type mdio_bus_type;
struct mdio_board_info {
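
A sketch of how built-in ethtool code could forward a PHY statistics request through the new inline wrappers; the function and the way the phydev is looked up are illustrative.

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

static int example_get_phy_stats(struct net_device *dev,
                                 struct ethtool_stats *stats, u64 *data)
{
        struct phy_device *phydev = dev->phydev;
        int count;

        if (!phydev)
                return -EOPNOTSUPP;

        /* Ask the PHY driver how many stats it exposes, then fetch them. */
        count = phy_ethtool_get_sset_count(phydev);
        if (count < 0)
                return count;

        return phy_ethtool_get_stats(phydev, stats, data);
}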
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index c9d14eeee7f5..9713aebdd348 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -36,6 +36,7 @@ enum phy_mode {
PHY_MODE_USB_DEVICE_SS,
PHY_MODE_USB_OTG,
PHY_MODE_SGMII,
+ PHY_MODE_2500SGMII,
PHY_MODE_10GKR,
PHY_MODE_UFS_HS_A,
PHY_MODE_UFS_HS_B,
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 0794ca78c379..2955ba976048 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -2,8 +2,7 @@
#ifndef _LINUX_PKEYS_H
#define _LINUX_PKEYS_H
-#include <linux/mm_types.h>
-#include <asm/mmu_context.h>
+#include <linux/mm.h>
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
@@ -14,6 +13,11 @@
#define PKEY_DEDICATED_EXECUTE_ONLY 0
#define ARCH_VM_PKEY_FLAGS 0
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
return (pkey == 0);
@@ -35,6 +39,11 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
return 0;
}
+static inline bool arch_pkeys_enabled(void)
+{
+ return false;
+}
+
static inline void copy_init_pkru_to_fpregs(void)
{
}
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 93d142ad1528..174601554b06 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -186,7 +186,7 @@ struct pktcdvd_device
sector_t current_sector; /* Keep track of where the elevator is */
atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
/* needs to be run. */
- mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */
+ mempool_t rb_pool; /* mempool for pkt_rb_node allocations */
struct packet_iosched iosched;
struct gendisk *disk;
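
With rb_pool embedded in the device structure instead of pointed to, setup moves from the mempool_create*() family to in-place initialization. A hedged sketch assuming the mempool_init_kmalloc_pool() helper from <linux/mempool.h>; the pool size is illustrative.

#include <linux/mempool.h>
#include <linux/pktcdvd.h>

static int example_setup_rb_pool(struct pktcdvd_device *pd)
{
        /* Initialize the in-place pool; tear it down with mempool_exit(). */
        return mempool_init_kmalloc_pool(&pd->rb_pool, 64,
                                         sizeof(struct pkt_rb_node));
}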
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
index 69d279c0da96..8eaef2f2b691 100644
--- a/include/linux/platform_data/b53.h
+++ b/include/linux/platform_data/b53.h
@@ -20,8 +20,12 @@
#define __B53_H
#include <linux/kernel.h>
+#include <net/dsa.h>
struct b53_platform_data {
+ /* Must be first such that dsa_register_switch() can access it */
+ struct dsa_chip_data cd;
+
u32 chip_id;
u16 enabled_ports;
diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-st.h
new file mode 100644
index 000000000000..7cdb6a402b35
--- /dev/null
+++ b/include/linux/platform_data/clk-st.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * clock framework for AMD Stoney based clock
+ *
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __CLK_ST_H
+#define __CLK_ST_H
+
+#include <linux/compiler.h>
+
+struct st_clk_data {
+ void __iomem *base;
+};
+
+#endif /* __CLK_ST_H */
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 2dc7f4a8ab09..419cfacb4b42 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -19,7 +19,8 @@ struct dwapb_port_property {
unsigned int idx;
unsigned int ngpio;
unsigned int gpio_base;
- unsigned int irq;
+ int irq[32];
+ bool has_irq;
bool irq_shared;
};
diff --git a/include/linux/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h
index 352c1426fd4d..352c1426fd4d 100644
--- a/include/linux/i2c-gpio.h
+++ b/include/linux/platform_data/i2c-gpio.h
diff --git a/include/linux/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h
index 4406108201fe..4406108201fe 100644
--- a/include/linux/i2c-mux-gpio.h
+++ b/include/linux/platform_data/i2c-mux-gpio.h
diff --git a/include/linux/i2c-ocores.h b/include/linux/platform_data/i2c-ocores.h
index 01edd96fe1f7..01edd96fe1f7 100644
--- a/include/linux/i2c-ocores.h
+++ b/include/linux/platform_data/i2c-ocores.h
diff --git a/include/linux/i2c-omap.h b/include/linux/platform_data/i2c-omap.h
index 3444265ee8ee..3444265ee8ee 100644
--- a/include/linux/i2c-omap.h
+++ b/include/linux/platform_data/i2c-omap.h
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/platform_data/i2c-pca-platform.h
index c37329432a8e..c37329432a8e 100644
--- a/include/linux/i2c-pca-platform.h
+++ b/include/linux/platform_data/i2c-pca-platform.h
diff --git a/include/linux/i2c-xiic.h b/include/linux/platform_data/i2c-xiic.h
index 4f9f2256a97e..4f9f2256a97e 100644
--- a/include/linux/i2c-xiic.h
+++ b/include/linux/platform_data/i2c-xiic.h
diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
deleted file mode 100644
index 11f00cdabe3d..000000000000
--- a/include/linux/platform_data/mdio-gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * MDIO-GPIO bus platform data structures
- *
- * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __LINUX_MDIO_GPIO_H
-#define __LINUX_MDIO_GPIO_H
-
-#include <linux/mdio-bitbang.h>
-
-struct mdio_gpio_platform_data {
- /* GPIO numbers for bus pins */
- unsigned int mdc;
- unsigned int mdio;
- unsigned int mdo;
-
- bool mdc_active_low;
- bool mdio_active_low;
- bool mdo_active_low;
-
- u32 phy_mask;
- u32 phy_ignore_ta_mask;
- int irqs[PHY_MAX_ADDR];
- /* reset callback */
- int (*reset)(struct mii_bus *bus);
-};
-
-#endif /* __LINUX_MDIO_GPIO_H */
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
deleted file mode 100644
index 9d127aa648e7..000000000000
--- a/include/linux/platform_data/media/ir-rx51.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IR_RX51_H
-#define _IR_RX51_H
-
-struct ir_rx51_platform_data {
- int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
-};
-
-#endif
diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h
index 83804028115c..d2d3a443eedf 100644
--- a/include/linux/platform_data/media/mmp-camera.h
+++ b/include/linux/platform_data/media/mmp-camera.h
@@ -3,8 +3,27 @@
* Information for the Marvell Armada MMP camera
*/
+#include <media/v4l2-mediabus.h>
+
+enum dphy3_algo {
+ DPHY3_ALGO_DEFAULT = 0,
+ DPHY3_ALGO_PXA910,
+ DPHY3_ALGO_PXA2128
+};
+
struct mmp_camera_platform_data {
struct platform_device *i2c_device;
int sensor_power_gpio;
int sensor_reset_gpio;
+ enum v4l2_mbus_type bus_type;
+ int mclk_min; /* The minimum value of MCLK */
+ int mclk_src; /* which clock source the MCLK derives from */
+ int mclk_div; /* Clock Divider Value for MCLK */
+ /*
+ * MIPI support
+ */
+ int dphy[3]; /* DPHY: CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */
+ enum dphy3_algo dphy3_algo; /* algorithm for calculating CSI2_DPHY3 */
+ int lane; /* number of lanes used by the CCIC; 0 means DVP mode */
+ int lane_clk;
};
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 2744cff1b297..19f5cb618c55 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -58,11 +58,10 @@ struct mlxreg_hotplug_device {
* struct mlxreg_core_data - attributes control data:
*
* @label: attribute label;
- * @label: attribute register offset;
* @reg: attribute register;
* @mask: attribute access mask;
- * @mode: access mode;
* @bit: attribute effective bit;
+ * @mode: access mode;
* @np - pointer to node platform associated with attribute;
* @hpdev - hotplug device data;
* @health_cntr: dynamic device health indication counter;
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index f1a2cf655bdb..1bbfa27cccb4 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -56,6 +56,16 @@ struct davinci_nand_pdata { /* platform_data */
uint32_t mask_ale;
uint32_t mask_cle;
+ /*
+ * 0-indexed chip-select number of the asynchronous
+ * interface to which the NAND device has been connected.
+ *
+ * So, if you have NAND connected to CS3 of DA850, you
+ * will pass '1' here, since the asynchronous interface
+ * on DA850 starts from CS2.
+ */
+ uint32_t core_chipsel;
+
/* for packages using two chipselects */
uint32_t mask_chipsel;
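
An illustrative board-file fragment matching the core_chipsel description above: NAND wired to CS3 of a DA850, i.e. the second async chip select, so core_chipsel is 1. The mask values are placeholders.

#include <linux/platform_data/mtd-davinci.h>

static struct davinci_nand_pdata example_nand_pdata = {
        .core_chipsel = 1,      /* CS3: async chip selects start at CS2 */
        .mask_ale = 0x08,
        .mask_cle = 0x10,
};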
diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h
new file mode 100644
index 000000000000..f63af2955ea0
--- /dev/null
+++ b/include/linux/platform_data/mv88e6xxx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DSA_MV88E6XXX_H
+#define __DSA_MV88E6XXX_H
+
+#include <net/dsa.h>
+
+struct dsa_mv88e6xxx_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *compatible;
+ unsigned int enabled_ports;
+ struct net_device *netdev;
+ u32 eeprom_len;
+};
+
+#endif
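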
diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h
index 997b06634152..18602cab7799 100644
--- a/include/linux/platform_data/sc18is602.h
+++ b/include/linux/platform_data/sc18is602.h
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/spi/sc18is602 file.
+ * For further information, see the Documentation/spi/spi-sc18is602 file.
*/
/**
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index 7c686d335c12..ee495d707f17 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -18,9 +18,6 @@
#include <drm/drm_mode.h>
-struct sh_mobile_meram_cfg;
-struct sh_mobile_meram_info;
-
enum shmob_drm_clk_source {
SHMOB_DRM_CLK_BUS,
SHMOB_DRM_CLK_PERIPHERAL,
@@ -93,7 +90,6 @@ struct shmob_drm_platform_data {
struct shmob_drm_interface_data iface;
struct shmob_drm_panel_data panel;
struct shmob_drm_backlight_data backlight;
- const struct sh_mobile_meram_cfg *meram;
};
#endif /* __SHMOB_DRM_H__ */
diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h
index 6f012fefa1a2..328f670d10bd 100644
--- a/include/linux/platform_data/spi-imx.h
+++ b/include/linux/platform_data/spi-imx.h
@@ -5,24 +5,29 @@
/*
* struct spi_imx_master - device.platform_data for SPI controller devices.
- * @chipselect: Array of chipselects for this master. Numbers >= 0 mean gpio
- * pins, numbers < 0 mean internal CSPI chipselects according
- * to MXC_SPI_CS(). Normally you want to use gpio based chip
- * selects as the CSPI module tries to be intelligent about
- * when to assert the chipselect: The CSPI module deasserts the
- * chipselect once it runs out of input data. The other problem
- * is that it is not possible to mix between high active and low
- * active chipselects on one single bus using the internal
- * chipselects. Unfortunately Freescale decided to put some
+ * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0
+ * mean GPIO pins, -ENOENT means internal CSPI chipselect
+ * matching the position in the array. E.g., if chipselect[1] =
+ * -ENOENT then a SPI slave using chip select 1 will use the
+ * native SS1 line of the CSPI. Omitting the array will use
+ * all native chip selects.
+ *
+ * Normally you want to use gpio based chip selects as the CSPI
+ * module tries to be intelligent about when to assert the
+ * chipselect: The CSPI module deasserts the chipselect once it
+ * runs out of input data. The other problem is that it is not
+ * possible to mix between high active and low active chipselects
+ * on one single bus using the internal chipselects.
+ * Unfortunately, on some SoCs, Freescale decided to put some
* chipselects on dedicated pins which are not usable as gpios,
* so we have to support the internal chipselects.
- * @num_chipselect: ARRAY_SIZE(chipselect)
+ *
+ * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect),
+ * otherwise the number of native chip selects.
*/
struct spi_imx_master {
int *chipselect;
int num_chipselect;
};
-#define MXC_SPI_CS(no) ((no) - 32)
-
#endif /* __MACH_SPI_H_*/
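
An illustrative platform-data instance for the chip-select scheme documented above: chip select 0 on a GPIO, chip select 1 on the CSPI's native SS1 line (requested with -ENOENT). The GPIO number is a placeholder.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_data/spi-imx.h>

static int example_spi_cs[] = { 25, -ENOENT };

static struct spi_imx_master example_spi_pdata = {
        .chipselect = example_spi_cs,
        .num_chipselect = ARRAY_SIZE(example_spi_cs),
};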
diff --git a/include/linux/platform_data/tda9950.h b/include/linux/platform_data/tda9950.h
new file mode 100644
index 000000000000..c65efd461102
--- /dev/null
+++ b/include/linux/platform_data/tda9950.h
@@ -0,0 +1,16 @@
+#ifndef LINUX_PLATFORM_DATA_TDA9950_H
+#define LINUX_PLATFORM_DATA_TDA9950_H
+
+struct device;
+
+struct tda9950_glue {
+ struct device *parent;
+ unsigned long irq_flags;
+ void *data;
+ int (*init)(void *);
+ void (*exit)(void *);
+ int (*open)(void *);
+ void (*release)(void *);
+};
+
+#endif
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
index ac72e115093c..e6407bafcbf8 100644
--- a/include/linux/platform_data/ti-aemif.h
+++ b/include/linux/platform_data/ti-aemif.h
@@ -16,8 +16,33 @@
#include <linux/of_platform.h>
+/**
+ * struct aemif_abus_data - Async bus configuration parameters.
+ *
+ * @cs - Chip-select number.
+ */
+struct aemif_abus_data {
+ u32 cs;
+};
+
+/**
+ * struct aemif_platform_data - Data to set up the TI aemif driver.
+ *
+ * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif
+ * subdevices.
+ * @cs_offset: Lowest allowed chip-select number.
+ * @abus_data: Array of async bus configuration entries.
+ * @num_abus_data: Number of abus entries.
+ * @sub_devices: Array of platform subdevices.
+ * @num_sub_devices: Number of subdevices.
+ */
struct aemif_platform_data {
struct of_dev_auxdata *dev_lookup;
+ u32 cs_offset;
+ struct aemif_abus_data *abus_data;
+ size_t num_abus_data;
+ struct platform_device *sub_devices;
+ size_t num_sub_devices;
};
#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
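
A hedged sketch of how a board file might populate the expanded aemif_platform_data; every name and value below is a placeholder.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/ti-aemif.h>

static struct platform_device example_child_pdev = {
        .name = "example-aemif-child",  /* placeholder driver name */
        .id = 0,
};

static struct aemif_abus_data example_abus_data[] = {
        { .cs = 2 },
        { .cs = 3 },
};

static struct aemif_platform_data example_aemif_pdata = {
        .cs_offset = 2,
        .abus_data = example_abus_data,
        .num_abus_data = ARRAY_SIZE(example_abus_data),
        .sub_devices = &example_child_pdev,
        .num_sub_devices = 1,
};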
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 80ce28d40832..990aad477458 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -45,6 +45,7 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_QUIRK_RESOURCE_PROVIDER BIT(9)
#define SYSC_QUIRK_LEGACY_IDLE BIT(8)
#define SYSC_QUIRK_RESET_STATUS BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
diff --git a/include/linux/platform_data/tsl2772.h b/include/linux/platform_data/tsl2772.h
new file mode 100644
index 000000000000..f8ade15a35e2
--- /dev/null
+++ b/include/linux/platform_data/tsl2772.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * and proximity (prox) within the TAOS TSL2772 family of devices.
+ *
+ * Copyright (c) 2012, TAOS Corporation.
+ * Copyright (c) 2017-2018 Brian Masney <masneyb@onstation.org>
+ */
+
+#ifndef __TSL2772_H
+#define __TSL2772_H
+
+struct tsl2772_lux {
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+/* Max number of segments allowable in LUX table */
+#define TSL2772_MAX_LUX_TABLE_SIZE 6
+/* The default LUX tables all have 3 elements. */
+#define TSL2772_DEF_LUX_TABLE_SZ 3
+#define TSL2772_DEFAULT_TABLE_BYTES (sizeof(struct tsl2772_lux) * \
+ TSL2772_DEF_LUX_TABLE_SZ)
+
+/* Proximity diode to use */
+#define TSL2772_DIODE0 0x01
+#define TSL2772_DIODE1 0x02
+#define TSL2772_DIODE_BOTH 0x03
+
+/* LED Power */
+#define TSL2772_100_mA 0x00
+#define TSL2772_50_mA 0x01
+#define TSL2772_25_mA 0x02
+#define TSL2772_13_mA 0x03
+
+/**
+ * struct tsl2772_settings - Settings for the tsl2772 driver
+ * @als_time: Integration time of the ALS channel ADCs in 2.73 ms
+ * increments. Total integration time is
+ * (256 - als_time) * 2.73.
+ * @als_gain: Index into the tsl2772_als_gain array.
+ * @als_gain_trim: Default gain trim to account for aperture effects.
+ * @wait_time: Time between proximity and ALS cycles in 2.73
+ * periods.
+ * @prox_time: Integration time of the proximity ADC in 2.73 ms
+ * increments. Total integration time is
+ * (256 - prox_time) * 2.73.
+ * @prox_gain: Index into the tsl2772_prx_gain array.
+ * @als_prox_config: The value of the ALS / Proximity configuration
+ * register.
+ * @als_cal_target: Known external ALS reading for calibration.
+ * @als_persistence: H/W Filters, Number of 'out of limits' ALS readings.
+ * @als_interrupt_en: Enable/Disable ALS interrupts
+ * @als_thresh_low: CH0 'low' count to trigger interrupt.
+ * @als_thresh_high: CH0 'high' count to trigger interrupt.
+ * @prox_persistence: H/W Filters, Number of 'out of limits' proximity
+ * readings.
+ * @prox_interrupt_en: Enable/Disable proximity interrupts.
+ * @prox_thres_low: Low threshold proximity detection.
+ * @prox_thres_high: High threshold proximity detection.
+ * @prox_pulse_count: Number of proximity emitter pulses.
+ * @prox_max_samples_cal: The number of samples that are taken when performing
+ * a proximity calibration.
+ * @prox_diode: Which diode(s) to use for driving the external
+ * LED(s) for proximity sensing.
+ * @prox_power: The amount of power to use for the external LED(s).
+ */
+struct tsl2772_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int wait_time;
+ int prox_time;
+ int prox_gain;
+ int als_prox_config;
+ int als_cal_target;
+ u8 als_persistence;
+ bool als_interrupt_en;
+ int als_thresh_low;
+ int als_thresh_high;
+ u8 prox_persistence;
+ bool prox_interrupt_en;
+ int prox_thres_low;
+ int prox_thres_high;
+ int prox_pulse_count;
+ int prox_max_samples_cal;
+ int prox_diode;
+ int prox_power;
+};
+
+/**
+ * struct tsl2772_platform_data - Platform callback, glass and defaults
+ * @platform_lux_table: Device specific glass coefficients
+ * @platform_default_settings: Device specific power on defaults
+ */
+struct tsl2772_platform_data {
+ struct tsl2772_lux platform_lux_table[TSL2772_MAX_LUX_TABLE_SIZE];
+ struct tsl2772_settings *platform_default_settings;
+};
+
+#endif /* __TSL2772_H */
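
An illustrative platform-data instance: drive both proximity diodes at 100 mA LED power and override the ALS gain trim. The values are placeholders, and how unset fields interact with the driver's built-in defaults is driver-specific.

#include <linux/platform_data/tsl2772.h>

static struct tsl2772_settings example_tsl2772_settings = {
        .prox_diode = TSL2772_DIODE_BOTH,
        .prox_power = TSL2772_100_mA,
        .als_gain_trim = 1000,
};

static struct tsl2772_platform_data example_tsl2772_pdata = {
        .platform_default_settings = &example_tsl2772_settings,
};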
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 49f634d96118..3097c943fab9 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -356,6 +356,8 @@ extern int platform_pm_restore(struct device *dev);
#define platform_pm_restore NULL
#endif
+extern int platform_dma_configure(struct device *dev);
+
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
.suspend = platform_pm_suspend, \
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 04dbef9847d3..9206a4fef9ac 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -47,8 +47,10 @@ struct genpd_power_state {
};
struct genpd_lock_ops;
+struct dev_pm_opp;
struct generic_pm_domain {
+ struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
struct list_head master_links; /* Links with PM domain as a master */
@@ -67,6 +69,8 @@ struct generic_pm_domain {
unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
@@ -139,21 +143,16 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev,
- struct gpd_timing_data *td);
-
-extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
- struct device *dev);
-extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *new_subdomain);
-extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *target);
-extern int pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off);
-extern int pm_genpd_remove(struct generic_pm_domain *genpd);
-extern int dev_pm_genpd_set_performance_state(struct device *dev,
- unsigned int state);
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev);
+int pm_genpd_remove_device(struct device *dev);
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain);
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target);
+int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off);
+int pm_genpd_remove(struct generic_pm_domain *genpd);
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -163,14 +162,12 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev,
- struct gpd_timing_data *td)
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
{
return -ENOSYS;
}
-static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
- struct device *dev)
+static inline int pm_genpd_remove_device(struct device *dev)
{
return -ENOSYS;
}
@@ -204,15 +201,9 @@ static inline int dev_pm_genpd_set_performance_state(struct device *dev,
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return __pm_genpd_add_device(genpd, dev, NULL);
-}
-
#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
-extern void pm_genpd_syscore_poweroff(struct device *dev);
-extern void pm_genpd_syscore_poweron(struct device *dev);
+void pm_genpd_syscore_poweroff(struct device *dev);
+void pm_genpd_syscore_poweron(struct device *dev);
#else
static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
static inline void pm_genpd_syscore_poweron(struct device *dev) {}
@@ -236,15 +227,18 @@ int of_genpd_add_provider_simple(struct device_node *np,
int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
-extern int of_genpd_add_device(struct of_phandle_args *args,
- struct device *dev);
-extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
- struct of_phandle_args *new_subdomain);
-extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
-extern int of_genpd_parse_idle_states(struct device_node *dn,
- struct genpd_power_state **states, int *n);
+int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
+int of_genpd_add_subdomain(struct of_phandle_args *parent,
+ struct of_phandle_args *new_subdomain);
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n);
+unsigned int of_genpd_opp_to_performance_state(struct device *dev,
+ struct device_node *opp_node);
int genpd_dev_pm_attach(struct device *dev);
+struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ unsigned int index);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
@@ -278,11 +272,24 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
-static inline int genpd_dev_pm_attach(struct device *dev)
+static inline unsigned int
+of_genpd_opp_to_performance_state(struct device *dev,
+ struct device_node *opp_node)
{
return -ENODEV;
}
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ return NULL;
+}
+
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
@@ -291,13 +298,20 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
#ifdef CONFIG_PM
-extern int dev_pm_domain_attach(struct device *dev, bool power_on);
-extern void dev_pm_domain_detach(struct device *dev, bool power_off);
-extern void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_attach(struct device *dev, bool power_on);
+struct device *dev_pm_domain_attach_by_id(struct device *dev,
+ unsigned int index);
+void dev_pm_domain_detach(struct device *dev, bool power_off);
+void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
- return -ENODEV;
+ return 0;
+}
+static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_set(struct device *dev,
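
A hedged sketch of the new multi-domain attach path added above: a consumer whose DT node lists several power domains attaches to the second one by index. The NULL vs ERR_PTR() conventions follow the stubs shown here and may differ in detail from the C implementation; the returned virtual device is released with dev_pm_domain_detach() on teardown.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_domain.h>

static int example_attach_second_domain(struct device *dev)
{
        struct device *pd_dev;

        pd_dev = dev_pm_domain_attach_by_id(dev, 1);
        if (IS_ERR(pd_dev))
                return PTR_ERR(pd_dev);
        if (!pd_dev)
                return -ENODEV;

        return 0;
}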
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 6c2d2e88f066..099b31960dec 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -125,8 +125,6 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev, int (*get_pstate)(struct device *dev, unsigned long rate));
-void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -247,14 +245,6 @@ static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device
static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
-static inline struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
- int (*get_pstate)(struct device *dev, unsigned long rate))
-{
- return ERR_PTR(-ENOTSUPP);
-}
-
-static inline void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table) {}
-
static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
return ERR_PTR(-ENOTSUPP);
@@ -303,17 +293,25 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
+int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
+struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np);
+struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
return -ENOTSUPP;
}
+static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return -ENOTSUPP;
+}
+
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
@@ -336,6 +334,15 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device
{
return NULL;
}
+
+static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np)
+{
+ return NULL;
+}
+static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
+{
+ return NULL;
+}
#endif
#endif /* __LINUX_OPP_H__ */
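
A minimal usage sketch for dev_pm_opp_of_add_table_indexed(): pick the second OPP table attached to the device node, then jump to an operating point. The index and frequency are placeholders.

#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_init_opps(struct device *dev)
{
        int ret;

        ret = dev_pm_opp_of_add_table_indexed(dev, 1);
        if (ret)
                return ret;

        return dev_pm_opp_set_rate(dev, 200000000);    /* 200 MHz, example */
}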
diff --git a/include/linux/poll.h b/include/linux/poll.h
index f45ebd017eaa..fdf86b4cbc71 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -74,6 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
pt->_key = ~(__poll_t)0; /* all events enabled */
}
+static inline bool file_has_poll_mask(struct file *file)
+{
+ return file->f_op->get_poll_head && file->f_op->poll_mask;
+}
+
+static inline bool file_can_poll(struct file *file)
+{
+ return file->f_op->poll || file_has_poll_mask(file);
+}
+
+__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
+
struct poll_table_entry {
struct file *filp;
__poll_t key;
@@ -96,8 +108,6 @@ struct poll_wqueues {
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
-extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
- ktime_t *expires, unsigned long slack);
extern u64 select_estimate_accuracy(struct timespec64 *tv);
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
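
A short sketch of the pattern the new helpers enable: callers test file_can_poll() and go through vfs_poll() instead of dereferencing f_op->poll directly. The surrounding wait-table management is elided.

#include <linux/fs.h>
#include <linux/poll.h>

static __poll_t example_poll_file(struct file *file, poll_table *wait)
{
        if (!file_can_poll(file))
                return DEFAULT_POLLMASK;

        return vfs_poll(file, wait);
}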
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 01fbf1b16258..d6355f49fbae 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -24,8 +24,9 @@ enum bq27xxx_chip {
BQ27546,
BQ27742,
BQ27545, /* bq27545 */
- BQ27421, /* bq27421, bq27425, bq27441, bq27621 */
+ BQ27421, /* bq27421, bq27441, bq27621 */
BQ27425,
+ BQ27426,
BQ27441,
BQ27621,
};
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index f0139b460a72..b21c4bd96b84 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -145,6 +145,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
+ POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
@@ -170,6 +171,19 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
};
+enum power_supply_usb_type {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN = 0,
+ POWER_SUPPLY_USB_TYPE_SDP, /* Standard Downstream Port */
+ POWER_SUPPLY_USB_TYPE_DCP, /* Dedicated Charging Port */
+ POWER_SUPPLY_USB_TYPE_CDP, /* Charging Downstream Port */
+ POWER_SUPPLY_USB_TYPE_ACA, /* Accessory Charger Adapters */
+ POWER_SUPPLY_USB_TYPE_C, /* Type C Port */
+ POWER_SUPPLY_USB_TYPE_PD, /* Power Delivery Port */
+ POWER_SUPPLY_USB_TYPE_PD_DRP, /* PD Dual Role Port */
+ POWER_SUPPLY_USB_TYPE_PD_PPS, /* PD Programmable Power Supply */
+ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
+};
+
enum power_supply_notifier_events {
PSY_EVENT_PROP_CHANGED,
};
@@ -185,6 +199,8 @@ struct power_supply;
/* Run-time specific power supply configuration */
struct power_supply_config {
struct device_node *of_node;
+ struct fwnode_handle *fwnode;
+
/* Driver private data */
void *drv_data;
@@ -196,6 +212,8 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
+ enum power_supply_usb_type *usb_types;
+ size_t num_usb_types;
enum power_supply_property *properties;
size_t num_properties;
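
An illustrative power_supply_desc for a USB charger advertising the new usb_types array; the example_* names and the property list are placeholders.

#include <linux/kernel.h>
#include <linux/power_supply.h>

static enum power_supply_usb_type example_usb_types[] = {
        POWER_SUPPLY_USB_TYPE_SDP,
        POWER_SUPPLY_USB_TYPE_DCP,
        POWER_SUPPLY_USB_TYPE_CDP,
};

static enum power_supply_property example_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
        POWER_SUPPLY_PROP_USB_TYPE,
};

static const struct power_supply_desc example_charger_desc = {
        .name = "example-charger",
        .type = POWER_SUPPLY_TYPE_USB,
        .usb_types = example_usb_types,
        .num_usb_types = ARRAY_SIZE(example_usb_types),
        .properties = example_props,
        .num_properties = ARRAY_SIZE(example_props),
};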
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 928ef9e4d912..626fc65c4336 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -9,9 +9,13 @@
#include <linux/fs.h>
struct proc_dir_entry;
+struct seq_file;
+struct seq_operations;
#ifdef CONFIG_PROC_FS
+typedef int (*proc_write_t)(struct file *, char *, size_t);
+
extern void proc_root_init(void);
extern void proc_flush_task(struct task_struct *);
@@ -23,6 +27,19 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
struct proc_dir_entry *);
struct proc_dir_entry *proc_create_mount_point(const char *name);
+
+struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, const struct seq_operations *ops,
+ unsigned int state_size, void *data);
+#define proc_create_seq_data(name, mode, parent, ops, data) \
+ proc_create_seq_private(name, mode, parent, ops, 0, data)
+#define proc_create_seq(name, mode, parent, ops) \
+ proc_create_seq_private(name, mode, parent, ops, 0, NULL)
+struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ int (*show)(struct seq_file *, void *), void *data);
+#define proc_create_single(name, mode, parent, show) \
+ proc_create_single_data(name, mode, parent, show, NULL)
extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *,
@@ -38,6 +55,25 @@ extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
extern int remove_proc_subtree(const char *, struct proc_dir_entry *);
+struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, const struct seq_operations *ops,
+ unsigned int state_size, void *data);
+#define proc_create_net(name, mode, parent, state_size, ops) \
+ proc_create_net_data(name, mode, parent, state_size, ops, NULL)
+struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ int (*show)(struct seq_file *, void *), void *data);
+struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ const struct seq_operations *ops,
+ proc_write_t write,
+ unsigned int state_size, void *data);
+struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ int (*show)(struct seq_file *, void *),
+ proc_write_t write,
+ void *data);
+
#else /* CONFIG_PROC_FS */
static inline void proc_root_init(void)
@@ -57,6 +93,11 @@ static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
umode_t mode, struct proc_dir_entry *parent) { return NULL; }
+#define proc_create_seq_private(name, mode, parent, ops, size, data) ({NULL;})
+#define proc_create_seq_data(name, mode, parent, ops, data) ({NULL;})
+#define proc_create_seq(name, mode, parent, ops) ({NULL;})
+#define proc_create_single(name, mode, parent, show) ({NULL;})
+#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
#define proc_create(name, mode, parent, proc_fops) ({NULL;})
#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;})
@@ -69,6 +110,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
#define remove_proc_entry(name, parent) do {} while (0)
static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
+#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
+#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
+#define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
+
#endif /* CONFIG_PROC_FS */
struct net;
@@ -83,4 +128,10 @@ struct ns_common;
int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
+/* get the associated pid namespace for a file in procfs */
+static inline struct pid_namespace *proc_pid_ns(struct inode *inode)
+{
+ return inode->i_sb->s_fs_info;
+}
+
#endif /* _LINUX_PROC_FS_H */
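
A minimal sketch of the new single-show helper, which replaces the old open()/proc_create() boilerplate; the file name and show routine are placeholders.

#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from procfs\n");
        return 0;
}

static int __init example_proc_init(void)
{
        if (!proc_create_single("example_status", 0444, NULL, example_show))
                return -ENOMEM;
        return 0;
}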
diff --git a/include/linux/property.h b/include/linux/property.h
index 2eea4b310fc2..ac8a1ebc4c1b 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -178,7 +178,7 @@ static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode,
* @name: Name of the property.
* @length: Length of data making up the value.
* @is_array: True when the property is an array.
- * @is_string: True when property is a string.
+ * @type: Type of the data in unions.
* @pointer: Pointer to the property (an array of items of the given type).
* @value: Value of the property (when it is a single item of the given type).
*/
@@ -186,10 +186,9 @@ struct property_entry {
const char *name;
size_t length;
bool is_array;
- bool is_string;
+ enum dev_prop_type type;
union {
union {
- const void *raw_data;
const u8 *u8_data;
const u16 *u16_data;
const u32 *u32_data;
@@ -197,7 +196,6 @@ struct property_entry {
const char * const *str;
} pointer;
union {
- unsigned long long raw_data;
u8 u8_data;
u16 u16_data;
u32 u32_data;
@@ -213,55 +211,55 @@ struct property_entry {
* and structs.
*/
-#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
- .is_array = true, \
- .is_string = false, \
- { .pointer = { ._type_##_data = _val_ } }, \
+#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _Type_, _val_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
+ .is_array = true, \
+ .type = DEV_PROP_##_Type_, \
+ { .pointer = { ._type_##_data = _val_ } }, \
}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_)
+ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, U8, _val_)
#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_)
+ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, U16, _val_)
#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_)
+ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, U32, _val_)
#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
+ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, U64, _val_)
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
(struct property_entry) { \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(const char *), \
.is_array = true, \
- .is_string = true, \
+ .type = DEV_PROP_STRING, \
{ .pointer = { .str = _val_ } }, \
}
-#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(_type_), \
- .is_string = false, \
- { .value = { ._type_##_data = _val_ } }, \
+#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _Type_, _val_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = sizeof(_type_), \
+ .type = DEV_PROP_##_Type_, \
+ { .value = { ._type_##_data = _val_ } }, \
}
#define PROPERTY_ENTRY_U8(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u8, _val_)
+ PROPERTY_ENTRY_INTEGER(_name_, u8, U8, _val_)
#define PROPERTY_ENTRY_U16(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u16, _val_)
+ PROPERTY_ENTRY_INTEGER(_name_, u16, U16, _val_)
#define PROPERTY_ENTRY_U32(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u32, _val_)
+ PROPERTY_ENTRY_INTEGER(_name_, u32, U32, _val_)
#define PROPERTY_ENTRY_U64(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
+ PROPERTY_ENTRY_INTEGER(_name_, u64, U64, _val_)
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
(struct property_entry) { \
.name = _name_, \
.length = sizeof(_val_), \
- .is_string = true, \
+ .type = DEV_PROP_STRING, \
{ .value = { .str = _val_ } }, \
}
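
An illustrative built-in property table using the reworked macros; the names and values carry no meaning beyond the example.

#include <linux/property.h>

static const u32 example_bus_frequencies[] = { 100000, 400000 };

static const struct property_entry example_properties[] = {
        PROPERTY_ENTRY_U32("clock-frequency", 400000),
        PROPERTY_ENTRY_U32_ARRAY("supported-frequencies",
                                 example_bus_frequencies),
        PROPERTY_ENTRY_STRING("label", "example-device"),
        { }
};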
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 93addfa34061..827c601841c4 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -54,6 +54,8 @@ enum sev_cmd {
SEV_CMD_PDH_CERT_EXPORT = 0x008,
SEV_CMD_PDH_GEN = 0x009,
SEV_CMD_DF_FLUSH = 0x00A,
+ SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
+ SEV_CMD_GET_ID = 0x00C,
/* Guest commands */
SEV_CMD_DECOMMISSION = 0x020,
@@ -130,6 +132,27 @@ struct sev_data_pek_cert_import {
} __packed;
/**
+ * struct sev_data_download_firmware - DOWNLOAD_FIRMWARE command parameters
+ *
+ * @address: physical address of firmware image
+ * @len: len of the firmware image
+ */
+struct sev_data_download_firmware {
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_get_id - GET_ID command parameters
+ *
+ * @address: physical address of region to place unique CPU ID(s)
+ * @len: length of the region
+ */
+struct sev_data_get_id {
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+/**
* struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
*
* @pdh_address: PDH certificate address
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 61f806a7fe29..a15bc4d48752 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -71,7 +71,7 @@ struct pstore_record {
struct pstore_info *psi;
enum pstore_type_id type;
u64 id;
- struct timespec time;
+ struct timespec64 time;
char *buf;
ssize_t size;
ssize_t ecc_notice_size;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 919b2a0b0307..037bf0ef1ae9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,7 +345,6 @@ extern void user_single_step_siginfo(struct task_struct *tsk,
static inline void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs, siginfo_t *info)
{
- memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
}
#endif
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index e8afbd71a140..8ea265a022fd 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -14,6 +14,8 @@ struct platform_pwm_backlight_data {
unsigned int lth_brightness;
unsigned int pwm_period_ns;
unsigned int *levels;
+ unsigned int post_pwm_on_delay;
+ unsigned int pwm_off_delay;
/* TODO remove once all users are switched to gpiod_* API */
int enable_gpio;
int (*init)(struct device *dev);
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
new file mode 100644
index 000000000000..5d6144977828
--- /dev/null
+++ b/include/linux/qcom-geni-se.h
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_QCOM_GENI_SE
+#define _LINUX_QCOM_GENI_SE
+
+/* Transfer mode supported by GENI Serial Engines */
+enum geni_se_xfer_mode {
+ GENI_SE_INVALID,
+ GENI_SE_FIFO,
+ GENI_SE_DMA,
+};
+
+/* Protocols supported by GENI Serial Engines */
+enum geni_se_protocol_type {
+ GENI_SE_NONE,
+ GENI_SE_SPI,
+ GENI_SE_UART,
+ GENI_SE_I2C,
+ GENI_SE_I3C,
+};
+
+struct geni_wrapper;
+struct clk;
+
+/**
+ * struct geni_se - GENI Serial Engine
+ * @base: Base Address of the Serial Engine's register block
+ * @dev: Pointer to the Serial Engine device
+ * @wrapper: Pointer to the parent QUP Wrapper core
+ * @clk: Handle to the core serial engine clock
+ * @num_clk_levels: Number of valid clock levels in clk_perf_tbl
+ * @clk_perf_tbl: Table of input clock frequencies for the serial engine clock
+ */
+struct geni_se {
+ void __iomem *base;
+ struct device *dev;
+ struct geni_wrapper *wrapper;
+ struct clk *clk;
+ unsigned int num_clk_levels;
+ unsigned long *clk_perf_tbl;
+};
+
+/* Common SE registers */
+#define GENI_FORCE_DEFAULT_REG 0x20
+#define SE_GENI_STATUS 0x40
+#define GENI_SER_M_CLK_CFG 0x48
+#define GENI_SER_S_CLK_CFG 0x4c
+#define GENI_FW_REVISION_RO 0x68
+#define SE_GENI_CLK_SEL 0x7c
+#define SE_GENI_DMA_MODE_EN 0x258
+#define SE_GENI_M_CMD0 0x600
+#define SE_GENI_M_CMD_CTRL_REG 0x604
+#define SE_GENI_M_IRQ_STATUS 0x610
+#define SE_GENI_M_IRQ_EN 0x614
+#define SE_GENI_M_IRQ_CLEAR 0x618
+#define SE_GENI_S_CMD0 0x630
+#define SE_GENI_S_CMD_CTRL_REG 0x634
+#define SE_GENI_S_IRQ_STATUS 0x640
+#define SE_GENI_S_IRQ_EN 0x644
+#define SE_GENI_S_IRQ_CLEAR 0x648
+#define SE_GENI_TX_FIFOn 0x700
+#define SE_GENI_RX_FIFOn 0x780
+#define SE_GENI_TX_FIFO_STATUS 0x800
+#define SE_GENI_RX_FIFO_STATUS 0x804
+#define SE_GENI_TX_WATERMARK_REG 0x80c
+#define SE_GENI_RX_WATERMARK_REG 0x810
+#define SE_GENI_RX_RFR_WATERMARK_REG 0x814
+#define SE_GENI_IOS 0x908
+#define SE_DMA_TX_IRQ_STAT 0xc40
+#define SE_DMA_TX_IRQ_CLR 0xc44
+#define SE_DMA_TX_FSM_RST 0xc58
+#define SE_DMA_RX_IRQ_STAT 0xd40
+#define SE_DMA_RX_IRQ_CLR 0xd44
+#define SE_DMA_RX_FSM_RST 0xd58
+#define SE_HW_PARAM_0 0xe24
+#define SE_HW_PARAM_1 0xe28
+
+/* GENI_FORCE_DEFAULT_REG fields */
+#define FORCE_DEFAULT BIT(0)
+
+/* GENI_STATUS fields */
+#define M_GENI_CMD_ACTIVE BIT(0)
+#define S_GENI_CMD_ACTIVE BIT(12)
+
+/* GENI_SER_M_CLK_CFG/GENI_SER_S_CLK_CFG */
+#define SER_CLK_EN BIT(0)
+#define CLK_DIV_MSK GENMASK(15, 4)
+#define CLK_DIV_SHFT 4
+
+/* GENI_FW_REVISION_RO fields */
+#define FW_REV_PROTOCOL_MSK GENMASK(15, 8)
+#define FW_REV_PROTOCOL_SHFT 8
+
+/* GENI_CLK_SEL fields */
+#define CLK_SEL_MSK GENMASK(2, 0)
+
+/* SE_GENI_DMA_MODE_EN */
+#define GENI_DMA_MODE_EN BIT(0)
+
+/* GENI_M_CMD0 fields */
+#define M_OPCODE_MSK GENMASK(31, 27)
+#define M_OPCODE_SHFT 27
+#define M_PARAMS_MSK GENMASK(26, 0)
+
+/* GENI_M_CMD_CTRL_REG */
+#define M_GENI_CMD_CANCEL BIT(2)
+#define M_GENI_CMD_ABORT BIT(1)
+#define M_GENI_DISABLE BIT(0)
+
+/* GENI_S_CMD0 fields */
+#define S_OPCODE_MSK GENMASK(31, 27)
+#define S_OPCODE_SHFT 27
+#define S_PARAMS_MSK GENMASK(26, 0)
+
+/* GENI_S_CMD_CTRL_REG */
+#define S_GENI_CMD_CANCEL BIT(2)
+#define S_GENI_CMD_ABORT BIT(1)
+#define S_GENI_DISABLE BIT(0)
+
+/* GENI_M_IRQ_EN fields */
+#define M_CMD_DONE_EN BIT(0)
+#define M_CMD_OVERRUN_EN BIT(1)
+#define M_ILLEGAL_CMD_EN BIT(2)
+#define M_CMD_FAILURE_EN BIT(3)
+#define M_CMD_CANCEL_EN BIT(4)
+#define M_CMD_ABORT_EN BIT(5)
+#define M_TIMESTAMP_EN BIT(6)
+#define M_RX_IRQ_EN BIT(7)
+#define M_GP_SYNC_IRQ_0_EN BIT(8)
+#define M_GP_IRQ_0_EN BIT(9)
+#define M_GP_IRQ_1_EN BIT(10)
+#define M_GP_IRQ_2_EN BIT(11)
+#define M_GP_IRQ_3_EN BIT(12)
+#define M_GP_IRQ_4_EN BIT(13)
+#define M_GP_IRQ_5_EN BIT(14)
+#define M_IO_DATA_DEASSERT_EN BIT(22)
+#define M_IO_DATA_ASSERT_EN BIT(23)
+#define M_RX_FIFO_RD_ERR_EN BIT(24)
+#define M_RX_FIFO_WR_ERR_EN BIT(25)
+#define M_RX_FIFO_WATERMARK_EN BIT(26)
+#define M_RX_FIFO_LAST_EN BIT(27)
+#define M_TX_FIFO_RD_ERR_EN BIT(28)
+#define M_TX_FIFO_WR_ERR_EN BIT(29)
+#define M_TX_FIFO_WATERMARK_EN BIT(30)
+#define M_SEC_IRQ_EN BIT(31)
+#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \
+ M_IO_DATA_DEASSERT_EN | \
+ M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
+ M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
+ M_TX_FIFO_WR_ERR_EN)
+
+/* GENI_S_IRQ_EN fields */
+#define S_CMD_DONE_EN BIT(0)
+#define S_CMD_OVERRUN_EN BIT(1)
+#define S_ILLEGAL_CMD_EN BIT(2)
+#define S_CMD_FAILURE_EN BIT(3)
+#define S_CMD_CANCEL_EN BIT(4)
+#define S_CMD_ABORT_EN BIT(5)
+#define S_GP_SYNC_IRQ_0_EN BIT(8)
+#define S_GP_IRQ_0_EN BIT(9)
+#define S_GP_IRQ_1_EN BIT(10)
+#define S_GP_IRQ_2_EN BIT(11)
+#define S_GP_IRQ_3_EN BIT(12)
+#define S_GP_IRQ_4_EN BIT(13)
+#define S_GP_IRQ_5_EN BIT(14)
+#define S_IO_DATA_DEASSERT_EN BIT(22)
+#define S_IO_DATA_ASSERT_EN BIT(23)
+#define S_RX_FIFO_RD_ERR_EN BIT(24)
+#define S_RX_FIFO_WR_ERR_EN BIT(25)
+#define S_RX_FIFO_WATERMARK_EN BIT(26)
+#define S_RX_FIFO_LAST_EN BIT(27)
+#define S_COMMON_GENI_S_IRQ_EN (GENMASK(5, 1) | GENMASK(13, 9) | \
+ S_RX_FIFO_RD_ERR_EN | S_RX_FIFO_WR_ERR_EN)
+
+/* GENI_/TX/RX/RX_RFR/_WATERMARK_REG fields */
+#define WATERMARK_MSK GENMASK(5, 0)
+
+/* GENI_TX_FIFO_STATUS fields */
+#define TX_FIFO_WC GENMASK(27, 0)
+
+/* GENI_RX_FIFO_STATUS fields */
+#define RX_LAST BIT(31)
+#define RX_LAST_BYTE_VALID_MSK GENMASK(30, 28)
+#define RX_LAST_BYTE_VALID_SHFT 28
+#define RX_FIFO_WC_MSK GENMASK(24, 0)
+
+/* SE_GENI_IOS fields */
+#define IO2_DATA_IN BIT(1)
+#define RX_DATA_IN BIT(0)
+
+/* SE_DMA_TX_IRQ_STAT Register fields */
+#define TX_DMA_DONE BIT(0)
+#define TX_EOT BIT(1)
+#define TX_SBE BIT(2)
+#define TX_RESET_DONE BIT(3)
+
+/* SE_DMA_RX_IRQ_STAT Register fields */
+#define RX_DMA_DONE BIT(0)
+#define RX_EOT BIT(1)
+#define RX_SBE BIT(2)
+#define RX_RESET_DONE BIT(3)
+#define RX_FLUSH_DONE BIT(4)
+#define RX_GENI_GP_IRQ GENMASK(10, 5)
+#define RX_GENI_CANCEL_IRQ BIT(11)
+#define RX_GENI_GP_IRQ_EXT GENMASK(13, 12)
+
+/* SE_HW_PARAM_0 fields */
+#define TX_FIFO_WIDTH_MSK GENMASK(29, 24)
+#define TX_FIFO_WIDTH_SHFT 24
+#define TX_FIFO_DEPTH_MSK GENMASK(21, 16)
+#define TX_FIFO_DEPTH_SHFT 16
+
+/* SE_HW_PARAM_1 fields */
+#define RX_FIFO_WIDTH_MSK GENMASK(29, 24)
+#define RX_FIFO_WIDTH_SHFT 24
+#define RX_FIFO_DEPTH_MSK GENMASK(21, 16)
+#define RX_FIFO_DEPTH_SHFT 16
+
+#define HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define HW_VER_MAJOR_SHFT 28
+#define HW_VER_MINOR_MASK GENMASK(27, 16)
+#define HW_VER_MINOR_SHFT 16
+#define HW_VER_STEP_MASK GENMASK(15, 0)
+
+#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
+
+u32 geni_se_get_qup_hw_version(struct geni_se *se);
+
+#define geni_se_get_wrapper_version(se, major, minor, step) do { \
+ u32 ver; \
+\
+ ver = geni_se_get_qup_hw_version(se); \
+ major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \
+ minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \
+ step = ver & HW_VER_STEP_MASK; \
+} while (0)
+
+/**
+ * geni_se_read_proto() - Read the protocol configured for a serial engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: Protocol value as configured in the serial engine.
+ */
+static inline u32 geni_se_read_proto(struct geni_se *se)
+{
+ u32 val;
+
+ val = readl_relaxed(se->base + GENI_FW_REVISION_RO);
+
+ return (val & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT;
+}
+
+/**
+ * geni_se_setup_m_cmd() - Setup the primary sequencer
+ * @se: Pointer to the concerned serial engine.
+ * @cmd: Command/Operation to setup in the primary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the primary sequencer with the
+ * command and its associated parameters.
+ */
+static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params)
+{
+ u32 m_cmd;
+
+ m_cmd = (cmd << M_OPCODE_SHFT) | (params & M_PARAMS_MSK);
+ writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0);
+}
+
+/**
+ * geni_se_setup_s_cmd() - Setup the secondary sequencer
+ * @se: Pointer to the concerned serial engine.
+ * @cmd: Command/Operation to setup in the secondary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the secondary sequencer with the
+ * command and its associated parameters.
+ */
+static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params)
+{
+ u32 s_cmd;
+
+ s_cmd = readl_relaxed(se->base + SE_GENI_S_CMD0);
+ s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
+ s_cmd |= (cmd << S_OPCODE_SHFT);
+ s_cmd |= (params & S_PARAMS_MSK);
+ writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0);
+}
+
+/**
+ * geni_se_cancel_m_cmd() - Cancel the command configured in the primary
+ * sequencer
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to cancel the currently configured command in the
+ * primary sequencer.
+ */
+static inline void geni_se_cancel_m_cmd(struct geni_se *se)
+{
+ writel_relaxed(M_GENI_CMD_CANCEL, se->base + SE_GENI_M_CMD_CTRL_REG);
+}
+
+/**
+ * geni_se_cancel_s_cmd() - Cancel the command configured in the secondary
+ * sequencer
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to cancel the currently configured command in the
+ * secondary sequencer.
+ */
+static inline void geni_se_cancel_s_cmd(struct geni_se *se)
+{
+ writel_relaxed(S_GENI_CMD_CANCEL, se->base + SE_GENI_S_CMD_CTRL_REG);
+}
+
+/**
+ * geni_se_abort_m_cmd() - Abort the command configured in the primary sequencer
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to force abort the currently configured command in the
+ * primary sequencer.
+ */
+static inline void geni_se_abort_m_cmd(struct geni_se *se)
+{
+ writel_relaxed(M_GENI_CMD_ABORT, se->base + SE_GENI_M_CMD_CTRL_REG);
+}
+
+/**
+ * geni_se_abort_s_cmd() - Abort the command configured in the secondary
+ * sequencer
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to force abort the currently configured command in the
+ * secondary sequencer.
+ */
+static inline void geni_se_abort_s_cmd(struct geni_se *se)
+{
+ writel_relaxed(S_GENI_CMD_ABORT, se->base + SE_GENI_S_CMD_CTRL_REG);
+}
+
+/**
+ * geni_se_get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to get the depth, i.e. the number of elements in the
+ * TX fifo of the serial engine.
+ *
+ * Return: TX fifo depth in units of FIFO words.
+ */
+static inline u32 geni_se_get_tx_fifo_depth(struct geni_se *se)
+{
+ u32 val;
+
+ val = readl_relaxed(se->base + SE_HW_PARAM_0);
+
+ return (val & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT;
+}
+
+/**
+ * geni_se_get_tx_fifo_width() - Get the TX fifo width of the serial engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to get the width, i.e. the word size per element in the
+ * TX fifo of the serial engine.
+ *
+ * Return: TX fifo width in bits.
+ */
+static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se)
+{
+ u32 val;
+
+ val = readl_relaxed(se->base + SE_HW_PARAM_0);
+
+ return (val & TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT;
+}
+
+/**
+ * geni_se_get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to get the depth, i.e. the number of elements in the
+ * RX fifo of the serial engine.
+ *
+ * Return: RX fifo depth in units of FIFO words.
+ */
+static inline u32 geni_se_get_rx_fifo_depth(struct geni_se *se)
+{
+ u32 val;
+
+ val = readl_relaxed(se->base + SE_HW_PARAM_1);
+
+ return (val & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT;
+}
+
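The depth and width accessors combine naturally when sizing transfer buffers for a serial engine. A small sketch, assuming the FIFO word width reported in SE_HW_PARAM_0 is a multiple of 8 bits:

```c
/* Capacity of the TX FIFO in bytes, derived from SE_HW_PARAM_0. */
static size_t example_tx_fifo_bytes(struct geni_se *se)
{
	u32 depth = geni_se_get_tx_fifo_depth(se);	/* FIFO words */
	u32 width = geni_se_get_tx_fifo_width(se);	/* bits per word */

	return (size_t)depth * (width / 8);
}
```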
+void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr);
+
+void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode);
+
+void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
+ bool msb_to_lsb, bool tx_cfg, bool rx_cfg);
+
+int geni_se_resources_off(struct geni_se *se);
+
+int geni_se_resources_on(struct geni_se *se);
+
+int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl);
+
+int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
+ unsigned int *index, unsigned long *res_freq,
+ bool exact);
+
+int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova);
+
+int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova);
+
+void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
+
+void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
+#endif
+#endif
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 13c8ab171437..0081fa6d1268 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -109,8 +109,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 33
-#define FW_REVISION_VERSION 11
+#define FW_MINOR_VERSION 37
+#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 938df614cb6a..b34c573f2b30 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -799,8 +799,8 @@ struct e4_mstorm_iscsi_task_ag_ctx {
#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
@@ -849,8 +849,8 @@ struct e4_ustorm_iscsi_task_ag_ctx {
#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 147d08ccf813..2978fa4add42 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -66,6 +66,7 @@ enum qed_filter_config_mode {
QED_FILTER_CONFIG_MODE_5_TUPLE,
QED_FILTER_CONFIG_MODE_L4_PORT,
QED_FILTER_CONFIG_MODE_IP_DEST,
+ QED_FILTER_CONFIG_MODE_IP_SRC,
};
struct qed_ntuple_filter_params {
@@ -88,6 +89,9 @@ struct qed_ntuple_filter_params {
/* true iff this filter is to be added. Else to be removed */
bool b_is_add;
+
+ /* If flow needs to be dropped */
+ bool b_is_drop;
};
struct qed_dev_eth_info {
@@ -352,6 +356,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b5b2bc9eacca..b4040023cbfb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -159,6 +159,9 @@ struct qed_dcbx_get {
enum qed_nvm_images {
QED_NVM_IMAGE_ISCSI_CFG,
QED_NVM_IMAGE_FCOE_CFG,
+ QED_NVM_IMAGE_NVM_CFG1,
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ QED_NVM_IMAGE_NVM_META,
};
struct qed_link_eee_params {
@@ -179,6 +182,272 @@ enum qed_led_mode {
QED_LED_MODE_RESTORE
};
+struct qed_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
+#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
+#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
+#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+#define QED_MFW_TLV_TIME_SIZE 14
+struct qed_mfw_tlv_time {
+ bool b_set;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 min;
+ u16 msec;
+ u16 usec;
+};
+
+struct qed_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+#define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
+#define QED_MFW_TLV_PORT_STATE_LOOP (1)
+#define QED_MFW_TLV_PORT_STATE_P2P (2)
+#define QED_MFW_TLV_PORT_STATE_FABRIC (3)
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ struct qed_mfw_tlv_time crc_err[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ struct qed_mfw_tlv_time flogi_tstamp;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ struct qed_mfw_tlv_time flogi_acc_tstamp;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ struct qed_mfw_tlv_time flogi_rjt_tstamp;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ struct qed_mfw_tlv_time plogi_tstamp[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ struct qed_mfw_tlv_time plogi_acc_tstamp[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ struct qed_mfw_tlv_time plogo_tstamp[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ struct qed_mfw_tlv_time abts_tstamp[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ struct qed_mfw_tlv_time scsi_chk_tstamp[5];
+};
+
+struct qed_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+#define QED_MFW_TLV_AUTH_METHOD_NONE (1)
+#define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
+#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
@@ -336,7 +605,6 @@ struct qed_dev_info {
u8 num_hwfns;
u8 hw_mac[ETH_ALEN];
- bool is_mf_default;
/* FW version */
u16 fw_major;
@@ -356,7 +624,7 @@ struct qed_dev_info {
#define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size;
- u8 mf_mode;
+ bool b_inter_pf_switch;
bool tx_switching;
bool rdma_supported;
u16 mtu;
@@ -483,6 +751,14 @@ struct qed_int_info {
u8 used_cnt;
};
+struct qed_generic_tlvs {
+#define QED_TLV_IP_CSUM BIT(0)
+#define QED_TLV_LSO BIT(1)
+ u16 feat_flags;
+#define QED_TLV_MAC_COUNT 3
+ u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
+};
+
#define QED_NVM_SIGNATURE 0x12435687
enum qed_nvm_flash_cmd {
@@ -497,6 +773,8 @@ struct qed_common_cb_ops {
void (*link_update)(void *dev,
struct qed_link_output *link);
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
+ void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
+ void (*get_protocol_tlv_data)(void *dev, void *data);
};
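The new get_generic_tlv_data callback lets the protocol driver report the feature flags and MAC addresses it wants the management firmware TLVs to carry. A hedged sketch of one possible implementation is below; struct example_dev and its primary_mac field are hypothetical driver-private state, not part of this patch.

```c
#include <linux/etherdevice.h>
#include <linux/qed/qed_if.h>

/* Hypothetical driver-private state, for illustration only. */
struct example_dev {
	u8 primary_mac[ETH_ALEN];
};

static void example_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct example_dev *edev = dev;

	data->feat_flags = QED_TLV_IP_CSUM | QED_TLV_LSO;
	ether_addr_copy(data->mac[0], edev->primary_mac);
}
```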
struct qed_selftest_ops {
@@ -851,6 +1129,7 @@ struct qed_eth_stats_common {
u64 rx_bcast_pkts;
u64 mftag_filter_discards;
u64 mac_filter_discards;
+ u64 gft_filter_drop;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
@@ -901,6 +1180,7 @@ struct qed_eth_stats_common {
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
+ u64 link_change_count;
};
struct qed_eth_stats_bb {
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 266c1fb45387..5eb022953aca 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -202,6 +202,7 @@ struct qed_ll2_tx_pkt_info {
bool enable_ip_cksum;
bool enable_l4_cksum;
bool calc_ip_len;
+ bool remove_stag;
};
#define QED_LL2_UNUSED_HANDLE (0xff)
@@ -220,6 +221,11 @@ struct qed_ll2_params {
u8 ll2_mac_address[ETH_ALEN];
};
+enum qed_ll2_xmit_flags {
+ /* FIP discovery packet */
+ QED_LL2_XMIT_FLAGS_FIP_DISCOVERY
+};
+
struct qed_ll2_ops {
/**
* @brief start - initializes ll2
@@ -245,10 +251,12 @@ struct qed_ll2_ops {
*
* @param cdev
* @param skb
+ * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
*
* @return 0 on success, otherwise error value.
*/
- int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
+ unsigned long xmit_flags);
/**
* @brief register_cb_ops - protocol driver register the callback for Rx/Tx
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 4dd72ba210f5..df4d13f7e191 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -65,8 +65,7 @@ enum qed_roce_qp_state {
enum qed_rdma_tid_type {
QED_RDMA_TID_REGISTERED_MR,
QED_RDMA_TID_FMR,
- QED_RDMA_TID_MW_TYPE1,
- QED_RDMA_TID_MW_TYPE2A
+ QED_RDMA_TID_MW
};
struct qed_rdma_events {
@@ -280,7 +279,6 @@ struct qed_rdma_register_tid_in_params {
bool dif_enabled;
u64 dif_error_addr;
- u64 dif_runt_addr;
};
struct qed_rdma_create_cq_in_params {
@@ -485,7 +483,9 @@ enum qed_iwarp_event_type {
QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
- QED_IWARP_EVENT_TERMINATE_RECEIVED
+ QED_IWARP_EVENT_TERMINATE_RECEIVED,
+ QED_IWARP_EVENT_SRQ_LIMIT,
+ QED_IWARP_EVENT_SRQ_EMPTY,
};
enum qed_tcp_ip_version {
@@ -646,6 +646,14 @@ struct qed_rdma_ops {
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_create_srq)(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *iparams,
+ struct qed_rdma_create_srq_out_params *oparams);
+ int (*rdma_destroy_srq)(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *iparams);
+ int (*rdma_modify_srq)(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *iparams);
+
int (*ll2_acquire_connection)(void *rdma_cxt,
struct qed_ll2_acquire_data *data);
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 193bcef302e1..473fba76aa77 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -43,6 +43,7 @@
#define ROCE_MAX_QPS (32 * 1024)
#define ROCE_DCQCN_NP_MAX_QPS (64)
#define ROCE_DCQCN_RP_MAX_QPS (64)
+#define ROCE_LKEY_MW_DIF_EN_BIT (28)
/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 6bfd2b581f75..af8a61be2d8d 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -26,6 +26,7 @@
#include <linux/compiler.h>
#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
/*
* Please note - only struct rb_augment_callbacks and the prototypes for
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index ece43e882b56..7d012faa509a 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -35,6 +35,7 @@
#include <linux/rbtree.h>
#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
struct latch_tree_node {
struct rb_node node[2];
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 127f534fec94..36df6ccbc874 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -404,6 +404,19 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
+ * list_for_each_entry_from_rcu - iterate over a list from current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over the tail of a list starting from a given position,
+ * which must have been in the list when the RCU read lock was taken.
+ */
+#define list_for_each_entry_from_rcu(pos, head, member) \
+ for (; &(pos)->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
+
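The new iterator continues from a cursor that already points into the list, rather than from the head. An illustrative use is sketched below; struct item and its field names are assumptions, and the caller is expected to hold the RCU read lock and to have obtained @pos inside that same read-side critical section (or to otherwise guarantee it is still linked).

```c
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	struct list_head link;
	int val;
};

/* Sum the remainder of an RCU-protected list, starting at @pos. */
static int example_sum_from(struct item *pos, struct list_head *head)
{
	int sum = 0;

	list_for_each_entry_from_rcu(pos, head, link)
		sum += pos->val;

	return sum;
}
```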
+/**
* hlist_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index e4b257ff881b..bc8206a8f30e 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -109,7 +109,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
*
* The barrier() is needed to make sure compiler doesn't cache first element [1],
* as this loop can be restarted [2]
- * [1] Documentation/atomic_ops.txt around line 114
+ * [1] Documentation/core-api/atomic_ops.rst around line 114
* [2] Documentation/RCU/rculist_nulls.txt around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 36360d07f25b..65163aa0bb04 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -108,7 +108,6 @@ void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_RCU_STALL_COMMON
@@ -188,13 +187,13 @@ static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
- * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
* report potential quiescent states to RCU-tasks even if the cond_resched()
* machinery were to be shut off, as some advocate for PREEMPT kernels.
*/
-#define cond_resched_rcu_qs() \
+#define cond_resched_tasks_rcu_qs() \
do { \
if (!cond_resched()) \
rcu_note_voluntary_context_switch_lite(current); \
@@ -653,9 +652,7 @@ static inline void rcu_read_lock(void)
* Unfortunately, this function acquires the scheduler's runqueue and
* priority-inheritance spinlocks. This means that deadlock could result
* if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them; or any lock which
- * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
- * does not disable irqs while taking ->wait_lock.
+ * any lock that is ever acquired while holding them.
*
* That said, RCU readers are never priority boosted unless they were
* preempted. Therefore, one way to avoid deadlock is to make sure
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ce9beec35e34..7b3c82e8a625 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,5 +132,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
+static inline void rcu_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index fd996cdf1833..914655848ef6 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -74,6 +74,7 @@ static inline void synchronize_rcu_bh_expedited(void)
void rcu_barrier(void);
void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
+bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
unsigned long get_state_synchronize_sched(void);
@@ -100,5 +101,6 @@ int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 5f7ad0552c03..4f38068ffb71 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
@@ -587,7 +588,10 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
-
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
* for each call. No-op if CONFIG_LOCKDEP is not set.
@@ -906,6 +910,19 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
sdw, config)
+/**
+ * devm_regmap_init_slimbus() - Initialise managed register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \
+ slimbus, config)
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
void regmap_mmio_detach_clk(struct regmap *map);
void regmap_exit(struct regmap *map);
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index d8ecefaf63ca..6d46f962685d 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -49,47 +49,7 @@ enum ab8505_regulator_id {
AB8505_NUM_REGULATORS,
};
-/* AB9540 regulators */
-enum ab9540_regulator_id {
- AB9540_LDO_AUX1,
- AB9540_LDO_AUX2,
- AB9540_LDO_AUX3,
- AB9540_LDO_AUX4,
- AB9540_LDO_INTCORE,
- AB9540_LDO_TVOUT,
- AB9540_LDO_USB,
- AB9540_LDO_AUDIO,
- AB9540_LDO_ANAMIC1,
- AB9540_LDO_ANAMIC2,
- AB9540_LDO_DMIC,
- AB9540_LDO_ANA,
- AB9540_SYSCLKREQ_2,
- AB9540_SYSCLKREQ_4,
- AB9540_NUM_REGULATORS,
-};
-
-/* AB8540 regulators */
-enum ab8540_regulator_id {
- AB8540_LDO_AUX1,
- AB8540_LDO_AUX2,
- AB8540_LDO_AUX3,
- AB8540_LDO_AUX4,
- AB8540_LDO_AUX5,
- AB8540_LDO_AUX6,
- AB8540_LDO_INTCORE,
- AB8540_LDO_TVOUT,
- AB8540_LDO_AUDIO,
- AB8540_LDO_ANAMIC1,
- AB8540_LDO_ANAMIC2,
- AB8540_LDO_DMIC,
- AB8540_LDO_ANA,
- AB8540_LDO_SDIO,
- AB8540_SYSCLKREQ_2,
- AB8540_SYSCLKREQ_4,
- AB8540_NUM_REGULATORS,
-};
-
-/* AB8500, AB8505, and AB9540 register initialization */
+/* AB8500 and AB8505 register initialization */
struct ab8500_regulator_reg_init {
int id;
u8 mask;
@@ -185,121 +145,6 @@ enum ab8505_regulator_reg {
AB8505_NUM_REGULATOR_REGISTERS,
};
-/* AB9540 registers */
-enum ab9540_regulator_reg {
- AB9540_REGUREQUESTCTRL1,
- AB9540_REGUREQUESTCTRL2,
- AB9540_REGUREQUESTCTRL3,
- AB9540_REGUREQUESTCTRL4,
- AB9540_REGUSYSCLKREQ1HPVALID1,
- AB9540_REGUSYSCLKREQ1HPVALID2,
- AB9540_REGUHWHPREQ1VALID1,
- AB9540_REGUHWHPREQ1VALID2,
- AB9540_REGUHWHPREQ2VALID1,
- AB9540_REGUHWHPREQ2VALID2,
- AB9540_REGUSWHPREQVALID1,
- AB9540_REGUSWHPREQVALID2,
- AB9540_REGUSYSCLKREQVALID1,
- AB9540_REGUSYSCLKREQVALID2,
- AB9540_REGUVAUX4REQVALID,
- AB9540_REGUMISC1,
- AB9540_VAUDIOSUPPLY,
- AB9540_REGUCTRL1VAMIC,
- AB9540_VSMPS1REGU,
- AB9540_VSMPS2REGU,
- AB9540_VSMPS3REGU, /* NOTE! PRCMU register */
- AB9540_VPLLVANAREGU,
- AB9540_EXTSUPPLYREGU,
- AB9540_VAUX12REGU,
- AB9540_VRF1VAUX3REGU,
- AB9540_VSMPS1SEL1,
- AB9540_VSMPS1SEL2,
- AB9540_VSMPS1SEL3,
- AB9540_VSMPS2SEL1,
- AB9540_VSMPS2SEL2,
- AB9540_VSMPS2SEL3,
- AB9540_VSMPS3SEL1, /* NOTE! PRCMU register */
- AB9540_VSMPS3SEL2, /* NOTE! PRCMU register */
- AB9540_VAUX1SEL,
- AB9540_VAUX2SEL,
- AB9540_VRF1VAUX3SEL,
- AB9540_REGUCTRL2SPARE,
- AB9540_VAUX4REQCTRL,
- AB9540_VAUX4REGU,
- AB9540_VAUX4SEL,
- AB9540_REGUCTRLDISCH,
- AB9540_REGUCTRLDISCH2,
- AB9540_REGUCTRLDISCH3,
- AB9540_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8540 registers */
-enum ab8540_regulator_reg {
- AB8540_REGUREQUESTCTRL1,
- AB8540_REGUREQUESTCTRL2,
- AB8540_REGUREQUESTCTRL3,
- AB8540_REGUREQUESTCTRL4,
- AB8540_REGUSYSCLKREQ1HPVALID1,
- AB8540_REGUSYSCLKREQ1HPVALID2,
- AB8540_REGUHWHPREQ1VALID1,
- AB8540_REGUHWHPREQ1VALID2,
- AB8540_REGUHWHPREQ2VALID1,
- AB8540_REGUHWHPREQ2VALID2,
- AB8540_REGUSWHPREQVALID1,
- AB8540_REGUSWHPREQVALID2,
- AB8540_REGUSYSCLKREQVALID1,
- AB8540_REGUSYSCLKREQVALID2,
- AB8540_REGUVAUX4REQVALID,
- AB8540_REGUVAUX5REQVALID,
- AB8540_REGUVAUX6REQVALID,
- AB8540_REGUVCLKBREQVALID,
- AB8540_REGUVRF1REQVALID,
- AB8540_REGUMISC1,
- AB8540_VAUDIOSUPPLY,
- AB8540_REGUCTRL1VAMIC,
- AB8540_VHSIC,
- AB8540_VSDIO,
- AB8540_VSMPS1REGU,
- AB8540_VSMPS2REGU,
- AB8540_VSMPS3REGU,
- AB8540_VPLLVANAREGU,
- AB8540_EXTSUPPLYREGU,
- AB8540_VAUX12REGU,
- AB8540_VRF1VAUX3REGU,
- AB8540_VSMPS1SEL1,
- AB8540_VSMPS1SEL2,
- AB8540_VSMPS1SEL3,
- AB8540_VSMPS2SEL1,
- AB8540_VSMPS2SEL2,
- AB8540_VSMPS2SEL3,
- AB8540_VSMPS3SEL1,
- AB8540_VSMPS3SEL2,
- AB8540_VAUX1SEL,
- AB8540_VAUX2SEL,
- AB8540_VRF1VAUX3SEL,
- AB8540_REGUCTRL2SPARE,
- AB8540_VAUX4REQCTRL,
- AB8540_VAUX4REGU,
- AB8540_VAUX4SEL,
- AB8540_VAUX5REQCTRL,
- AB8540_VAUX5REGU,
- AB8540_VAUX5SEL,
- AB8540_VAUX6REQCTRL,
- AB8540_VAUX6REGU,
- AB8540_VAUX6SEL,
- AB8540_VCLKBREQCTRL,
- AB8540_VCLKBREGU,
- AB8540_VCLKBSEL,
- AB8540_VRF1REQCTRL,
- AB8540_REGUCTRLDISCH,
- AB8540_REGUCTRLDISCH2,
- AB8540_REGUCTRLDISCH3,
- AB8540_REGUCTRLDISCH4,
- AB8540_VSIMSYSCLKCTRL,
- AB8540_VANAVPLLSEL,
- AB8540_NUM_REGULATOR_REGISTERS,
-};
-
/* AB8500 external regulators */
struct ab8500_ext_regulator_cfg {
bool hwreq; /* requires hw mode or high power mode */
diff --git a/include/linux/regulator/arizona-ldo1.h b/include/linux/regulator/arizona-ldo1.h
index c685f1277c63..fe74ab9990e6 100644
--- a/include/linux/regulator/arizona-ldo1.h
+++ b/include/linux/regulator/arizona-ldo1.h
@@ -14,9 +14,6 @@
struct regulator_init_data;
struct arizona_ldo1_pdata {
- /** GPIO controlling LDOENA, if any */
- int ldoena;
-
/** Regulator configuration for LDO1 */
const struct regulator_init_data *init_data;
};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index df176d7c2b87..25602afd4844 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -80,6 +80,7 @@ struct regmap;
* These modes can be OR'ed together to make up a mask of valid register modes.
*/
+#define REGULATOR_MODE_INVALID 0x0
#define REGULATOR_MODE_FAST 0x1
#define REGULATOR_MODE_NORMAL 0x2
#define REGULATOR_MODE_IDLE 0x4
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4fc96cb8e5d7..fc2dc8df476f 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -15,6 +15,8 @@
#ifndef __LINUX_REGULATOR_DRIVER_H_
#define __LINUX_REGULATOR_DRIVER_H_
+#define MAX_COUPLED 4
+
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
@@ -81,9 +83,12 @@ struct regulator_linear_range {
* @set_voltage_sel: Set the voltage for the regulator using the specified
* selector.
* @map_voltage: Convert a voltage into a selector
- * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage: Return the currently configured voltage for the regulator;
+ * return -ENOTRECOVERABLE if regulator can't be read at
+ * bootup and hasn't been set yet.
* @get_voltage_sel: Return the currently configured voltage selector for the
- * regulator.
+ * regulator; return -ENOTRECOVERABLE if regulator can't
+ * be read at bootup and hasn't been set yet.
* @list_voltage: Return one of the supported voltages, in microvolts; zero
* if the selector indicates a voltage that is unusable on this system;
* or negative errno. Selectors range from zero to one less than
@@ -407,6 +412,20 @@ struct regulator_config {
};
/*
+ * struct coupling_desc
+ *
+ * Describes coupling of regulators. Each regulator should have
+ * at least a pointer to itself in coupled_rdevs array.
+ * When a new coupled regulator is resolved, n_resolved is
+ * incremented.
+ */
+struct coupling_desc {
+ struct regulator_dev *coupled_rdevs[MAX_COUPLED];
+ int n_resolved;
+ int n_coupled;
+};
+
+/*
* struct regulator_dev
*
* Voltage / Current regulator class device. One for each
@@ -429,8 +448,12 @@ struct regulator_dev {
/* lists we own */
struct list_head consumer_list; /* consumers we supply */
+ struct coupling_desc coupling_desc;
+
struct blocking_notifier_head notifier;
struct mutex mutex; /* consumer lock */
+ struct task_struct *mutex_owner;
+ int ref_cnt;
struct module *owner;
struct device dev;
struct regulation_constraints *constraints;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 93a04893c739..3468703d663a 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -103,6 +103,7 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @max_spread: Max possible spread between coupled regulators
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
*
@@ -154,6 +155,9 @@ struct regulation_constraints {
int system_load;
+ /* used for coupled regulators */
+ int max_spread;
+
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 4dbb63a1d4ab..686c42c041b5 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -120,7 +120,6 @@ enum {
struct max8952_platform_data {
int gpio_vid0;
int gpio_vid1;
- int gpio_en;
u32 default_mode;
u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index d09a9c7af109..dfdaede9139e 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -569,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
void rproc_add_subdev(struct rproc *rproc,
struct rproc_subdev *subdev,
int (*probe)(struct rproc_subdev *subdev),
- void (*remove)(struct rproc_subdev *subdev, bool graceful));
+ void (*remove)(struct rproc_subdev *subdev, bool crashed));
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index bcfdb918cd81..5d83d0c1d06c 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/time64.h>
struct timespec;
struct compat_timespec;
@@ -15,9 +16,7 @@ struct pollfd;
enum timespec_type {
TT_NONE = 0,
TT_NATIVE = 1,
-#ifdef CONFIG_COMPAT
TT_COMPAT = 2,
-#endif
};
/*
@@ -40,10 +39,8 @@ struct restart_block {
clockid_t clockid;
enum timespec_type type;
union {
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
+ struct __kernel_timespec __user *rmtp;
struct compat_timespec __user *compat_rmtp;
-#endif
};
u64 expires;
} nanosleep;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 1f8ad121eb43..4e1f535c2034 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -836,9 +836,8 @@ out:
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
@@ -866,9 +865,8 @@ static inline int rhashtable_insert_fast(
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhltable_insert_key(
struct rhltable *hlt, const void *key, struct rhlist_head *list,
@@ -890,9 +888,8 @@ static inline int rhltable_insert_key(
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhltable_insert(
struct rhltable *hlt, struct rhlist_head *list,
@@ -922,9 +919,8 @@ static inline int rhltable_insert(
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhashtable_lookup_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
@@ -981,9 +977,8 @@ static inline void *rhashtable_lookup_get_insert_fast(
*
* Lookups may occur in parallel with hashtable mutations and resizing.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*
* Returns zero on success.
*/
@@ -1134,8 +1129,8 @@ static inline int __rhashtable_remove_fast(
* walk the bucket chain upon removal. The removal operation is thus
* considerable slow if the hash table is not correctly sized.
*
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
@@ -1156,8 +1151,8 @@ static inline int rhashtable_remove_fast(
* walk the bucket chain upon removal. The removal operation is thus
* considerable slow if the hash table is not correctly sized.
*
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
@@ -1273,8 +1268,9 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
* For a completely stable walk you should construct your own data
* structure outside the hash table.
*
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
*/
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index a0233edc0718..b72ebdff0b77 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -65,7 +65,7 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
/*
* ring_buffer_discard_commit will remove an event that has not
- * ben committed yet. If this is used, then ring_buffer_unlock_commit
+ * been committed yet. If this is used, then ring_buffer_unlock_commit
* must not be called on the discarded event. This function
* will try to remove the event from the ring buffer completely
* if another event has not been written after it.
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index ca07366c4c33..9fe156d1c018 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -1,35 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Remote processor messaging
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Texas Instruments nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LINUX_RPMSG_H
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index a622f029836e..96e26d94719f 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _LINUX_RPMSG_QCOM_GLINK_H
#define _LINUX_RPMSG_QCOM_GLINK_H
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 746580c1939c..5974cedd008c 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -1,28 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/rslib.h
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
* RS code lifted from reed solomon library written by Phil Karn
* Copyright 2002 Phil Karn, KA9Q
- *
- * $Id: rslib.h,v 1.4 2005/11/07 11:14:52 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#ifndef _RSLIB_H_
#define _RSLIB_H_
#include <linux/list.h>
+#include <linux/types.h> /* for gfp_t */
+#include <linux/gfp.h> /* for GFP_KERNEL */
/**
- * struct rs_control - rs control structure
+ * struct rs_codec - rs codec data
*
* @mm: Bits per symbol
* @nn: Symbols per block (= (1<<mm)-1)
@@ -36,24 +29,34 @@
* @gfpoly: The primitive generator polynominal
* @gffunc: Function to generate the field, if non-canonical representation
* @users: Users of this structure
- * @list: List entry for the rs control list
+ * @list: List entry for the rs codec list
*/
-struct rs_control {
- int mm;
- int nn;
+struct rs_codec {
+ int mm;
+ int nn;
uint16_t *alpha_to;
uint16_t *index_of;
uint16_t *genpoly;
- int nroots;
- int fcr;
- int prim;
- int iprim;
+ int nroots;
+ int fcr;
+ int prim;
+ int iprim;
int gfpoly;
int (*gffunc)(int);
int users;
struct list_head list;
};
+/**
+ * struct rs_control - rs control structure per instance
+ * @codec: The codec used for this instance
+ * @buffers: Internal scratch buffers used in calls to decode_rs()
+ */
+struct rs_control {
+ struct rs_codec *codec;
+ uint16_t buffers[0];
+};
+
/* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */
#ifdef CONFIG_REED_SOLOMON_ENC8
int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
@@ -76,18 +79,37 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
uint16_t *corr);
#endif
-/* Create or get a matching rs control structure */
-struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
- int nroots);
+struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
+ int nroots, gfp_t gfp);
+
+/**
+ * init_rs - Create a RS control struct and initialize it
+ * @symsize: the symbol size (number of bits)
+ * @gfpoly: the extended Galois field generator polynomial coefficients,
+ * with the 0th coefficient in the low order bit. The polynomial
+ * must be primitive;
+ * @fcr: the first consecutive root of the rs code generator polynomial
+ * in index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ *
+ * Allocations use GFP_KERNEL.
+ */
+static inline struct rs_control *init_rs(int symsize, int gfpoly, int fcr,
+ int prim, int nroots)
+{
+ return init_rs_gfp(symsize, gfpoly, fcr, prim, nroots, GFP_KERNEL);
+}
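As a rough illustration of the split between the shared codec and the per-instance control structure, the sketch below creates an 8-bit codec and attaches parity to a data block. The generator polynomial 0x11d and the 16 parity symbols are example parameters rather than values mandated by this header, and the snippet assumes CONFIG_REED_SOLOMON_ENC8 is enabled.

```c
#include <linux/errno.h>
#include <linux/rslib.h>
#include <linux/string.h>

static int example_rs_encode(uint8_t *data, int len, uint16_t *par)
{
	struct rs_control *rsc;
	int ret;

	rsc = init_rs(8, 0x11d, 0, 1, 16);	/* GFP_KERNEL allocation */
	if (!rsc)
		return -ENOMEM;

	memset(par, 0, 16 * sizeof(*par));	/* parity must start zeroed */
	ret = encode_rs8(rsc, data, len, par, 0);

	free_rs(rsc);
	return ret;
}
```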
+
struct rs_control *init_rs_non_canonical(int symsize, int (*func)(int),
- int fcr, int prim, int nroots);
+ int fcr, int prim, int nroots);
/* Release a rs control structure */
void free_rs(struct rs_control *rs);
/** modulo replacement for galois field arithmetics
*
- * @rs: the rs control structure
+ * @rs: Pointer to the RS codec
* @x: the value to reduce
*
* where
@@ -97,7 +119,7 @@ void free_rs(struct rs_control *rs);
* Simple arithmetic modulo would return a wrong result for values
* >= 3 * rs->nn
*/
-static inline int rs_modnn(struct rs_control *rs, int x)
+static inline int rs_modnn(struct rs_codec *rs, int x)
{
while (x >= rs->nn) {
x -= rs->nn;
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4c007f69082f..6268208760e9 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -285,7 +285,7 @@ void rtc_nvmem_unregister(struct rtc_device *rtc);
static inline int rtc_nvmem_register(struct rtc_device *rtc,
struct nvmem_config *nvmem_config)
{
- return -ENODEV;
+ return 0;
}
static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 56707d5ff6ad..ab93b6eae696 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -44,6 +44,12 @@ struct rw_semaphore {
#endif
};
+/*
+ * Setting bit 0 of the owner field with other non-zero bits will indicate
+ * that the rwsem is writer-owned with an unknown owner.
+ */
+#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L)
+
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 841585f6e5f2..e6539536dea9 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -127,6 +127,12 @@ struct sbitmap_queue {
* @round_robin: Allocate bits in strict round-robin order.
*/
bool round_robin;
+
+ /**
+ * @min_shallow_depth: The minimum shallow depth which may be passed to
+ * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
+ */
+ unsigned int min_shallow_depth;
};
/**
@@ -390,6 +396,9 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq);
* @shallow_depth: The maximum number of bits to allocate from a single word.
* See sbitmap_get_shallow().
*
+ * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
+ * initializing @sbq.
+ *
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
@@ -424,6 +433,9 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
* @shallow_depth: The maximum number of bits to allocate from a single word.
* See sbitmap_get_shallow().
*
+ * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
+ * initializing @sbq.
+ *
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
@@ -439,6 +451,23 @@ static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
}
/**
+ * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
+ * minimum shallow depth that will be used.
+ * @sbq: Bitmap queue in question.
+ * @min_shallow_depth: The minimum shallow depth that will be passed to
+ * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
+ *
+ * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
+ * depends on the depth of the bitmap. Since the shallow allocation functions
+ * effectively operate with a different depth, the shallow depth must be taken
+ * into account when calculating the batch size. This function must be called
+ * with the minimum shallow depth that will be used. Failure to do so can result
+ * in missed wakeups.
+ */
+void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
+ unsigned int min_shallow_depth);
+
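A sketch of the ordering the comment above requires: initialize the queue first, then declare the smallest shallow depth that will ever be passed to the shallow allocators, before any allocation happens. The depth, shift, and node values are illustrative.

```c
#include <linux/sbitmap.h>

static int example_sbq_setup(struct sbitmap_queue *sbq)
{
	int ret;

	/* 128 bits, default shift, FIFO allocation order, any NUMA node. */
	ret = sbitmap_queue_init_node(sbq, 128, -1, false, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;

	/* Shallow gets will never be asked for a depth below 8. */
	sbitmap_queue_min_shallow_depth(sbq, 8);

	return 0;
}
```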
+/**
* sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
* &struct sbitmap_queue.
* @sbq: Bitmap to free from.
@@ -484,6 +513,13 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
/**
+ * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
+ * on a &struct sbitmap_queue.
+ * @sbq: Bitmap queue to wake up.
+ */
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+
+/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
* seq_file.
* @sbq: Bitmap queue to show.
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b3d697f3b573..87bf02d93a27 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
+#include <linux/rseq.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -112,17 +113,36 @@ struct task_group;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
#define __set_current_state(state_value) \
do { \
+ WARN_ON_ONCE(is_special_task_state(state_value));\
current->task_state_change = _THIS_IP_; \
current->state = (state_value); \
} while (0)
+
#define set_current_state(state_value) \
do { \
+ WARN_ON_ONCE(is_special_task_state(state_value));\
current->task_state_change = _THIS_IP_; \
smp_store_mb(current->state, (state_value)); \
} while (0)
+#define set_special_state(state_value) \
+ do { \
+ unsigned long flags; /* may shadow */ \
+ WARN_ON_ONCE(!is_special_task_state(state_value)); \
+ raw_spin_lock_irqsave(&current->pi_lock, flags); \
+ current->task_state_change = _THIS_IP_; \
+ current->state = (state_value); \
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
+ } while (0)
#else
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -144,8 +164,8 @@ struct task_group;
*
* The above is typically ordered against the wakeup, which does:
*
- * need_sleep = false;
- * wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ * need_sleep = false;
+ * wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* Where wake_up_state() (and all other wakeup primitives) imply enough
* barriers to order the store of the variable against wakeup.
@@ -154,12 +174,33 @@ struct task_group;
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
*
* Also see the comments of try_to_wake_up().
*/
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value) \
+ current->state = (state_value)
+
+#define set_current_state(state_value) \
+ smp_store_mb(current->state, (state_value))
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * cannot use the regular condition-based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
+ * will not collide with our state change.
+ */
+#define set_special_state(state_value) \
+ do { \
+ unsigned long flags; /* may shadow */ \
+ raw_spin_lock_irqsave(&current->pi_lock, flags); \
+ current->state = (state_value); \
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
+ } while (0)
+
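For contrast, a conventional sleeper that is fully covered by the plain helpers looks like the hedged sketch below; only states outside this pattern (TASK_STOPPED, TASK_TRACED, TASK_DEAD) need set_special_state() and its pi_lock serialization.

```c
/*
 * Ordinary condition-based wait loop; @done is written by the waker
 * before it calls wake_up_process() on this task.
 */
static void example_wait_for(bool *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
```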
#endif
/* Task command name length: */
@@ -701,7 +742,7 @@ struct task_struct {
pid_t pid;
pid_t tgid;
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
/* Canary value for the -fstack-protector GCC feature: */
unsigned long stack_canary;
#endif
@@ -1007,6 +1048,17 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_RSEQ
+ struct rseq __user *rseq;
+ u32 rseq_len;
+ u32 rseq_sig;
+ /*
+ * RmW on rseq_event_mask must be performed atomically
+ * with respect to preemption.
+ */
+ unsigned long rseq_event_mask;
+#endif
+
struct tlbflush_unmap_batch tlb_ubc;
struct rcu_head rcu;
@@ -1078,7 +1130,7 @@ struct task_struct {
#ifdef CONFIG_KCOV
/* Coverage collection mode enabled for this task (0 if disabled): */
- enum kcov_mode kcov_mode;
+ unsigned int kcov_mode;
/* Size of the kcov_area: */
unsigned int kcov_size;
@@ -1393,7 +1445,8 @@ static inline bool is_percpu_thread(void)
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-
+#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1418,6 +1471,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
@@ -1464,6 +1524,7 @@ static inline int task_nice(const struct task_struct *p)
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
+extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
@@ -1578,6 +1639,12 @@ static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
+static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
+ bool value)
+{
+ update_ti_thread_flag(task_thread_info(tsk), flag, value);
+}
+
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
@@ -1613,7 +1680,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* explicit rescheduling in places that are safe. The return
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
- * cond_resched_softirq() will enable bhs before scheduling.
*/
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
@@ -1633,13 +1699,6 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
-extern int __cond_resched_softirq(void);
-
-#define cond_resched_softirq() ({ \
- ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
-})
-
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
@@ -1716,4 +1775,126 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_RSEQ
+
+/*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+ */
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
+ RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
+ RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
+};
+
+enum rseq_event_mask {
+ RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
+ RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
+ RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
+};
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+ if (t->rseq)
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+}
+
+void __rseq_handle_notify_resume(struct pt_regs *regs);
+
+static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+{
+ if (current->rseq)
+ __rseq_handle_notify_resume(regs);
+}
+
+static inline void rseq_signal_deliver(struct pt_regs *regs)
+{
+ preempt_disable();
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ preempt_enable();
+ rseq_handle_notify_resume(regs);
+}
+
+/* rseq_preempt() requires preemption to be disabled. */
+static inline void rseq_preempt(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/* rseq_migrate() requires preemption to be disabled. */
+static inline void rseq_migrate(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/*
+ * If the parent process has a registered restartable sequences area, the
+ * child inherits it. This only applies when forking a process, not a
+ * thread. In case the parent fork()s in the middle of a restartable
+ * sequence, set the resume notifier to force the child to retry.
+ */
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+ if (clone_flags & CLONE_THREAD) {
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+ } else {
+ t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
+ t->rseq_sig = current->rseq_sig;
+ t->rseq_event_mask = current->rseq_event_mask;
+ rseq_preempt(t);
+ }
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+}
+
+#else
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+}
+static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+{
+}
+static inline void rseq_signal_deliver(struct pt_regs *regs)
+{
+}
+static inline void rseq_preempt(struct task_struct *t)
+{
+}
+static inline void rseq_migrate(struct task_struct *t)
+{
+}
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+}
+static inline void rseq_execve(struct task_struct *t)
+{
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+void rseq_syscall(struct pt_regs *regs);
+
+#else
+
+static inline void rseq_syscall(struct pt_regs *regs)
+{
+}
+
+#endif
+
#endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2c570cd934af..44d356f5e47c 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -28,7 +28,7 @@ extern struct mm_struct *mm_alloc(void);
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
- * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
@@ -62,7 +62,7 @@ static inline void mmdrop(struct mm_struct *mm)
*
* Use mmput() to release the reference acquired by mmget().
*
- * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
@@ -163,13 +163,28 @@ static inline gfp_t current_gfp_context(gfp_t flags)
}
#ifdef CONFIG_LOCKDEP
+extern void __fs_reclaim_acquire(void);
+extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
+static inline void __fs_reclaim_acquire(void) { }
+static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
+/**
+ * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
+ *
+ * This function marks the beginning of the GFP_NOIO allocation scope.
+ * All further allocations will implicitly drop the __GFP_IO flag and so
+ * they are safe for the IO critical section from the allocation recursion
+ * point of view. Use memalloc_noio_restore to end the scope with the flags
+ * returned by this function.
+ *
+ * This function is safe to be used from any context.
+ */
static inline unsigned int memalloc_noio_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
@@ -177,11 +192,30 @@ static inline unsigned int memalloc_noio_save(void)
return flags;
}
+/**
+ * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save
+ * function. Always make sure that the given flags value is the return
+ * value from the pairing memalloc_noio_save call.
+ */
static inline void memalloc_noio_restore(unsigned int flags)
{
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
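
For illustration, the intended save/restore pairing looks like this; the surrounding function is hypothetical, and the GFP_NOFS helpers below pair in exactly the same way:

static void hypothetical_device_resume(void)
{
	unsigned int noio_flags;

	noio_flags = memalloc_noio_save();	/* allocations now drop __GFP_IO */
	/* ... allocate whatever is needed to resume the device ... */
	memalloc_noio_restore(noio_flags);	/* end the scope with the saved flags */
}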
+/**
+ * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
+ *
+ * This function marks the beginning of the GFP_NOFS allocation scope.
+ * All further allocations will implicitly drop the __GFP_FS flag and so
+ * they are safe for the FS critical section from the allocation recursion
+ * point of view. Use memalloc_nofs_restore to end the scope with the flags
+ * returned by this function.
+ *
+ * This function is safe to be used from any context.
+ */
static inline unsigned int memalloc_nofs_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
@@ -189,6 +223,14 @@ static inline unsigned int memalloc_nofs_save(void)
return flags;
}
+/**
+ * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save
+ * function. Always make sure that the given flags value is the return
+ * value from the pairing memalloc_nofs_save call.
+ */
static inline void memalloc_nofs_restore(unsigned int flags)
{
current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index a7ce74c74e49..113d1ad1ced7 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
if (current->jobctl & JOBCTL_STOP_DEQUEUED)
- __set_current_state(TASK_STOPPED);
+ set_special_state(TASK_STOPPED);
spin_unlock_irq(&current->sighand->siglock);
schedule();
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index b458c87b866c..f4c9fc0fc755 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -85,8 +85,8 @@ struct scmi_clk_ops {
* @level_set: sets the performance level of a domain
* @level_get: gets the performance level of a domain
* @device_domain_id: gets the scmi domain id for a given device
- * @get_transition_latency: gets the DVFS transition latency for a given device
- * @add_opps_to_device: adds all the OPPs for a given device
+ * @transition_latency_get: gets the DVFS transition latency for a given device
+ * @device_opps_add: adds all the OPPs for a given device
* @freq_set: sets the frequency for a given device using sustained frequency
* to sustained performance level mapping
* @freq_get: gets the frequency for a given device using sustained frequency
@@ -102,10 +102,10 @@ struct scmi_perf_ops {
int (*level_get)(const struct scmi_handle *handle, u32 domain,
u32 *level, bool poll);
int (*device_domain_id)(struct device *dev);
- int (*get_transition_latency)(const struct scmi_handle *handle,
+ int (*transition_latency_get)(const struct scmi_handle *handle,
struct device *dev);
- int (*add_opps_to_device)(const struct scmi_handle *handle,
- struct device *dev);
+ int (*device_opps_add)(const struct scmi_handle *handle,
+ struct device *dev);
int (*freq_set)(const struct scmi_handle *handle, u32 domain,
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_handle *handle, u32 domain,
@@ -189,6 +189,14 @@ struct scmi_sensor_ops {
* @perf_ops: pointer to set of performance protocol operations
* @clk_ops: pointer to set of clock protocol operations
* @sensor_ops: pointer to set of sensor protocol operations
+ * @perf_priv: pointer to private data structure specific to performance
+ *	protocol (for internal use only)
+ * @clk_priv: pointer to private data structure specific to clock
+ *	protocol (for internal use only)
+ * @power_priv: pointer to private data structure specific to power
+ *	protocol (for internal use only)
+ * @sensor_priv: pointer to private data structure specific to sensors
+ *	protocol (for internal use only)
*/
struct scmi_handle {
struct device *dev;
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index c723a5c4e3ff..e5320f6c8654 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -4,8 +4,9 @@
#include <uapi/linux/seccomp.h>
-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
- SECCOMP_FILTER_FLAG_LOG)
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
+ SECCOMP_FILTER_FLAG_LOG | \
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW)
#ifdef CONFIG_SECCOMP
diff --git a/include/linux/security.h b/include/linux/security.h
index 200920f521a1..63030c85ee19 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -220,12 +220,6 @@ int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
-static inline int security_settime(const struct timespec *ts, const struct timezone *tz)
-{
- struct timespec64 ts64 = timespec_to_timespec64(*ts);
-
- return security_settime64(&ts64, tz);
-}
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
@@ -508,14 +502,6 @@ static inline int security_settime64(const struct timespec64 *ts,
return cap_settime(ts, tz);
}
-static inline int security_settime(const struct timespec *ts,
- const struct timezone *tz)
-{
- struct timespec64 ts64 = timespec_to_timespec64(*ts);
-
- return cap_settime(&ts64, tz);
-}
-
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
@@ -1191,6 +1177,7 @@ int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
int security_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern);
+int security_socket_socketpair(struct socket *socka, struct socket *sockb);
int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen);
int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen);
int security_socket_listen(struct socket *sock, int backlog);
@@ -1262,6 +1249,12 @@ static inline int security_socket_post_create(struct socket *sock,
return 0;
}
+static inline int security_socket_socketpair(struct socket *socka,
+ struct socket *sockb)
+{
+ return 0;
+}
+
static inline int security_socket_bind(struct socket *sock,
struct sockaddr *address,
int addrlen)
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
index 43ccd84127b6..0fdbe1ddd8d1 100644
--- a/include/linux/seq_file_net.h
+++ b/include/linux/seq_file_net.h
@@ -13,12 +13,6 @@ struct seq_net_private {
#endif
};
-int seq_open_net(struct inode *, struct file *,
- const struct seq_operations *, int);
-int single_open_net(struct inode *, struct file *file,
- int (*show)(struct seq_file *, void *));
-int seq_release_net(struct inode *, struct file *);
-int single_release_net(struct inode *, struct file *);
static inline struct net *seq_file_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
@@ -28,4 +22,17 @@ static inline struct net *seq_file_net(struct seq_file *seq)
#endif
}
+/*
+ * This one is needed for proc_create_net_single since the net pointer is
+ * stored directly in ->private rather than in a struct seq_net_private,
+ * i.e. seq_file_net() can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return (struct net *)seq->private;
+#else
+ return &init_net;
+#endif
+}
+
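
A hypothetical show callback registered through proc_create_net_single() would retrieve its namespace with this helper; the printed field is only an example:

static int hypo_stats_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_single_net(seq);

	seq_printf(seq, "netns inode: %u\n", net->ns.inum);
	return 0;
}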
#endif
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index a27ef5f56431..76b9db71e489 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -163,6 +163,7 @@ extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
+void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
void serial8250_init_port(struct uart_8250_port *up);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1d356105f25a..06ea4eeb09ab 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -233,6 +233,7 @@ struct uart_port {
#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
+#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5))
int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
@@ -348,13 +349,14 @@ struct earlycon_device {
};
struct earlycon_id {
- char name[16];
+ char name[15];
+ char name_term; /* In case compiler didn't '\0' term name */
char compatible[128];
int (*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
-extern const struct earlycon_id __earlycon_table[];
-extern const struct earlycon_id __earlycon_table_end[];
+extern const struct earlycon_id *__earlycon_table[];
+extern const struct earlycon_id *__earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
@@ -362,12 +364,19 @@ extern const struct earlycon_id __earlycon_table_end[];
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
-#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
- EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
+#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
+ static const struct earlycon_id unique_id \
+ EARLYCON_USED_OR_UNUSED __initconst \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }
+ .setup = fn }; \
+ static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
+ __section(__earlycon_table) \
+ * const __PASTE(__p, unique_id) = &unique_id
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ _OF_EARLYCON_DECLARE(_name, compat, fn, \
+ __UNIQUE_ID(__earlycon_##_name))
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
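
For reference, driver-side usage of the macro is unchanged by this rework; a hypothetical early console (names and compatible string are made up) would still declare itself as:

static void hypo_early_write(struct console *con, const char *s, unsigned int n)
{
	/* poll-write n bytes of s to the hypothetical UART FIFO */
}

static int __init hypo_early_setup(struct earlycon_device *dev, const char *opt)
{
	if (!dev->port.membase)
		return -ENODEV;
	dev->con->write = hypo_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(hypo_uart, "vendor,hypo-uart", hypo_early_setup);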
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 73b5e655a76e..f155dc607112 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -110,19 +110,6 @@ static inline bool shmem_file(struct file *file)
extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
-#ifdef CONFIG_TMPFS
-
-extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-
-#else
-
-static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
-{
- return -EINVAL;
-}
-
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
extern bool shmem_huge_enabled(struct vm_area_struct *vma);
#else
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 388ff2936a87..6794490f25b2 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -75,6 +75,9 @@ struct shrinker {
#define SHRINKER_NUMA_AWARE (1 << 0)
#define SHRINKER_MEMCG_AWARE (1 << 1)
-extern int register_shrinker(struct shrinker *);
-extern void unregister_shrinker(struct shrinker *);
+extern int prealloc_shrinker(struct shrinker *shrinker);
+extern void register_shrinker_prepared(struct shrinker *shrinker);
+extern int register_shrinker(struct shrinker *shrinker);
+extern void unregister_shrinker(struct shrinker *shrinker);
+extern void free_prealloced_shrinker(struct shrinker *shrinker);
#endif
diff --git a/include/linux/signal.h b/include/linux/signal.h
index a9bc7e1b077e..3c5200137b24 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -28,6 +28,9 @@ enum siginfo_layout {
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
+ SIL_FAULT_MCEERR,
+ SIL_FAULT_BNDERR,
+ SIL_FAULT_PKUERR,
SIL_CHLD,
SIL_RT,
SIL_SYS,
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index a6b6e8bb3d7b..62d9b0a6329f 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -97,6 +97,11 @@ static inline bool skb_array_empty_any(struct skb_array *a)
return ptr_ring_empty_any(&a->ring);
}
+static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
+{
+ return __ptr_ring_consume(&a->ring);
+}
+
static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
return ptr_ring_consume(&a->ring);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9065477ed255..c86885954994 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -573,6 +573,8 @@ enum {
SKB_GSO_ESP = 1 << 15,
SKB_GSO_UDP = 1 << 16,
+
+ SKB_GSO_UDP_L4 = 1 << 17,
};
#if BITS_PER_LONG > 32
@@ -852,8 +854,6 @@ struct sk_buff {
/*
* Handling routines are only of interest to the kernel
*/
-#include <linux/slab.h>
-
#define SKB_ALLOC_FCLONE 0x01
#define SKB_ALLOC_RX 0x02
@@ -1032,6 +1032,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
gfp_t gfp_mask, bool fclone);
@@ -1168,7 +1169,7 @@ void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
+ const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
void *data, int hlen_proto);
@@ -1205,13 +1206,14 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
NULL, 0, 0, 0, flags);
}
-static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
- void *data, __be16 proto,
- int nhoff, int hlen,
- unsigned int flags)
+static inline bool
+skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb,
+ struct flow_keys_basic *flow, void *data,
+ __be16 proto, int nhoff, int hlen,
+ unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow,
data, proto, nhoff, hlen, flags);
}
@@ -2347,11 +2349,12 @@ static inline void skb_pop_mac_header(struct sk_buff *skb)
static inline void skb_probe_transport_header(struct sk_buff *skb,
const int offset_hint)
{
- struct flow_keys keys;
+ struct flow_keys_basic keys;
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+
+ if (skb_flow_dissect_flow_keys_basic(skb, &keys, 0, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -3131,6 +3134,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
return skb->data;
}
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
@@ -3144,9 +3148,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (likely(len >= skb->len))
return 0;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
- return __pskb_trim(skb, len);
+ return pskb_trim_rcsum_slow(skb, len);
}
static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
@@ -3250,8 +3252,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-__poll_t datagram_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 81ebd71f8c03..14e3fe4bd6a1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -13,6 +13,7 @@
#define _LINUX_SLAB_H
#include <linux/gfp.h>
+#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -599,6 +600,7 @@ struct memcg_cache_params {
struct memcg_cache_array __rcu *memcg_caches;
struct list_head __root_caches_node;
struct list_head children;
+ bool dying;
};
struct {
struct mem_cgroup *memcg;
@@ -624,11 +626,13 @@ int memcg_update_all_caches(int num_memcgs);
*/
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
- if (size != 0 && n > SIZE_MAX / size)
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
- return kmalloc(n * size, flags);
- return __kmalloc(n * size, flags);
+ return kmalloc(bytes, flags);
+ return __kmalloc(bytes, flags);
}
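
Callers are unaffected, except that an element count large enough to overflow now fails cleanly instead of wrapping; a hypothetical caller:

static u64 *hypo_alloc_table(size_t nelems)
{
	/* NULL both on OOM and if nelems * sizeof(u64) would overflow. */
	return kmalloc_array(nelems, sizeof(u64), GFP_KERNEL);
}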
/**
@@ -657,11 +661,13 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
int node)
{
- if (size != 0 && n > SIZE_MAX / size)
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
- return kmalloc_node(n * size, flags, node);
- return __kmalloc_node(n * size, flags, node);
+ return kmalloc_node(bytes, flags, node);
+ return __kmalloc_node(bytes, flags, node);
}
static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index d9228e4d0320..3485c58cfd1c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -67,9 +67,10 @@ struct kmem_cache {
/*
* If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. size contains the total
- * object size including these internal fields, the following two
- * variables contain the offset to the user object and its size.
+ * fields and/or padding to every object. 'size' contains the total
+ * object size including these internal fields, while 'obj_offset'
+ * and 'object_size' contain the offset to the user object and its
+ * size.
*/
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 3773e26c08c1..09fa2c6f0e68 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,7 +101,6 @@ struct kmem_cache {
void (*ctor)(void *);
unsigned int inuse; /* Offset to metadata */
unsigned int align; /* Alignment */
- unsigned int reserved; /* Reserved bytes at the end of slabs */
unsigned int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
new file mode 100644
index 000000000000..c5d52e2cb275
--- /dev/null
+++ b/include/linux/soc/qcom/apr.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_APR_H_
+#define __QCOM_APR_H_
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+extern struct bus_type aprbus;
+
+#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
+
+/*
+ * HEADER field layout:
+ *	version      : bits 0:3
+ *	header_size  : bits 4:7
+ *	message_type : bits 8:9
+ *	reserved     : bits 10:15
+ */
+#define APR_HDR_FIELD(msg_type, hdr_len, ver)\
+ (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF))
+
+#define APR_HDR_SIZE sizeof(struct apr_hdr)
+#define APR_SEQ_CMD_HDR_FIELD APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(APR_HDR_SIZE), \
+ APR_PKT_VER)
+/* Version */
+#define APR_PKT_VER 0x0
+
+/* Command and Response Types */
+#define APR_MSG_TYPE_EVENT 0x0
+#define APR_MSG_TYPE_CMD_RSP 0x1
+#define APR_MSG_TYPE_SEQ_CMD 0x2
+#define APR_MSG_TYPE_NSEQ_CMD 0x3
+#define APR_MSG_TYPE_MAX 0x04
+
+/* APR Basic Response Message */
+#define APR_BASIC_RSP_RESULT 0x000110E8
+#define APR_RSP_ACCEPTED 0x000100BE
+
+struct aprv2_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+/* hdr field Ver [0:3], Size [4:7], Message type [8:9] */
+#define APR_HDR_FIELD_VER(h) (h & 0x000F)
+#define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4)
+#define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4)
+#define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8)
+
+struct apr_hdr {
+ uint16_t hdr_field;
+ uint16_t pkt_size;
+ uint8_t src_svc;
+ uint8_t src_domain;
+ uint16_t src_port;
+ uint8_t dest_svc;
+ uint8_t dest_domain;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+} __packed;
+
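
As an illustration of the field helpers above (struct apr_hdr is 20 bytes, so APR_HDR_LEN(APR_HDR_SIZE) is 5), packing and then unpacking a sequential-command header is a round trip; the function and its WARN_ON checks are purely illustrative:

static void hypo_fill_hdr(struct apr_hdr *hdr)
{
	hdr->hdr_field = APR_SEQ_CMD_HDR_FIELD;
	hdr->pkt_size = APR_HDR_SIZE;	/* payload-less packet */

	/* The decode helpers recover the packed fields. */
	WARN_ON(APR_HDR_FIELD_MT(hdr->hdr_field) != APR_MSG_TYPE_SEQ_CMD);
	WARN_ON(APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field) != APR_HDR_SIZE);
	WARN_ON(APR_HDR_FIELD_VER(hdr->hdr_field) != APR_PKT_VER);
}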
+struct apr_pkt {
+ struct apr_hdr hdr;
+ uint8_t payload[];
+};
+
+struct apr_resp_pkt {
+ struct apr_hdr hdr;
+ void *payload;
+ int payload_size;
+};
+
+/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
+#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
+#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
+
+struct apr_device {
+ struct device dev;
+ uint16_t svc_id;
+ uint16_t domain_id;
+ uint32_t version;
+ char name[APR_NAME_SIZE];
+ spinlock_t lock;
+ struct list_head node;
+};
+
+#define to_apr_device(d) container_of(d, struct apr_device, dev)
+
+struct apr_driver {
+ int (*probe)(struct apr_device *sl);
+ int (*remove)(struct apr_device *sl);
+ int (*callback)(struct apr_device *a,
+ struct apr_resp_pkt *d);
+ struct device_driver driver;
+ const struct apr_device_id *id_table;
+};
+
+#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define apr_driver_register(drv) __apr_driver_register(drv, THIS_MODULE)
+
+int __apr_driver_register(struct apr_driver *drv, struct module *owner);
+void apr_driver_unregister(struct apr_driver *drv);
+
+/**
+ * module_apr_driver() - Helper macro for registering an aprbus driver
+ * @__apr_driver: apr_driver struct
+ *
+ * Helper macro for aprbus drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module
+ * may only use this macro once, and calling it replaces module_init()
+ * and module_exit().
+ */
+#define module_apr_driver(__apr_driver) \
+ module_driver(__apr_driver, apr_driver_register, \
+ apr_driver_unregister)
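
A minimal sketch of a client driver built on these hooks; the names are hypothetical and no real APR service is implied:

static int hypo_apr_probe(struct apr_device *adev)
{
	return 0;
}

static int hypo_apr_remove(struct apr_device *adev)
{
	return 0;
}

static struct apr_driver hypo_apr_driver = {
	.probe = hypo_apr_probe,
	.remove = hypo_apr_remove,
	.driver = {
		.name = "hypo-apr-client",
	},
};
module_apr_driver(hypo_apr_driver);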
+
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+
+#endif /* __QCOM_APR_H_ */
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index c1657ed27b30..86e1b358688a 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -9,4 +9,6 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
int qcom_smem_get_free_space(unsigned host);
+phys_addr_t qcom_smem_virt_to_phys(void *p);
+
#endif
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 66693bc4c6ad..7127ec301537 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -167,6 +167,8 @@ struct knav_dma_desc {
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config);
void knav_dma_close_channel(void *channel);
+int knav_dma_get_flow(void *channel);
+bool knav_dma_device_ready(void);
#else
static inline void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
@@ -176,6 +178,16 @@ static inline void *knav_dma_open_channel(struct device *dev, const char *name,
static inline void knav_dma_close_channel(void *channel)
{}
+static inline int knav_dma_get_flow(void *channel)
+{
+ return -EINVAL;
+}
+
+static inline bool knav_dma_device_ready(void)
+{
+ return false;
+}
+
#endif
#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
index 9f0ebb3bad27..9745df6ed9d3 100644
--- a/include/linux/soc/ti/knav_qmss.h
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -86,5 +86,6 @@ int knav_pool_desc_map(void *ph, void *desc, unsigned size,
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);
+bool knav_qmss_device_ready(void);
#endif /* __SOC_TI_KNAV_QMSS_H__ */
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 0ccbc138c26a..18435e5c6364 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments System Control Interface Protocol
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __TISCI_PROTOCOL_H
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ea50f4a65816..7ed4713d5337 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -207,8 +207,9 @@ struct ucred {
* PF_SMC protocol family that
* reuses AF_INET address family
*/
+#define AF_XDP 44 /* XDP sockets */
-#define AF_MAX 44 /* For now.. */
+#define AF_MAX 45 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -257,6 +258,7 @@ struct ucred {
#define PF_KCM AF_KCM
#define PF_QIPCRTR AF_QIPCRTR
#define PF_SMC AF_SMC
+#define PF_XDP AF_XDP
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -338,6 +340,7 @@ struct ucred {
#define SOL_NFC 280
#define SOL_KCM 281
#define SOL_TLS 282
+#define SOL_XDP 283
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
index 1a4b77317fa1..374d0fdb0743 100644
--- a/include/linux/sony-laptop.h
+++ b/include/linux/sony-laptop.h
@@ -28,7 +28,11 @@
#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */
#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */
+#if IS_ENABLED(CONFIG_SONY_LAPTOP)
int sony_pic_camera_command(int command, u8 value);
+#else
+static inline int sony_pic_camera_command(int command, u8 value) { return 0; };
+#endif
#endif /* __KERNEL__ */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index e91fdcf41049..962971e6a9c7 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -23,9 +23,24 @@ struct sdw_slave;
#define SDW_MASTER_DEV_NUM 14
#define SDW_NUM_DEV_ID_REGISTERS 6
+/* frame shape defines */
+/*
+ * Note: The maximum row index defined in the SoundWire 1.1 spec is 23. In
+ * order to fill the hole with 0, one more dummy entry is added.
+ */
+#define SDW_FRAME_ROWS 24
+#define SDW_FRAME_COLS 8
+#define SDW_FRAME_ROW_COLS (SDW_FRAME_ROWS * SDW_FRAME_COLS)
+
+#define SDW_FRAME_CTRL_BITS 48
#define SDW_MAX_DEVICES 11
+#define SDW_VALID_PORT_RANGE(n) (n <= 14 && n >= 1)
+
+#define SDW_DAI_ID_RANGE_START 100
+#define SDW_DAI_ID_RANGE_END 200
+
/**
* enum sdw_slave_status - Slave status
* @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus.
@@ -61,6 +76,30 @@ enum sdw_command_response {
SDW_CMD_FAIL_OTHER = 4,
};
+/**
+ * enum sdw_stream_type: data stream type
+ *
+ * @SDW_STREAM_PCM: PCM data stream
+ * @SDW_STREAM_PDM: PDM data stream
+ *
+ * The spec doesn't define this, but it is used in the implementation.
+ */
+enum sdw_stream_type {
+ SDW_STREAM_PCM = 0,
+ SDW_STREAM_PDM = 1,
+};
+
+/**
+ * enum sdw_data_direction: Data direction
+ *
+ * @SDW_DATA_DIR_RX: Data into Port
+ * @SDW_DATA_DIR_TX: Data out of Port
+ */
+enum sdw_data_direction {
+ SDW_DATA_DIR_RX = 0,
+ SDW_DATA_DIR_TX = 1,
+};
+
/*
* SDW properties, defined in MIPI DisCo spec v1.0
*/
@@ -341,11 +380,92 @@ struct sdw_slave_intr_status {
};
/**
- * struct sdw_slave_ops - Slave driver callback ops
+ * enum sdw_reg_bank - SoundWire register banks
+ * @SDW_BANK0: SoundWire register bank 0
+ * @SDW_BANK1: SoundWire register bank 1
+ */
+enum sdw_reg_bank {
+ SDW_BANK0,
+ SDW_BANK1,
+};
+
+/**
+ * struct sdw_bus_conf: Bus configuration
+ *
+ * @clk_freq: Clock frequency, in Hz
+ * @num_rows: Number of rows in frame
+ * @num_cols: Number of columns in frame
+ * @bank: Next register bank
+ */
+struct sdw_bus_conf {
+ unsigned int clk_freq;
+ unsigned int num_rows;
+ unsigned int num_cols;
+ unsigned int bank;
+};
+
+/**
+ * struct sdw_prepare_ch: Prepare/De-prepare Data Port channel
+ *
+ * @num: Port number
+ * @ch_mask: Active channel mask
+ * @prepare: Prepare (true) /de-prepare (false) channel
+ * @bank: Register bank which the Slave/Master driver should program for
+ * implementation-defined registers. This is always updated to the
+ * next_bank value read from the bus params.
+ *
+ */
+struct sdw_prepare_ch {
+ unsigned int num;
+ unsigned int ch_mask;
+ bool prepare;
+ unsigned int bank;
+};
+
+/**
+ * enum sdw_port_prep_ops: Prepare operations for Data Port
+ *
+ * @SDW_OPS_PORT_PRE_PREP: Pre prepare operation for the Port
+ * @SDW_OPS_PORT_PREP: Prepare operation for the Port
+ * @SDW_OPS_PORT_POST_PREP: Post prepare operation for the Port
+ */
+enum sdw_port_prep_ops {
+ SDW_OPS_PORT_PRE_PREP = 0,
+ SDW_OPS_PORT_PREP = 1,
+ SDW_OPS_PORT_POST_PREP = 2,
+};
+
+/**
+ * struct sdw_bus_params: Structure holding bus configuration
+ *
+ * @curr_bank: Current bank in use (BANK0/BANK1)
+ * @next_bank: Next bank to use (BANK0/BANK1). next_bank will always be
+ * set to !curr_bank
+ * @max_dr_freq: Maximum double rate clock frequency supported, in Hz
+ * @curr_dr_freq: Current double rate clock frequency, in Hz
+ * @bandwidth: Current bandwidth
+ * @col: Active columns
+ * @row: Active rows
+ */
+struct sdw_bus_params {
+ enum sdw_reg_bank curr_bank;
+ enum sdw_reg_bank next_bank;
+ unsigned int max_dr_freq;
+ unsigned int curr_dr_freq;
+ unsigned int bandwidth;
+ unsigned int col;
+ unsigned int row;
+};
+
+/**
+ * struct sdw_slave_ops: Slave driver callback ops
+ *
* @read_prop: Read Slave properties
* @interrupt_callback: Device interrupt notification (invoked in thread
* context)
* @update_status: Update Slave status
+ * @bus_config: Update the bus config for Slave
+ * @port_prep: Prepare the port with parameters
*/
struct sdw_slave_ops {
int (*read_prop)(struct sdw_slave *sdw);
@@ -353,6 +473,11 @@ struct sdw_slave_ops {
struct sdw_slave_intr_status *status);
int (*update_status)(struct sdw_slave *slave,
enum sdw_slave_status status);
+ int (*bus_config)(struct sdw_slave *slave,
+ struct sdw_bus_params *params);
+ int (*port_prep)(struct sdw_slave *slave,
+ struct sdw_prepare_ch *prepare_ch,
+ enum sdw_port_prep_ops pre_ops);
};
/**
@@ -406,6 +531,93 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
* SDW master structures and APIs
*/
+/**
+ * struct sdw_port_params: Data Port parameters
+ *
+ * @num: Port number
+ * @bps: Word length of the Port
+ * @flow_mode: Port Data flow mode
+ * @data_mode: Test modes or normal mode
+ *
+ * This is used to program the Data Port based on Data Port stream
+ * parameters.
+ */
+struct sdw_port_params {
+ unsigned int num;
+ unsigned int bps;
+ unsigned int flow_mode;
+ unsigned int data_mode;
+};
+
+/**
+ * struct sdw_transport_params: Data Port Transport Parameters
+ *
+ * @blk_grp_ctrl_valid: Port implements block group control
+ * @port_num: Port number
+ * @blk_grp_ctrl: Block group control value
+ * @sample_interval: Sample interval
+ * @offset1: Blockoffset of the payload data
+ * @offset2: Blockoffset of the payload data
+ * @hstart: Horizontal start of the payload data
+ * @hstop: Horizontal stop of the payload data
+ * @blk_pkg_mode: Block per channel or block per port
+ * @lane_ctrl: Data lane the Port uses for data transfer. Currently only a
+ *	single data lane is supported on the bus
+ *
+ * This is used to program the Data Port based on Data Port transport
+ * parameters. All these parameters are banked and can be modified
+ * during a bank switch without any artifacts in the audio stream.
+ */
+struct sdw_transport_params {
+ bool blk_grp_ctrl_valid;
+ unsigned int port_num;
+ unsigned int blk_grp_ctrl;
+ unsigned int sample_interval;
+ unsigned int offset1;
+ unsigned int offset2;
+ unsigned int hstart;
+ unsigned int hstop;
+ unsigned int blk_pkg_mode;
+ unsigned int lane_ctrl;
+};
+
+/**
+ * struct sdw_enable_ch: Enable/disable Data Port channel
+ *
+ * @port_num: Port number
+ * @ch_mask: Active channel mask
+ * @enable: Enable (true) /disable (false) channel
+ */
+struct sdw_enable_ch {
+ unsigned int port_num;
+ unsigned int ch_mask;
+ bool enable;
+};
+
+/**
+ * struct sdw_master_port_ops: Callback functions from bus to Master
+ * driver to set Master Data ports.
+ *
+ * @dpn_set_port_params: Set the Port parameters for the Master Port.
+ * Mandatory callback
+ * @dpn_set_port_transport_params: Set transport parameters for the Master
+ * Port. Mandatory callback
+ * @dpn_port_prep: Port prepare operations for the Master Data Port.
+ * @dpn_port_enable_ch: Enable the channels of Master Port.
+ */
+struct sdw_master_port_ops {
+ int (*dpn_set_port_params)(struct sdw_bus *bus,
+ struct sdw_port_params *port_params,
+ unsigned int bank);
+ int (*dpn_set_port_transport_params)(struct sdw_bus *bus,
+ struct sdw_transport_params *transport_params,
+ enum sdw_reg_bank bank);
+ int (*dpn_port_prep)(struct sdw_bus *bus,
+ struct sdw_prepare_ch *prepare_ch);
+ int (*dpn_port_enable_ch)(struct sdw_bus *bus,
+ struct sdw_enable_ch *enable_ch, unsigned int bank);
+};
+
struct sdw_msg;
/**
@@ -426,6 +638,9 @@ struct sdw_defer {
* @xfer_msg: Transfer message callback
* @xfer_msg_defer: Defer version of transfer message callback
* @reset_page_addr: Reset the SCP page address registers
+ * @set_bus_conf: Set the bus configuration
+ * @pre_bank_switch: Callback for pre bank switch
+ * @post_bank_switch: Callback for post bank switch
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
@@ -437,6 +652,11 @@ struct sdw_master_ops {
struct sdw_defer *defer);
enum sdw_command_response (*reset_page_addr)
(struct sdw_bus *bus, unsigned int dev_num);
+ int (*set_bus_conf)(struct sdw_bus *bus,
+ struct sdw_bus_params *params);
+ int (*pre_bank_switch)(struct sdw_bus *bus);
+ int (*post_bank_switch)(struct sdw_bus *bus);
+
};
/**
@@ -449,9 +669,15 @@ struct sdw_master_ops {
* @bus_lock: bus lock
* @msg_lock: message lock
* @ops: Master callback ops
+ * @port_ops: Master port callback ops
+ * @params: Current bus parameters
* @prop: Master properties
+ * @m_rt_list: List of Master runtime instances of all stream(s) running on
+ * the Bus. This is used to compute and program bus bandwidth, clock, frame
+ * shape, transport and port parameters
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
+ * @bank_switch_timeout: Bank switch timeout computed
*/
struct sdw_bus {
struct device *dev;
@@ -461,14 +687,118 @@ struct sdw_bus {
struct mutex bus_lock;
struct mutex msg_lock;
const struct sdw_master_ops *ops;
+ const struct sdw_master_port_ops *port_ops;
+ struct sdw_bus_params params;
struct sdw_master_prop prop;
+ struct list_head m_rt_list;
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
+ u32 bank_switch_timeout;
};
int sdw_add_bus_master(struct sdw_bus *bus);
void sdw_delete_bus_master(struct sdw_bus *bus);
+/**
+ * sdw_port_config: Master or Slave Port configuration
+ *
+ * @num: Port number
+ * @ch_mask: channels mask for port
+ */
+struct sdw_port_config {
+ unsigned int num;
+ unsigned int ch_mask;
+};
+
+/**
+ * sdw_stream_config: Master or Slave stream configuration
+ *
+ * @frame_rate: Audio frame rate of the stream, in Hz
+ * @ch_count: Channel count of the stream
+ * @bps: Number of bits per audio sample
+ * @direction: Data direction
+ * @type: Stream type PCM or PDM
+ */
+struct sdw_stream_config {
+ unsigned int frame_rate;
+ unsigned int ch_count;
+ unsigned int bps;
+ enum sdw_data_direction direction;
+ enum sdw_stream_type type;
+};
+
+/**
+ * sdw_stream_state: Stream states
+ *
+ * @SDW_STREAM_ALLOCATED: New stream allocated.
+ * @SDW_STREAM_CONFIGURED: Stream configured
+ * @SDW_STREAM_PREPARED: Stream prepared
+ * @SDW_STREAM_ENABLED: Stream enabled
+ * @SDW_STREAM_DISABLED: Stream disabled
+ * @SDW_STREAM_DEPREPARED: Stream de-prepared
+ * @SDW_STREAM_RELEASED: Stream released
+ */
+enum sdw_stream_state {
+ SDW_STREAM_ALLOCATED = 0,
+ SDW_STREAM_CONFIGURED = 1,
+ SDW_STREAM_PREPARED = 2,
+ SDW_STREAM_ENABLED = 3,
+ SDW_STREAM_DISABLED = 4,
+ SDW_STREAM_DEPREPARED = 5,
+ SDW_STREAM_RELEASED = 6,
+};
+
+/**
+ * sdw_stream_params: Stream parameters
+ *
+ * @rate: Sampling frequency, in Hz
+ * @ch_count: Number of channels
+ * @bps: bits per channel sample
+ */
+struct sdw_stream_params {
+ unsigned int rate;
+ unsigned int ch_count;
+ unsigned int bps;
+};
+
+/**
+ * sdw_stream_runtime: Runtime stream parameters
+ *
+ * @name: SoundWire stream name
+ * @params: Stream parameters
+ * @state: Current state of the stream
+ * @type: Stream type PCM or PDM
+ * @m_rt: Master runtime
+ */
+struct sdw_stream_runtime {
+ char *name;
+ struct sdw_stream_params params;
+ enum sdw_stream_state state;
+ enum sdw_stream_type type;
+ struct sdw_master_runtime *m_rt;
+};
+
+struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name);
+void sdw_release_stream(struct sdw_stream_runtime *stream);
+int sdw_stream_add_master(struct sdw_bus *bus,
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
+int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
+int sdw_stream_remove_master(struct sdw_bus *bus,
+ struct sdw_stream_runtime *stream);
+int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream);
+int sdw_prepare_stream(struct sdw_stream_runtime *stream);
+int sdw_enable_stream(struct sdw_stream_runtime *stream);
+int sdw_disable_stream(struct sdw_stream_runtime *stream);
+int sdw_deprepare_stream(struct sdw_stream_runtime *stream);
+
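
A hedged sketch of the stream lifecycle these prototypes imply, assuming a single Master port and a single Slave port; the configs and the surrounding function are hypothetical, and the DAI glue is omitted:

static int hypo_start_playback(struct sdw_bus *bus, struct sdw_slave *slave,
			       struct sdw_stream_config *scfg,
			       struct sdw_port_config *pcfg)
{
	static char name[] = "hypo-playback";
	struct sdw_stream_runtime *stream;
	int ret;

	stream = sdw_alloc_stream(name);
	if (!stream)
		return -ENOMEM;

	ret = sdw_stream_add_master(bus, scfg, pcfg, 1, stream);
	if (ret)
		goto release;

	ret = sdw_stream_add_slave(slave, scfg, pcfg, 1, stream);
	if (ret)
		goto remove_master;

	ret = sdw_prepare_stream(stream);
	if (ret)
		goto remove_slave;

	return sdw_enable_stream(stream);

remove_slave:
	sdw_stream_remove_slave(slave, stream);
remove_master:
	sdw_stream_remove_master(bus, stream);
release:
	sdw_release_stream(stream);
	return ret;
}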
/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 4b37528f592d..2b9573b8aedd 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -5,17 +5,31 @@
#define __SDW_INTEL_H
/**
+ * struct sdw_intel_ops: Intel audio driver callback ops
+ *
+ * @config_stream: configure the stream with the hw_params
+ */
+struct sdw_intel_ops {
+ int (*config_stream)(void *arg, void *substream,
+ void *dai, void *hw_params, int stream_num);
+};
+
+/**
* struct sdw_intel_res - Soundwire Intel resource structure
* @mmio_base: mmio base of SoundWire registers
* @irq: interrupt number
* @handle: ACPI parent handle
* @parent: parent device
+ * @ops: callback ops
+ * @arg: callback arg
*/
struct sdw_intel_res {
void __iomem *mmio_base;
int irq;
acpi_handle handle;
struct device *parent;
+ const struct sdw_intel_ops *ops;
+ void *arg;
};
void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res);
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
new file mode 100644
index 000000000000..bb4bd15ae1f6
--- /dev/null
+++ b/include/linux/spi/spi-mem.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Exceet Electronics GmbH
+ * Copyright (C) 2018 Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef __LINUX_SPI_MEM_H
+#define __LINUX_SPI_MEM_H
+
+#include <linux/spi/spi.h>
+
+#define SPI_MEM_OP_CMD(__opcode, __buswidth) \
+ { \
+ .buswidth = __buswidth, \
+ .opcode = __opcode, \
+ }
+
+#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
+ .val = __val, \
+ .buswidth = __buswidth, \
+ }
+
+#define SPI_MEM_OP_NO_ADDR { }
+
+#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ }
+
+#define SPI_MEM_OP_NO_DUMMY { }
+
+#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_IN, \
+ .nbytes = __nbytes, \
+ .buf.in = __buf, \
+ .buswidth = __buswidth, \
+ }
+
+#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_OUT, \
+ .nbytes = __nbytes, \
+ .buf.out = __buf, \
+ .buswidth = __buswidth, \
+ }
+
+#define SPI_MEM_OP_NO_DATA { }
+
+/**
+ * enum spi_mem_data_dir - describes the direction of a SPI memory data
+ * transfer from the controller perspective
+ * @SPI_MEM_DATA_IN: data coming from the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
+ */
+enum spi_mem_data_dir {
+ SPI_MEM_DATA_IN,
+ SPI_MEM_DATA_OUT,
+};
+
+/**
+ * struct spi_mem_op - describes a SPI memory operation
+ * @cmd.buswidth: number of IO lines used to transmit the command
+ * @cmd.opcode: operation opcode
+ * @addr.nbytes: number of address bytes to send. Can be zero if the operation
+ * does not need to send an address
+ * @addr.buswidth: number of IO lines used to transmit the address cycles
+ * @addr.val: address value. This value is always sent MSB first on the bus.
+ * Note that only @addr.nbytes are taken into account in this
+ * address value, so users should make sure the value fits in the
+ * assigned number of bytes.
+ * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
+ * be zero if the operation does not require dummy bytes
+ * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
+ * @data.buswidth: number of IO lanes used to send/receive the data
+ * @data.dir: direction of the transfer
+ * @data.buf.in: input buffer
+ * @data.buf.out: output buffer
+ */
+struct spi_mem_op {
+ struct {
+ u8 buswidth;
+ u8 opcode;
+ } cmd;
+
+ struct {
+ u8 nbytes;
+ u8 buswidth;
+ u64 val;
+ } addr;
+
+ struct {
+ u8 nbytes;
+ u8 buswidth;
+ } dummy;
+
+ struct {
+ u8 buswidth;
+ enum spi_mem_data_dir dir;
+ unsigned int nbytes;
+ /* buf.{in,out} must be DMA-able. */
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ } data;
+};
+
+#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \
+ { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .dummy = __dummy, \
+ .data = __data, \
+ }
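
Illustrative only: composing a classic single-line "read data" operation (opcode 0x03, 3 address bytes, no dummy cycles) with these helpers; the surrounding function and its parameters are hypothetical:

static int hypo_read(struct spi_mem *mem, u32 from, void *buf, size_t len)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
					  SPI_MEM_OP_ADDR(3, from, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(len, buf, 1));

	return spi_mem_exec_op(mem, &op);
}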
+
+/**
+ * struct spi_mem - describes a SPI memory device
+ * @spi: the underlying SPI device
+ * @drvpriv: spi_mem_driver private data
+ *
+ * Extra information that describes the SPI memory device and may be needed
+ * by the controller to properly handle this device should be placed here.
+ *
+ * One example would be the device size, since some controllers expose their
+ * SPI mem devices through an io-mapped region.
+ */
+struct spi_mem {
+ struct spi_device *spi;
+ void *drvpriv;
+};
+
+/**
+ * spi_mem_set_drvdata() - attach driver private data to a SPI mem
+ * device
+ * @mem: memory device
+ * @data: data to attach to the memory device
+ */
+static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
+{
+ mem->drvpriv = data;
+}
+
+/**
+ * spi_mem_get_drvdata() - get driver private data attached to a SPI mem
+ * device
+ * @mem: memory device
+ *
+ * Return: the data attached to the mem device.
+ */
+static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
+{
+ return mem->drvpriv;
+}
+
+/**
+ * struct spi_controller_mem_ops - SPI memory operations
+ * @adjust_op_size: shrink the data xfer of an operation to match the
+ * controller's limitations (can be alignment or max RX/TX size
+ * limitations)
+ * @supports_op: check if an operation is supported by the controller
+ * @exec_op: execute a SPI memory operation
+ *
+ * This interface should be implemented by SPI controllers providing a
+ * high-level interface to execute SPI memory operations, which is usually the
+ * case for QSPI controllers.
+ */
+struct spi_controller_mem_ops {
+ int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
+ bool (*supports_op)(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+ int (*exec_op)(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+};
+
+/**
+ * struct spi_mem_driver - SPI memory driver
+ * @spidrv: inherit from a SPI driver
+ * @probe: probe a SPI memory. Usually where detection/initialization takes
+ * place
+ * @remove: remove a SPI memory
+ * @shutdown: take appropriate action when the system is shutdown
+ *
+ * This is just a thin wrapper around a spi_driver. The core takes care of
+ * allocating the spi_mem object and forwarding the probe/remove/shutdown
+ * request to the spi_mem_driver. The reason we use this wrapper is that
+ * we might have to stuff more information into the spi_mem struct to let
+ * SPI controllers know more about the SPI memory they interact with, and
+ * having this intermediate layer allows us to do that without adding more
+ * useless fields to the spi_device object.
+ */
+struct spi_mem_driver {
+ struct spi_driver spidrv;
+ int (*probe)(struct spi_mem *mem);
+ int (*remove)(struct spi_mem *mem);
+ void (*shutdown)(struct spi_mem *mem);
+};
+
+#if IS_ENABLED(CONFIG_SPI_MEM)
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sg);
+
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sg);
+#else
+static inline int
+spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sg)
+{
+ return -ENOTSUPP;
+}
+
+static inline void
+spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sg)
+{
+}
+#endif /* CONFIG_SPI_MEM */
+
+int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
+
+bool spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+
+int spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
+ struct module *owner);
+
+void spi_mem_driver_unregister(struct spi_mem_driver *drv);
+
+#define spi_mem_driver_register(__drv) \
+ spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
+
+#define module_spi_mem_driver(__drv) \
+ module_driver(__drv, spi_mem_driver_register, \
+ spi_mem_driver_unregister)
+
+#endif /* __LINUX_SPI_MEM_H */
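
A sketch of a spi_mem client driver using the wrapper above; the driver name and callbacks are hypothetical:

static int hypo_mem_probe(struct spi_mem *mem)
{
	spi_mem_set_drvdata(mem, NULL);
	return 0;
}

static int hypo_mem_remove(struct spi_mem *mem)
{
	return 0;
}

static struct spi_mem_driver hypo_mem_driver = {
	.spidrv = {
		.driver = {
			.name = "hypo-spi-mem",
		},
	},
	.probe = hypo_mem_probe,
	.remove = hypo_mem_remove,
};
module_spi_mem_driver(hypo_mem_driver);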
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index bc6bb325d1bf..a64235e05321 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -26,7 +26,7 @@ struct dma_chan;
struct property_entry;
struct spi_controller;
struct spi_transfer;
-struct spi_flash_read_message;
+struct spi_controller_mem_ops;
/*
* INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
@@ -376,13 +376,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
+ * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
+ * This field is optional and should only be implemented if the
+ * controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @spi_flash_read: to support spi-controller hardwares that provide
- * accelerated interface to read from flash devices.
- * @spi_flash_can_dma: analogous to can_dma() interface, but for
- * controllers implementing spi_flash_read.
- * @flash_read_supported: spi device supports flash read
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -548,11 +546,6 @@ struct spi_controller {
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
int (*slave_abort)(struct spi_controller *ctlr);
- int (*spi_flash_read)(struct spi_device *spi,
- struct spi_flash_read_message *msg);
- bool (*spi_flash_can_dma)(struct spi_device *spi,
- struct spi_flash_read_message *msg);
- bool (*flash_read_supported)(struct spi_device *spi);
/*
* These hooks are for drivers that use a generic implementation
@@ -564,6 +557,9 @@ struct spi_controller {
void (*handle_err)(struct spi_controller *ctlr,
struct spi_message *message);
+ /* Optimized handlers for SPI memory-like operations. */
+ const struct spi_controller_mem_ops *mem_ops;
+
/* gpio chip select */
int *cs_gpios;
@@ -1183,48 +1179,6 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
return be16_to_cpu(result);
}
-/**
- * struct spi_flash_read_message - flash specific information for
- * spi-masters that provide accelerated flash read interfaces
- * @buf: buffer to read data
- * @from: offset within the flash from where data is to be read
- * @len: length of data to be read
- * @retlen: actual length of data read
- * @read_opcode: read_opcode to be used to communicate with flash
- * @addr_width: number of address bytes
- * @dummy_bytes: number of dummy bytes
- * @opcode_nbits: number of lines to send opcode
- * @addr_nbits: number of lines to send address
- * @data_nbits: number of lines for data
- * @rx_sg: Scatterlist for receive data read from flash
- * @cur_msg_mapped: message has been mapped for DMA
- */
-struct spi_flash_read_message {
- void *buf;
- loff_t from;
- size_t len;
- size_t retlen;
- u8 read_opcode;
- u8 addr_width;
- u8 dummy_bytes;
- u8 opcode_nbits;
- u8 addr_nbits;
- u8 data_nbits;
- struct sg_table rx_sg;
- bool cur_msg_mapped;
-};
-
-/* SPI core interface for flash read support */
-static inline bool spi_flash_read_supported(struct spi_device *spi)
-{
- return spi->controller->spi_flash_read &&
- (!spi->controller->flash_read_supported ||
- spi->controller->flash_read_supported(spi));
-}
-
-int spi_flash_read(struct spi_device *spi,
- struct spi_flash_read_message *msg);
-
/*---------------------------------------------------------------------------*/
/*
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4894d322d258..1e8a46435838 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -380,6 +380,24 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
+/**
+ * spin_is_locked() - Check whether a spinlock is locked.
+ * @lock: Pointer to the spinlock.
+ *
+ * This function is NOT required to provide any memory ordering
+ * guarantees; it could be used for debugging purposes or, when
+ * additional synchronization is needed, accompanied with other
+ * constructs (memory barriers) enforcing the synchronization.
+ *
+ * Returns: 1 if @lock is locked, 0 otherwise.
+ *
+ * Note that the function only tells you that the spinlock is
+ * seen to be locked, not that it is locked on your CPU.
+ *
+ * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
+ * the return value is always 0 (see include/linux/spinlock_up.h).
+ * Therefore you should not rely heavily on the return value.
+ */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 33c1c698df09..91494d7e8e41 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -69,11 +69,45 @@ struct srcu_struct { };
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
-void cleanup_srcu_struct(struct srcu_struct *sp);
+void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
+/**
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @sp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+static inline void cleanup_srcu_struct(struct srcu_struct *sp)
+{
+ _cleanup_srcu_struct(sp, false);
+}
+
+/**
+ * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
+ * @sp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory. Also,
+ * all grace-period processing must have completed.
+ *
+ * "Completed" means that the last synchronize_srcu() and
+ * synchronize_srcu_expedited() calls must have returned before the call
+ * to cleanup_srcu_struct_quiesced(). It also means that the callback
+ * from the last call_srcu() must have been invoked before the call to
+ * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help
+ * meet this last requirement.  Violating these rules will get you a WARN_ON() splat
+ * (with high probability, anyway), and will also cause the srcu_struct
+ * to be leaked.
+ */
+static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
+{
+ _cleanup_srcu_struct(sp, true);
+}
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
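A short sketch, assuming a module-local srcu_struct, of when the quiesced variant applies: all callbacks and grace-period work must be finished before the call.

#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* hypothetical */

static int my_init(void)
{
	return init_srcu_struct(&my_srcu);
}

static void my_exit(void)
{
	/* Flush outstanding call_srcu() callbacks and grace periods first;
	 * only then is the quiesced teardown legal. */
	srcu_barrier(&my_srcu);
	synchronize_srcu(&my_srcu);
	cleanup_srcu_struct_quiesced(&my_srcu);
}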
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 261471f407a5..f41d2fb09f87 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -43,7 +43,7 @@ struct srcu_struct {
void srcu_drive_gp(struct work_struct *wp);
-#define __SRCU_STRUCT_INIT(name) \
+#define __SRCU_STRUCT_INIT(name, __ignored) \
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
@@ -56,9 +56,9 @@ void srcu_drive_gp(struct work_struct *wp);
* Tree SRCU, which needs some per-CPU data.
*/
#define DEFINE_SRCU(name) \
- struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
#define DEFINE_STATIC_SRCU(name) \
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
void synchronize_srcu(struct srcu_struct *sp);
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 4eda108abee0..745d4ca4dd50 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -104,9 +104,9 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
-#define __SRCU_STRUCT_INIT(name) \
+#define __SRCU_STRUCT_INIT(name, pcpu_name) \
{ \
- .sda = &name##_srcu_data, \
+ .sda = &pcpu_name, \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
@@ -133,7 +133,7 @@ struct srcu_struct {
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_data)
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 03696c729fb4..6b792d080eee 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,7 +6,7 @@
#include <linux/sched.h>
#include <linux/random.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
# include <asm/stackprotector.h>
#else
static inline void boot_init_stack_canary(void)
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 22484e44544d..765573dc17d6 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -41,10 +41,10 @@ struct kstat {
kuid_t uid;
kgid_t gid;
loff_t size;
- struct timespec atime;
- struct timespec mtime;
- struct timespec ctime;
- struct timespec btime; /* File creation time */
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 ctime;
+ struct timespec64 btime; /* File creation time */
u64 blocks;
};
diff --git a/include/linux/ste_modem_shm.h b/include/linux/ste_modem_shm.h
deleted file mode 100644
index 8444a4eff1bb..000000000000
--- a/include/linux/ste_modem_shm.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2012
- * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#ifndef __INC_MODEM_DEV_H
-#define __INC_MODEM_DEV_H
-#include <linux/types.h>
-#include <linux/platform_device.h>
-
-struct ste_modem_device;
-
-/**
- * struct ste_modem_dev_cb - Callbacks for modem initiated events.
- * @kick: Called when the modem kicks the host.
- *
- * This structure contains callbacks for actions triggered by the modem.
- */
-struct ste_modem_dev_cb {
- void (*kick)(struct ste_modem_device *mdev, int notify_id);
-};
-
-/**
- * struct ste_modem_dev_ops - Functions to control modem and modem interface.
- *
- * @power: Main power switch, used for cold-start or complete power off.
- * @kick: Kick the modem.
- * @kick_subscribe: Subscribe for notifications from the modem.
- * @setup: Provide callback functions to modem device.
- *
- * This structure contains functions used by the ste remoteproc driver
- * to manage the modem.
- */
-struct ste_modem_dev_ops {
- int (*power)(struct ste_modem_device *mdev, bool on);
- int (*kick)(struct ste_modem_device *mdev, int notify_id);
- int (*kick_subscribe)(struct ste_modem_device *mdev, int notify_id);
- int (*setup)(struct ste_modem_device *mdev,
- struct ste_modem_dev_cb *cfg);
-};
-
-/**
- * struct ste_modem_device - represent the STE modem device
- * @pdev: Reference to platform device
- * @ops: Operations used to manage the modem.
- * @drv_data: Driver private data.
- */
-struct ste_modem_device {
- struct platform_device pdev;
- struct ste_modem_dev_ops ops;
- void *drv_data;
-};
-
-#endif /*INC_MODEM_DEV_H*/
diff --git a/include/linux/string.h b/include/linux/string.h
index dd39a690c841..4a5a0eb7df51 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -147,8 +147,8 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
-static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
- size_t cnt)
+static inline __must_check unsigned long memcpy_mcsafe(void *dst,
+ const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
return 0;
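With the return type now unsigned long (bytes left uncopied, 0 on success), a caller might check it as in this hedged sketch; copy_from_pmem() is a hypothetical name:

#include <linux/string.h>
#include <linux/errno.h>

static int copy_from_pmem(void *dst, const void *src, size_t len)
{
	/* A non-zero return means the copy was cut short by a machine check. */
	unsigned long rem = memcpy_mcsafe(dst, src, len);

	return rem ? -EIO : 0;
}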
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 4397c52ec4a4..d23c5030901a 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
struct file;
+struct task_struct;
/* Descriptions of the types of units to
* print in */
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
index e8f0f852968f..c0c5c5b73dc0 100644
--- a/include/linux/stringhash.h
+++ b/include/linux/stringhash.h
@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
* losing bits). This also has the property (wanted by the dcache)
* that the msbits make a good hash table index.
*/
-static inline unsigned long end_name_hash(unsigned long hash)
+static inline unsigned int end_name_hash(unsigned long hash)
{
- return __hash_32((unsigned int)hash);
+ return hash_long(hash, 32);
}
/*
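A brief sketch of the full hashing sequence these helpers belong to, assuming a dcache-style byte-at-a-time loop; hash_name() here is hypothetical:

#include <linux/stringhash.h>

static unsigned int hash_name(const void *salt, const char *name, size_t len)
{
	unsigned long hash = init_name_hash(salt);
	size_t i;

	for (i = 0; i < len; i++)
		hash = partial_name_hash((unsigned char)name[i], hash);

	return end_name_hash(hash);	/* now returns unsigned int */
}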
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a5704daf5df9..e90b9bd99ded 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -122,8 +122,6 @@ extern struct dentry *rpc_create_cache_dir(struct dentry *,
struct cache_detail *);
extern void rpc_remove_cache_dir(struct dentry *);
-extern int rpc_rmdir(struct dentry *dentry);
-
struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags);
void rpc_destroy_pipe_data(struct rpc_pipe *pipe);
extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *,
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 8f144db73e38..92d182fd8e3b 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (c) 2015-2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 7337e1221590..fd78f78df5c6 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
*
@@ -70,37 +71,16 @@ extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;
-/*
- * Contexts are built when an RDMA request is created and are a
- * record of the resources that can be recovered when the request
- * completes.
- */
-struct svc_rdma_op_ctxt {
- struct list_head list;
- struct xdr_buf arg;
- struct ib_cqe cqe;
- u32 byte_len;
- struct svcxprt_rdma *xprt;
- enum dma_data_direction direction;
- int count;
- unsigned int mapped_sges;
- int hdr_count;
- struct ib_send_wr send_wr;
- struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
- struct page *pages[RPCSVC_MAXPAGES];
-};
-
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
- int sc_max_sge;
+ int sc_max_send_sges;
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
atomic_t sc_sq_avail; /* SQEs ready to be consumed */
unsigned int sc_sq_depth; /* Depth of SQ */
- unsigned int sc_rq_depth; /* Depth of RQ */
__be32 sc_fc_credits; /* Forward credits */
u32 sc_max_requests; /* Max requests */
u32 sc_max_bc_requests;/* Backward credits */
@@ -109,9 +89,8 @@ struct svcxprt_rdma {
struct ib_pd *sc_pd;
- spinlock_t sc_ctxt_lock;
- struct list_head sc_ctxts;
- int sc_ctxt_used;
+ spinlock_t sc_send_lock;
+ struct list_head sc_send_ctxts;
spinlock_t sc_rw_ctxt_lock;
struct list_head sc_rw_ctxts;
@@ -127,6 +106,9 @@ struct svcxprt_rdma {
unsigned long sc_flags;
struct list_head sc_read_complete_q;
struct work_struct sc_work;
+
+ spinlock_t sc_recv_lock;
+ struct list_head sc_recv_ctxts;
};
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
@@ -141,12 +123,30 @@ struct svcxprt_rdma {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
-/* Track DMA maps for this transport and context */
-static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
- struct svc_rdma_op_ctxt *ctxt)
-{
- ctxt->mapped_sges++;
-}
+struct svc_rdma_recv_ctxt {
+ struct list_head rc_list;
+ struct ib_recv_wr rc_recv_wr;
+ struct ib_cqe rc_cqe;
+ struct ib_sge rc_recv_sge;
+ void *rc_recv_buf;
+ struct xdr_buf rc_arg;
+ bool rc_temp;
+ u32 rc_byte_len;
+ unsigned int rc_page_count;
+ unsigned int rc_hdr_count;
+ struct page *rc_pages[RPCSVC_MAXPAGES];
+};
+
+struct svc_rdma_send_ctxt {
+ struct list_head sc_list;
+ struct ib_send_wr sc_send_wr;
+ struct ib_cqe sc_cqe;
+ void *sc_xprt_buf;
+ int sc_page_count;
+ int sc_cur_sge_no;
+ struct page *sc_pages[RPCSVC_MAXPAGES];
+ struct ib_sge sc_sges[];
+};
/* svc_rdma_backchannel.c */
extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
@@ -154,13 +154,18 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
struct xdr_buf *rcvbuf);
/* svc_rdma_recvfrom.c */
+extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
+extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
+extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
+ struct svc_rdma_recv_ctxt *ctxt);
+extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head, __be32 *p);
+ struct svc_rdma_recv_ctxt *head, __be32 *p);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
__be32 *wr_ch, struct xdr_buf *xdr);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
@@ -168,24 +173,22 @@ extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
struct xdr_buf *xdr);
/* svc_rdma_sendto.c */
-extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
- struct svc_rdma_op_ctxt *ctxt,
- __be32 *rdma_resp, unsigned int len);
-extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
- struct svc_rdma_op_ctxt *ctxt,
- int num_sge, u32 inv_rkey);
+extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
+extern struct svc_rdma_send_ctxt *
+ svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
+extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
+extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
+extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt,
+ unsigned int len);
+extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt,
+ struct xdr_buf *xdr, __be32 *wr_lst);
extern int svc_rdma_sendto(struct svc_rqst *);
/* svc_rdma_transport.c */
-extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
-extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
-extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
-extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
-extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
-extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
-extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
-extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
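A rough sketch of how the new send-context API might fit together, inferred only from the declarations above; error handling is abbreviated and send_reply() is hypothetical:

#include <linux/sunrpc/svc_rdma.h>

static int send_reply(struct svcxprt_rdma *rdma, struct xdr_buf *xdr)
{
	struct svc_rdma_send_ctxt *ctxt;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;

	ret = svc_rdma_map_reply_msg(rdma, ctxt, xdr, NULL);
	if (ret < 0)
		goto out_put;

	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
	if (ret)
		goto out_put;
	return 0;

out_put:
	svc_rdma_send_ctxt_put(rdma, ctxt);
	return ret;
}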
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5fea0fb420df..336fd1a19cca 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -84,7 +84,6 @@ struct rpc_rqst {
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
struct list_head rq_list;
- void *rq_xprtdata; /* Per-xprt private data */
void *rq_buffer; /* Call XDR encode buffer */
size_t rq_callsize;
void *rq_rbuffer; /* Reply XDR decode buffer */
@@ -127,6 +126,8 @@ struct rpc_xprt_ops {
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*free_slot)(struct rpc_xprt *xprt,
+ struct rpc_rqst *req);
void (*rpcbind)(struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -324,10 +325,13 @@ struct xprt_class {
struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
void xprt_connect(struct rpc_task *task);
void xprt_reserve(struct rpc_task *task);
+void xprt_request_init(struct rpc_task *task);
void xprt_retry_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_free_slot(struct rpc_xprt *xprt,
+ struct rpc_rqst *req);
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 5859563e3c1f..86fc38ff0355 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
diff --git a/include/linux/swait.h b/include/linux/swait.h
index c98aaf677466..bf8cb0dee23c 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -5,10 +5,23 @@
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <asm/current.h>
/*
- * Simple wait queues
+ * BROKEN wait-queues.
+ *
+ * These "simple" wait-queues are broken garbage, and should never be
+ * used. The comments below claim that they are "similar" to regular
+ * wait-queues, but the semantics are actually completely different, and
+ * every single user we have ever had has been buggy (or pointless).
+ *
+ * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * "wake_up()" does, and has led to problems. In other cases, it has
+ * been fine, because there's only ever one waiter (kvm), but in that
+ * case the whole "simple" wait-queue is just pointless to begin with,
+ * since there is no "queue". Use "wake_up_process()" with a direct
+ * pointer instead.
*
* While these are very similar to regular wait queues (wait.h) the most
* important difference is that the simple waitqueue allows for deterministic
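A minimal sketch of the single-waiter pattern the new comment recommends instead of swait, assuming the waiter pointer is published before the wakeup side can run:

#include <linux/sched.h>
#include <linux/compiler.h>

static struct task_struct *waiter;	/* hypothetical */
static bool event_done;

static void wait_for_event(void)
{
	waiter = current;
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (!READ_ONCE(event_done)) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static void signal_event(void)
{
	WRITE_ONCE(event_done, true);
	wake_up_process(waiter);	/* direct pointer, no queue */
}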
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2417d288e016..c063443d8638 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -53,7 +53,7 @@ static inline int current_is_kswapd(void)
/*
* Unaddressable device memory support. See include/linux/hmm.h and
- * Documentation/vm/hmm.txt. Short description is we need struct pages for
+ * Documentation/vm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 70fcda1a9049..73810808cdf2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -66,6 +66,7 @@ struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
struct sigaltstack;
+struct rseq;
union bpf_attr;
#include <linux/types.h>
@@ -290,6 +291,12 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id,
long nr,
struct io_event __user *events,
struct timespec __user *timeout);
+asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
+ long min_nr,
+ long nr,
+ struct io_event __user *events,
+ struct timespec __user *timeout,
+ const struct __aio_sigset *sig);
/* fs/xattr.c */
asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
@@ -536,7 +543,8 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
/* kernel/hrtimer.c */
-asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp);
+asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
+ struct __kernel_timespec __user *rmtp);
/* kernel/itimer.c */
asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
@@ -567,14 +575,14 @@ asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
struct itimerspec __user *old_setting);
asmlinkage long sys_timer_delete(timer_t timer_id);
asmlinkage long sys_clock_settime(clockid_t which_clock,
- const struct timespec __user *tp);
+ const struct __kernel_timespec __user *tp);
asmlinkage long sys_clock_gettime(clockid_t which_clock,
- struct timespec __user *tp);
+ struct __kernel_timespec __user *tp);
asmlinkage long sys_clock_getres(clockid_t which_clock,
- struct timespec __user *tp);
+ struct __kernel_timespec __user *tp);
asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
- const struct timespec __user *rqtp,
- struct timespec __user *rmtp);
+ const struct __kernel_timespec __user *rqtp,
+ struct __kernel_timespec __user *rmtp);
/* kernel/printk.c */
asmlinkage long sys_syslog(int type, char __user *buf, int len);
@@ -679,8 +687,8 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info);
/* ipc/mqueue.c */
asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
asmlinkage long sys_mq_unlink(const char __user *name);
-asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
-asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
+asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct __kernel_timespec __user *abs_timeout);
+asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout);
asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
@@ -697,7 +705,7 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg);
asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
unsigned nsops,
- const struct timespec __user *timeout);
+ const struct __kernel_timespec __user *timeout);
asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
unsigned nsops);
@@ -890,7 +898,8 @@ asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
asmlinkage long sys_pkey_free(int pkey);
asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
-
+asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
+ int flags, uint32_t sig);
/*
* Architecture-specific system calls
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8f4c54986f97..72705eaf4b84 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -218,6 +218,7 @@ struct tcp_sock {
reord:1; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
+ u8 compressed_ack;
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
@@ -228,7 +229,7 @@ struct tcp_sock {
unused:2;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
- unused1 : 1,
+ recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
repair : 1,
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
@@ -281,6 +282,7 @@ struct tcp_sock {
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 delivered_ce; /* Like the above but only ECE marked packets */
u32 lost; /* Total data packets lost incl. rexmits */
u32 app_limited; /* limited until "delivered" reaches this val */
u64 first_tx_mstamp; /* start of window send phase */
@@ -296,6 +298,7 @@ struct tcp_sock {
u32 sacked_out; /* SACK'd packets */
struct hrtimer pacing_timer;
+ struct hrtimer compressed_ack_timer;
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 0494db3fd9e8..13770cfe33ad 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -62,7 +62,7 @@ struct ts_config
int flags;
/**
- * get_next_block - fetch next block of data
+ * @get_next_block: fetch next block of data
* @consumed: number of bytes consumed by the caller
* @dst: destination buffer
* @conf: search configuration
@@ -79,7 +79,7 @@ struct ts_config
struct ts_state *state);
/**
- * finish - finalize/clean a series of get_next_block() calls
+ * @finish: finalize/clean a series of get_next_block() calls
* @conf: search configuration
* @state: search state
*
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 7834be668d80..5f4705f46c2f 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -1,25 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* thermal.h ($Revision: 0 $)
*
* Copyright (C) 2008 Intel Corp
* Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
* Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef __THERMAL_H__
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 34f053a150a9..8d8821b3689a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -43,11 +43,7 @@ enum {
#define THREAD_ALIGN THREAD_SIZE
#endif
-#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
-#else
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
-#endif
+#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
/*
* flag set/clear/test wrappers
@@ -64,6 +60,15 @@ static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
clear_bit(flag, (unsigned long *)&ti->flags);
}
+static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
+ bool value)
+{
+ if (value)
+ set_ti_thread_flag(ti, flag);
+ else
+ clear_ti_thread_flag(ti, flag);
+}
+
static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag, (unsigned long *)&ti->flags);
@@ -83,6 +88,8 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
clear_ti_thread_flag(current_thread_info(), flag)
+#define update_thread_flag(flag, value) \
+ update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
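A one-line illustration of the new helper, mirroring a boolean into a TIF flag instead of open-coding set/clear branches; TIF_SINGLESTEP is an arch-defined flag used here only as an example:

#include <linux/thread_info.h>

static void set_single_step(bool enable)
{
	update_thread_flag(TIF_SINGLESTEP, enable);
}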
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
index 45bc6b376492..53604b087f2c 100644
--- a/include/linux/ti-emif-sram.h
+++ b/include/linux/ti-emif-sram.h
@@ -60,6 +60,81 @@ struct ti_emif_pm_functions {
u32 abort_sr;
} __packed __aligned(8);
+static inline void ti_emif_asm_offsets(void)
+{
+ DEFINE(EMIF_SDCFG_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_sdcfg_val));
+ DEFINE(EMIF_TIMING1_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_timing1_val));
+ DEFINE(EMIF_TIMING2_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_timing2_val));
+ DEFINE(EMIF_TIMING3_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_timing3_val));
+ DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
+ DEFINE(EMIF_ZQCFG_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_zqcfg_val));
+ DEFINE(EMIF_PMCR_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_pmcr_val));
+ DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
+ DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
+ DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
+ DEFINE(EMIF_COS_CONFIG_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_cos_config));
+ DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
+ DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
+ DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
+ DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_ocp_config_val));
+ DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
+ DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
+ DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
+ DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
+ DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
+ DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
+ offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
+ DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
+
+ BLANK();
+
+ DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
+ offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
+ DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
+ offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
+ DEFINE(EMIF_PM_CONFIG_OFFSET,
+ offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
+ DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
+ offsetof(struct ti_emif_pm_data, regs_virt));
+ DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
+ offsetof(struct ti_emif_pm_data, regs_phys));
+ DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
+
+ BLANK();
+
+ DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
+ offsetof(struct ti_emif_pm_functions, save_context));
+ DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
+ offsetof(struct ti_emif_pm_functions, restore_context));
+ DEFINE(EMIF_PM_ENTER_SR_OFFSET,
+ offsetof(struct ti_emif_pm_functions, enter_sr));
+ DEFINE(EMIF_PM_EXIT_SR_OFFSET,
+ offsetof(struct ti_emif_pm_functions, exit_sr));
+ DEFINE(EMIF_PM_ABORT_SR_OFFSET,
+ offsetof(struct ti_emif_pm_functions, abort_sr));
+ DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
+}
+
struct gen_pool;
int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
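This helper is meant to be called from an asm-offsets style generator, where DEFINE() and BLANK() come from <linux/kbuild.h>; a sketch of such a caller, with the usual generated-header flow assumed:

#include <linux/kbuild.h>
#include <linux/ti-emif-sram.h>

int main(void)
{
	ti_emif_asm_offsets();	/* emits the EMIF_* constants for asm code */
	return 0;
}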
diff --git a/include/linux/time.h b/include/linux/time.h
index 4b62a2c0a661..aed74463592d 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -10,9 +10,9 @@
extern struct timezone sys_tz;
int get_timespec64(struct timespec64 *ts,
- const struct timespec __user *uts);
+ const struct __kernel_timespec __user *uts);
int put_timespec64(const struct timespec64 *ts,
- struct timespec __user *uts);
+ struct __kernel_timespec __user *uts);
int get_itimerspec64(struct itimerspec64 *it,
const struct itimerspec __user *uit);
int put_itimerspec64(const struct itimerspec64 *it,
diff --git a/include/linux/time32.h b/include/linux/time32.h
index d2bcd4377b56..0b14f936100a 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -18,25 +18,14 @@
/* timespec64 is defined as timespec here */
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
- return ts64;
+ return *(const struct timespec *)&ts64;
}
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
{
- return ts;
+ return *(const struct timespec64 *)&ts;
}
-# define timespec_equal timespec64_equal
-# define timespec_compare timespec64_compare
-# define set_normalized_timespec set_normalized_timespec64
-# define timespec_add timespec64_add
-# define timespec_sub timespec64_sub
-# define timespec_valid timespec64_valid
-# define timespec_valid_strict timespec64_valid_strict
-# define timespec_to_ns timespec64_to_ns
-# define ns_to_timespec ns_to_timespec64
-# define timespec_add_ns timespec64_add_ns
-
#else
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
@@ -55,6 +44,7 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
ret.tv_nsec = ts.tv_nsec;
return ret;
}
+#endif
static inline int timespec_equal(const struct timespec *a,
const struct timespec *b)
@@ -159,8 +149,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
a->tv_nsec = ns;
}
-#endif
-
/**
* time_to_tm - converts the calendar time to local broken-down time
*
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 93d39499838e..0a7b2f79cec7 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -2,17 +2,20 @@
#ifndef _LINUX_TIME64_H
#define _LINUX_TIME64_H
-#include <uapi/linux/time.h>
#include <linux/math64.h>
typedef __s64 time64_t;
typedef __u64 timeu64_t;
-#if __BITS_PER_LONG == 64
-/* this trick allows us to optimize out timespec64_to_timespec */
-# define timespec64 timespec
-#define itimerspec64 itimerspec
-#else
+/* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path
+ * and 32-bit emulation.
+ */
+#ifndef CONFIG_64BIT_TIME
+#define __kernel_timespec timespec
+#endif
+
+#include <uapi/linux/time.h>
+
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
@@ -23,8 +26,6 @@ struct itimerspec64 {
struct timespec64 it_value;
};
-#endif
-
/* Parameters used to convert the timespec values: */
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 4b3dca173e89..7acb953298a7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -52,7 +52,6 @@ struct tk_read_base {
* @offs_real: Offset clock monotonic -> clock realtime
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
- * @time_suspended: Accumulated suspend time
* @tai_offset: The current UTC to TAI offset in seconds
* @clock_was_set_seq: The sequence number of clock was set events
* @cs_was_changed_seq: The sequence number of clocksource change events
@@ -95,7 +94,6 @@ struct timekeeper {
ktime_t offs_real;
ktime_t offs_boot;
ktime_t offs_tai;
- ktime_t time_suspended;
s32 tai_offset;
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9737fbec7019..86bc2026efce 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -19,53 +19,43 @@ extern void xtime_update(unsigned long ticks);
extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
-/*
- * Kernel time accessors
- */
-struct timespec64 current_kernel_time64(void);
/*
* timespec64 based interfaces
*/
-struct timespec64 get_monotonic_coarse64(void);
-extern void getrawmonotonic64(struct timespec64 *ts);
+extern void ktime_get_raw_ts64(struct timespec64 *ts);
extern void ktime_get_ts64(struct timespec64 *ts);
+extern void ktime_get_real_ts64(struct timespec64 *tv);
+extern void ktime_get_coarse_ts64(struct timespec64 *ts);
+extern void ktime_get_coarse_real_ts64(struct timespec64 *ts);
+
+void getboottime64(struct timespec64 *ts);
+
+/*
+ * time64_t base interfaces
+ */
extern time64_t ktime_get_seconds(void);
extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);
-extern void ktime_get_active_ts64(struct timespec64 *ts);
-
-extern int __getnstimeofday64(struct timespec64 *tv);
-extern void getnstimeofday64(struct timespec64 *tv);
-extern void getboottime64(struct timespec64 *ts);
-
-#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
-
-/* Clock BOOTTIME compatibility wrappers */
-static inline void get_monotonic_boottime64(struct timespec64 *ts)
-{
- ktime_get_ts64(ts);
-}
/*
* ktime_t based interfaces
*/
+
enum tk_offsets {
TK_OFFS_REAL,
+ TK_OFFS_BOOT,
TK_OFFS_TAI,
TK_OFFS_MAX,
};
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
+extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
extern u32 ktime_get_resolution_ns(void);
-/* Clock BOOTTIME compatibility wrappers */
-static inline ktime_t ktime_get_boottime(void) { return ktime_get(); }
-static inline u64 ktime_get_boot_ns(void) { return ktime_get(); }
-
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
*/
@@ -74,6 +64,27 @@ static inline ktime_t ktime_get_real(void)
return ktime_get_with_offset(TK_OFFS_REAL);
}
+static inline ktime_t ktime_get_coarse_real(void)
+{
+ return ktime_get_coarse_with_offset(TK_OFFS_REAL);
+}
+
+/**
+ * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ *
+ * This is similar to CLOCK_MONOTONIC/ktime_get, but also includes the
+ * time spent in suspend.
+ */
+static inline ktime_t ktime_get_boottime(void)
+{
+ return ktime_get_with_offset(TK_OFFS_BOOT);
+}
+
+static inline ktime_t ktime_get_coarse_boottime(void)
+{
+ return ktime_get_coarse_with_offset(TK_OFFS_BOOT);
+}
+
/**
* ktime_get_clocktai - Returns the TAI time of day in ktime_t format
*/
@@ -82,6 +93,11 @@ static inline ktime_t ktime_get_clocktai(void)
return ktime_get_with_offset(TK_OFFS_TAI);
}
+static inline ktime_t ktime_get_coarse_clocktai(void)
+{
+ return ktime_get_coarse_with_offset(TK_OFFS_TAI);
+}
+
/**
* ktime_mono_to_real - Convert monotonic time to clock realtime
*/
@@ -100,6 +116,11 @@ static inline u64 ktime_get_real_ns(void)
return ktime_to_ns(ktime_get_real());
}
+static inline u64 ktime_get_boot_ns(void)
+{
+ return ktime_to_ns(ktime_get_boottime());
+}
+
static inline u64 ktime_get_tai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
@@ -112,16 +133,44 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
/*
- * timespec64 interfaces utilizing the ktime based ones
+ * timespec64/time64_t interfaces utilizing the ktime based ones
+ * for API completeness, these could be implemented more efficiently
+ * if needed.
*/
-static inline void timekeeping_clocktai64(struct timespec64 *ts)
+static inline void ktime_get_boottime_ts64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_boottime());
+}
+
+static inline void ktime_get_coarse_boottime_ts64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_coarse_boottime());
+}
+
+static inline time64_t ktime_get_boottime_seconds(void)
+{
+ return ktime_divns(ktime_get_coarse_boottime(), NSEC_PER_SEC);
+}
+
+static inline void ktime_get_clocktai_ts64(struct timespec64 *ts)
{
*ts = ktime_to_timespec64(ktime_get_clocktai());
}
+static inline void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_coarse_clocktai());
+}
+
+static inline time64_t ktime_get_clocktai_seconds(void)
+{
+ return ktime_divns(ktime_get_coarse_clocktai(), NSEC_PER_SEC);
+}
+
/*
* RTC specific
*/
@@ -197,5 +246,30 @@ extern void read_persistent_clock64(struct timespec64 *ts);
extern void read_boot_clock64(struct timespec64 *ts);
extern int update_persistent_clock64(struct timespec64 now);
+/*
+ * deprecated aliases, don't use in new code
+ */
+#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
+#define get_monotonic_boottime64(ts) ktime_get_boottime_ts64(ts)
+#define getrawmonotonic64(ts) ktime_get_raw_ts64(ts)
+#define timekeeping_clocktai64(ts) ktime_get_clocktai_ts64(ts)
+
+static inline struct timespec64 current_kernel_time64(void)
+{
+ struct timespec64 ts;
+
+ ktime_get_coarse_real_ts64(&ts);
+
+ return ts;
+}
+
+static inline struct timespec64 get_monotonic_coarse64(void)
+{
+ struct timespec64 ts;
+
+ ktime_get_coarse_ts64(&ts);
+
+ return ts;
+}
#endif
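A short sketch of the accessors this rework steers callers toward; the deprecated aliases above keep old names building, but new code would read more like:

#include <linux/timekeeping.h>
#include <linux/printk.h>

static void sample_clocks(void)
{
	struct timespec64 wall, boot;

	ktime_get_real_ts64(&wall);	/* was getnstimeofday64() */
	ktime_get_boottime_ts64(&boot);	/* monotonic plus suspend time */
	pr_debug("wall %lld.%09ld boot %lld.%09ld\n",
		 (long long)wall.tv_sec, wall.tv_nsec,
		 (long long)boot.tv_sec, boot.tv_nsec);
}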
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index af4114d5dc17..8762c2f45f8b 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -9,60 +9,15 @@
extern void do_gettimeofday(struct timeval *tv);
unsigned long get_seconds(void);
-/* does not take xtime_lock */
-struct timespec __current_kernel_time(void);
-
static inline struct timespec current_kernel_time(void)
{
- struct timespec64 now = current_kernel_time64();
-
- return timespec64_to_timespec(now);
-}
-
-#if BITS_PER_LONG == 64
-/**
- * Deprecated. Use do_settimeofday64().
- */
-static inline int do_settimeofday(const struct timespec *ts)
-{
- return do_settimeofday64(ts);
-}
-
-static inline int __getnstimeofday(struct timespec *ts)
-{
- return __getnstimeofday64(ts);
-}
-
-static inline void getnstimeofday(struct timespec *ts)
-{
- getnstimeofday64(ts);
-}
-
-static inline void ktime_get_ts(struct timespec *ts)
-{
- ktime_get_ts64(ts);
-}
-
-static inline void ktime_get_real_ts(struct timespec *ts)
-{
- getnstimeofday64(ts);
-}
+ struct timespec64 ts64;
-static inline void getrawmonotonic(struct timespec *ts)
-{
- getrawmonotonic64(ts);
-}
+ ktime_get_coarse_real_ts64(&ts64);
-static inline struct timespec get_monotonic_coarse(void)
-{
- return get_monotonic_coarse64();
+ return timespec64_to_timespec(ts64);
}
-static inline void getboottime(struct timespec *ts)
-{
- return getboottime64(ts);
-}
-#else
/**
* Deprecated. Use do_settimeofday64().
*/
@@ -74,20 +29,11 @@ static inline int do_settimeofday(const struct timespec *ts)
return do_settimeofday64(&ts64);
}
-static inline int __getnstimeofday(struct timespec *ts)
-{
- struct timespec64 ts64;
- int ret = __getnstimeofday64(&ts64);
-
- *ts = timespec64_to_timespec(ts64);
- return ret;
-}
-
static inline void getnstimeofday(struct timespec *ts)
{
struct timespec64 ts64;
- getnstimeofday64(&ts64);
+ ktime_get_real_ts64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
@@ -103,7 +49,7 @@ static inline void ktime_get_real_ts(struct timespec *ts)
{
struct timespec64 ts64;
- getnstimeofday64(&ts64);
+ ktime_get_real_ts64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
@@ -111,13 +57,17 @@ static inline void getrawmonotonic(struct timespec *ts)
{
struct timespec64 ts64;
- getrawmonotonic64(&ts64);
+ ktime_get_raw_ts64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
static inline struct timespec get_monotonic_coarse(void)
{
- return timespec64_to_timespec(get_monotonic_coarse64());
+ struct timespec64 ts64;
+
+ ktime_get_coarse_ts64(&ts64);
+
+ return timespec64_to_timespec(ts64);
}
static inline void getboottime(struct timespec *ts)
@@ -127,7 +77,6 @@ static inline void getboottime(struct timespec *ts)
getboottime64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
-#endif
/*
* Timespec interfaces utilizing the ktime based ones
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 2448f9cc48a3..7b066fd38248 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -8,8 +8,6 @@
#include <linux/debugobjects.h>
#include <linux/stringify.h>
-struct tvec_base;
-
struct timer_list {
/*
* All fields that change during normal runtime grouped to the
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 0d2d3da46139..c7dc2b5902c0 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -23,8 +23,10 @@ struct tnum tnum_range(u64 min, u64 max);
/* Arithmetic and logical ops */
/* Shift a tnum left (by a fixed shift) */
struct tnum tnum_lshift(struct tnum a, u8 shift);
-/* Shift a tnum right (by a fixed shift) */
+/* Shift (rsh) a tnum right (by a fixed shift) */
struct tnum tnum_rshift(struct tnum a, u8 shift);
+/* Shift (arsh) a tnum right (by a fixed min_shift) */
+struct tnum tnum_arshift(struct tnum a, u8 min_shift);
/* Add two tnums, return @a + @b */
struct tnum tnum_add(struct tnum a, struct tnum b);
/* Subtract two tnums, return @a - @b */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 2bde3eff564c..78a010e19ed4 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -435,8 +435,7 @@ event_triggers_call(struct trace_event_file *file, void *rec,
struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
- enum event_trigger_type tt,
- void *rec, struct ring_buffer_event *event);
+ enum event_trigger_type tt);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
@@ -473,6 +472,9 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
+int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+ u32 *fd_type, const char **buf,
+ u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -504,6 +506,13 @@ static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name
{
return NULL;
}
+static inline int bpf_get_perf_event_info(const struct perf_event *event,
+ u32 *prog_id, u32 *fd_type,
+ const char **buf, u64 *probe_offset,
+ u64 *probe_addr)
+{
+ return -EOPNOTSUPP;
+}
#endif
enum {
@@ -560,10 +569,17 @@ extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
+extern int bpf_get_kprobe_info(const struct perf_event *event,
+ u32 *fd_type, const char **symbol,
+ u64 *probe_offset, u64 *probe_addr,
+ bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
+extern int bpf_get_uprobe_info(const struct perf_event *event,
+ u32 *fd_type, const char **filename,
+ u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 26c152122a42..4a8841963c2e 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -124,6 +124,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
if (step) {
siginfo_t info;
+ clear_siginfo(&info);
user_single_step_siginfo(current, regs, &info);
force_sig_info(SIGTRAP, &info, current);
return;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c94f466d57ef..19a690b559ca 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -4,7 +4,7 @@
/*
* Kernel Tracepoint API.
*
- * See Documentation/trace/tracepoints.txt.
+ * See Documentation/trace/tracepoints.rst.
*
* Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 47f8af22f216..c56e3978b00f 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -10,6 +10,7 @@
#include <linux/tty_ldisc.h>
#include <linux/mutex.h>
#include <linux/tty_flags.h>
+#include <linux/seq_file.h>
#include <uapi/linux/tty.h>
#include <linux/rwsem.h>
#include <linux/llist.h>
@@ -527,7 +528,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
}
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
-extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
+extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
@@ -535,7 +536,7 @@ extern void tty_ldisc_deref(struct tty_ldisc *);
extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
extern int tty_ldisc_reinit(struct tty_struct *tty, int disc);
-extern const struct file_operations tty_ldiscs_proc_fops;
+extern const struct seq_operations tty_ldiscs_seq_ops;
extern void tty_wakeup(struct tty_struct *tty);
extern void tty_ldisc_flush(struct tty_struct *tty);
@@ -701,7 +702,7 @@ extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int disc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 31c2b5b166de..71dbc891851a 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -293,7 +293,7 @@ struct tty_operations {
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
- const struct file_operations *proc_fops;
+ int (*proc_show)(struct seq_file *, void *);
} __randomize_layout;
struct tty_driver {
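With proc_fops replaced by proc_show, a tty driver now supplies just a seq_file show routine; a hedged sketch with hypothetical names and the other tty_operations members omitted:

#include <linux/seq_file.h>
#include <linux/tty_driver.h>

static int my_tty_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example driver state\n");
	return 0;
}

static const struct tty_operations my_tty_ops = {
	.proc_show = my_tty_proc_show,
};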
diff --git a/include/linux/types.h b/include/linux/types.h
index ec13d02b3481..9834e90aa010 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -10,14 +10,14 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-typedef __u32 __kernel_dev_t;
+typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
-typedef __u32 nlink_t;
+typedef u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
@@ -95,29 +95,29 @@ typedef unsigned long ulong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
-typedef __u8 u_int8_t;
-typedef __s8 int8_t;
-typedef __u16 u_int16_t;
-typedef __s16 int16_t;
-typedef __u32 u_int32_t;
-typedef __s32 int32_t;
+typedef u8 u_int8_t;
+typedef s8 int8_t;
+typedef u16 u_int16_t;
+typedef s16 int16_t;
+typedef u32 u_int32_t;
+typedef s32 int32_t;
#endif /* !(__BIT_TYPES_DEFINED__) */
-typedef __u8 uint8_t;
-typedef __u16 uint16_t;
-typedef __u32 uint32_t;
+typedef u8 uint8_t;
+typedef u16 uint16_t;
+typedef u32 uint32_t;
#if defined(__GNUC__)
-typedef __u64 uint64_t;
-typedef __u64 u_int64_t;
-typedef __s64 int64_t;
+typedef u64 uint64_t;
+typedef u64 u_int64_t;
+typedef s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
-#define aligned_u64 __u64 __attribute__((aligned(8)))
-#define aligned_be64 __be64 __attribute__((aligned(8)))
-#define aligned_le64 __le64 __attribute__((aligned(8)))
+#define aligned_u64 __aligned_u64
+#define aligned_be64 __aligned_be64
+#define aligned_le64 __aligned_le64
/**
* The type used for indexing onto a disc or disc partition.
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 07ee0f84a46c..a27604f99ed0 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -112,20 +112,6 @@ u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
#endif
}
-static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- raw_write_seqcount_begin(&syncp->seq);
-#endif
-}
-
-static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- raw_write_seqcount_end(&syncp->seq);
-#endif
-}
-
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index eaea63bc79bb..ca840345571b 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -55,6 +55,7 @@ struct udp_sock {
* when the socket is uncorked.
*/
__u16 len; /* total length of pending frames */
+ __u16 gso_size;
/*
* Fields specific to UDP-Lite.
*/
@@ -87,6 +88,8 @@ struct udp_sock {
int forward_deficit;
};
+#define UDP_MAX_SEGMENTS (1 << 6UL)
+
static inline struct udp_sock *udp_sk(const struct sock *sk)
{
return (struct udp_sock *)sk;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e67e12adb136..409c845d4cd3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -154,6 +154,12 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
+#else
+#define _copy_to_iter_mcsafe _copy_to_iter
+#endif
+
static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
@@ -163,6 +169,15 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
return _copy_from_iter_flushcache(addr, bytes, i);
}
+static __always_inline __must_check
+size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return 0;
+ else
+ return _copy_to_iter_mcsafe(addr, bytes, i);
+}
+
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
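A hedged sketch of a read path using the new MC-safe iterator copy; a short return indicates the transfer was truncated by a machine check, and read_into_iter() is a hypothetical name:

#include <linux/uio.h>
#include <linux/errno.h>

static ssize_t read_into_iter(void *src, size_t len, struct iov_iter *iter)
{
	size_t copied = copy_to_iter_mcsafe(src, len, iter);

	return copied ? copied : -EIO;
}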
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 3c85c81b0027..6c5f2074e14f 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -14,6 +14,7 @@
#ifndef _UIO_DRIVER_H_
#define _UIO_DRIVER_H_
+#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -68,12 +69,13 @@ struct uio_port {
struct uio_device {
struct module *owner;
- struct device *dev;
+ struct device dev;
int minor;
atomic_t event;
struct fasync_struct *async_queue;
wait_queue_head_t wait;
struct uio_info *info;
+ spinlock_t info_lock;
struct kobject *map_dir;
struct kobject *portio_dir;
};
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 244aff638220..5c812acbb80a 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -22,8 +22,10 @@ struct subprocess_info {
const char *path;
char **argv;
char **envp;
+ struct file *file;
int wait;
int retval;
+ pid_t pid;
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
@@ -38,6 +40,16 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);
+struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
+ int (*init)(struct subprocess_info *info, struct cred *new),
+ void (*cleanup)(struct subprocess_info *), void *data);
+struct umh_info {
+ struct file *pipe_to_umh;
+ struct file *pipe_from_umh;
+ pid_t pid;
+};
+int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
+
extern int
call_usermodehelper_exec(struct subprocess_info *info, int wait);
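A sketch of driving the new usermode-blob helper; blob and blob_len stand in for an embedded ELF image and are assumptions, not part of this patch:

#include <linux/umh.h>

static struct umh_info my_umh;

static int start_helper(void *blob, size_t blob_len)
{
	int err = fork_usermode_blob(blob, blob_len, &my_umh);

	if (err)
		return err;

	/* my_umh.pipe_to_umh / my_umh.pipe_from_umh can now be driven with
	 * kernel_write()/kernel_read(); my_umh.pid identifies the helper. */
	return 0;
}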
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 0173597e59aa..4cdd515a4385 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -490,6 +490,16 @@ enum usb_port_connect_type {
};
/*
+ * USB port quirks.
+ */
+
+/* For the given port, prefer the old (faster) enumeration scheme. */
+#define USB_PORT_QUIRK_OLD_SCHEME BIT(0)
+
+/* Decrease TRSTRCY to 10ms during device enumeration. */
+#define USB_PORT_QUIRK_FAST_ENUM BIT(1)
+
+/*
* USB 2.0 Link Power Management (LPM) parameters.
*/
struct usb2_lpm_parameters {
@@ -551,6 +561,8 @@ struct usb3_lpm_parameters {
* @route: tree topology hex string for use with xHCI
* @state: device state: configured, not attached, etc.
* @speed: device speed: high/full/low (or error)
+ * @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support
+ * @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support
* @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
* @ttport: device port on that tt hub
* @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
@@ -624,6 +636,8 @@ struct usb_device {
u32 route;
enum usb_device_state state;
enum usb_device_speed speed;
+ unsigned int rx_lanes;
+ unsigned int tx_lanes;
struct usb_tt *tt;
int ttport;
diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h
deleted file mode 100644
index 9bb00df3b53f..000000000000
--- a/include/linux/usb/atmel_usba_udc.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Platform data definitions for Atmel USBA gadget driver.
- */
-#ifndef __LINUX_USB_USBA_H
-#define __LINUX_USB_USBA_H
-
-struct usba_ep_data {
- char *name;
- int index;
- int fifo_size;
- int nr_banks;
- int can_dma;
- int can_isoc;
-};
-
-struct usba_platform_data {
- int vbus_pin;
- int vbus_pin_inverted;
- int num_ep;
- struct usba_ep_data ep[0];
-};
-
-#endif /* __LINUX_USB_USBA_H */
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index aaafecf073ff..ba4b3e3327ff 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -94,7 +94,7 @@ struct uac_clock_selector_descriptor {
__u8 bClockID;
__u8 bNrInPins;
__u8 baCSourceID[];
- /* bmControls, bAssocTerminal and iClockSource omitted */
+ /* bmControls and iClockSource omitted */
} __attribute__((packed));
/* 4.7.2.3 Clock Multiplier Descriptor */
@@ -189,6 +189,13 @@ struct uac2_iso_endpoint_descriptor {
#define UAC2_CONTROL_DATA_OVERRUN (3 << 2)
#define UAC2_CONTROL_DATA_UNDERRUN (3 << 4)
+/* 5.2.5.4.2 Connector Control Parameter Block */
+struct uac2_connectors_ctl_blk {
+ __u8 bNrChannels;
+ __le32 bmChannelConfig;
+ __u8 iChannelNames;
+} __attribute__((packed));
+
/* 6.1 Interrupt Data Message */
#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0)
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index a8959aaba0ae..a710e28b5215 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -221,6 +221,12 @@ struct uac3_iso_endpoint_descriptor {
__le16 wLockDelay;
} __attribute__((packed));
+/* 5.2.1.6.1 INSERTION CONTROL PARAMETER BLOCK */
+struct uac3_insertion_ctl_blk {
+ __u8 bSize;
+ __u8 bmConInserted;
+} __attribute__ ((packed));
+
/* 6.1 INTERRUPT DATA MESSAGE */
struct uac3_interrupt_data_msg {
__u8 bInfo;
@@ -392,4 +398,38 @@ struct uac3_interrupt_data_msg {
#define UAC3_AC_ACTIVE_INTERFACE_CONTROL 0x01
#define UAC3_AC_POWER_DOMAIN_CONTROL 0x02
+/* A.23.5 TERMINAL CONTROL SELECTORS */
+#define UAC3_TE_UNDEFINED 0x00
+#define UAC3_TE_INSERTION 0x01
+#define UAC3_TE_OVERLOAD 0x02
+#define UAC3_TE_UNDERFLOW 0x03
+#define UAC3_TE_OVERFLOW 0x04
+#define UAC3_TE_LATENCY 0x05
+
+/* BADD predefined Unit/Terminal values */
+#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */
+#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */
+#define UAC3_BADD_OT_ID3 3 /* Output Terminal ID3: bTerminalID = 3 */
+#define UAC3_BADD_IT_ID4 4 /* Input Terminal ID4: bTerminalID = 4 */
+#define UAC3_BADD_FU_ID5 5 /* Feature Unit ID5: bUnitID = 5 */
+#define UAC3_BADD_OT_ID6 6 /* Output Terminal ID6: bTerminalID = 6 */
+#define UAC3_BADD_FU_ID7 7 /* Feature Unit ID7: bUnitID = 7 */
+#define UAC3_BADD_MU_ID8 8 /* Mixer Unit ID8: bUnitID = 8 */
+#define UAC3_BADD_CS_ID9 9 /* Clock Source Entity ID9: bClockID = 9 */
+#define UAC3_BADD_PD_ID10 10 /* Power Domain ID10: bPowerDomainID = 10 */
+#define UAC3_BADD_PD_ID11 11 /* Power Domain ID11: bPowerDomainID = 11 */
+
+/* BADD wMaxPacketSize of AS endpoints */
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_16 0x0060
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_16 0x0062
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_24 0x0090
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_24 0x0093
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_16 0x00C0
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_16 0x00C4
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_24 0x0120
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_24 0x0126
+
+/* BADD sample rate is always fixed to 48kHz */
+#define UAC3_BADD_SAMPLING_RATE 48000
+
#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4b6b9283fa7b..8675e145ea8b 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -52,7 +52,7 @@
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
/* big enough to hold our biggest descriptor */
-#define USB_COMP_EP0_BUFSIZ 1024
+#define USB_COMP_EP0_BUFSIZ 4096
/* OS feature descriptor length <= 4kB */
#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 847f423ad9b3..e5cd84a0f84a 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -763,7 +763,7 @@ struct usb_gadget_string_container {
};
/* put descriptor for string with that id into buf (buflen >= 256) */
-int usb_gadget_get_string(struct usb_gadget_strings *table, int id, u8 *buf);
+int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf);
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index aef50cb2ed1b..34a6ded6f319 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -150,7 +150,6 @@ struct usb_hcd {
unsigned rh_pollable:1; /* may we poll the root hub? */
unsigned msix_enabled:1; /* driver has MSI-X enabled? */
unsigned msi_enabled:1; /* driver has MSI enabled? */
- unsigned remove_phy:1; /* auto-remove USB phy */
/*
* do not manage the PHY state in the HCD core, instead let the driver
* handle this (for example if the PHY can only be turned on after a
@@ -261,6 +260,7 @@ struct hc_driver {
#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_USB31 0x0050 /* USB 3.1 */
+#define HCD_USB32 0x0060 /* USB 3.2 */
#define HCD_MASK 0x0070
#define HCD_BH 0x0100 /* URB complete in BH context */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 9eb908a98033..fc6c77918481 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -67,28 +67,13 @@ struct musb_hdrc_config {
/* MUSB configuration-specific details */
unsigned multipoint:1; /* multipoint device */
unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */
- unsigned soft_con:1 __deprecated; /* soft connect required */
- unsigned utm_16:1 __deprecated; /* utm data witdh is 16 bits */
- unsigned big_endian:1; /* true if CPU uses big-endian */
- unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
- unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
- unsigned high_iso_tx:1; /* Tx ep required for HB iso */
- unsigned high_iso_rx:1; /* Rx ep required for HD iso */
- unsigned dma:1 __deprecated; /* supports DMA */
- unsigned vendor_req:1 __deprecated; /* vendor registers required */
/* need to explicitly de-assert the port reset after resume? */
unsigned host_port_deassert_reset_at_resume:1;
u8 num_eps; /* number of endpoints _with_ ep0 */
- u8 dma_channels __deprecated; /* number of dma channels */
- u8 dyn_fifo_size; /* dynamic size in bytes */
- u8 vendor_ctrl __deprecated; /* vendor control reg width */
- u8 vendor_stat __deprecated; /* vendor status reg witdh */
- u8 dma_req_chan __deprecated; /* bitmask for required dma channels */
u8 ram_bits; /* ram address size */
- struct musb_hdrc_eps_bits *eps_bits __deprecated;
u32 maximum_speed;
};
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index ff359bdfdc7b..09b570feb297 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -103,8 +103,8 @@ enum pd_ext_msg_type {
(((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT) | \
((ext_hdr) ? PD_HEADER_EXT_HDR : 0))
-#define PD_HEADER_LE(type, pwr, data, id, cnt) \
- cpu_to_le16(PD_HEADER((type), (pwr), (data), PD_REV20, (id), (cnt), (0)))
+#define PD_HEADER_LE(type, pwr, data, rev, id, cnt) \
+ cpu_to_le16(PD_HEADER((type), (pwr), (data), (rev), (id), (cnt), (0)))
static inline unsigned int pd_header_cnt(u16 header)
{
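With the extra rev argument, callers of PD_HEADER_LE() now encode the negotiated spec revision rather than the previously hard-coded PD_REV20. A hedged sketch of a control-message sender; the parameters are placeholders:

static void my_fill_accept_header(struct pd_message *msg, u8 pwr_role,
				  u8 data_role, unsigned int negotiated_rev,
				  unsigned int msg_id)
{
	/* e.g. negotiated_rev == PD_REV30 once a 3.0 contract is agreed */
	msg->header = PD_HEADER_LE(PD_CTRL_ACCEPT, pwr_role, data_role,
				   negotiated_rev, msg_id, 0 /* cnt */);
}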
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index b7a2625947f5..e4de6bc1f69b 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -157,22 +157,6 @@ struct usb_phy {
enum usb_charger_type (*charger_detect)(struct usb_phy *x);
};
-/**
- * struct usb_phy_bind - represent the binding for the phy
- * @dev_name: the device name of the device that will bind to the phy
- * @phy_dev_name: the device name of the phy
- * @index: used if a single controller uses multiple phys
- * @phy: reference to the phy
- * @list: to maintain a linked list of the binding information
- */
-struct usb_phy_bind {
- const char *dev_name;
- const char *phy_dev_name;
- u8 index;
- struct usb_phy *phy;
- struct list_head list;
-};
-
/* for board-specific init logic */
extern int usb_add_phy(struct usb_phy *, enum usb_phy_type type);
extern int usb_add_phy_dev(struct usb_phy *);
@@ -234,16 +218,12 @@ usb_phy_vbus_off(struct usb_phy *x)
extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
extern struct usb_phy *devm_usb_get_phy(struct device *dev,
enum usb_phy_type type);
-extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index);
-extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index);
extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
const char *phandle, u8 index);
extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
struct device_node *node, struct notifier_block *nb);
extern void usb_put_phy(struct usb_phy *);
extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
-extern int usb_bind_phy(const char *dev_name, u8 index,
- const char *phy_dev_name);
extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
extern void usb_phy_set_charger_current(struct usb_phy *usb_phy,
unsigned int mA);
@@ -263,16 +243,6 @@ static inline struct usb_phy *devm_usb_get_phy(struct device *dev,
return ERR_PTR(-ENXIO);
}
-static inline struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
-{
- return ERR_PTR(-ENXIO);
-}
-
-static inline struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index)
-{
- return ERR_PTR(-ENXIO);
-}
-
static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
const char *phandle, u8 index)
{
@@ -293,12 +263,6 @@ static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x)
{
}
-static inline int usb_bind_phy(const char *dev_name, u8 index,
- const char *phy_dev_name)
-{
- return -EOPNOTSUPP;
-}
-
static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
{
}
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index f0d839daeaea..b231b9314240 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -36,6 +36,7 @@ enum typec_cc_polarity {
/* Time to wait for TCPC to complete transmit */
#define PD_T_TCPC_TX_TIMEOUT 100 /* in ms */
#define PD_ROLE_SWAP_TIMEOUT (MSEC_PER_SEC * 10)
+#define PD_PPS_CTRL_TIMEOUT (MSEC_PER_SEC * 10)
enum tcpm_transmit_status {
TCPC_TX_SUCCESS = 0,
@@ -62,9 +63,6 @@ enum tcpm_transmit_type {
* @snk_pdo: PDO parameters sent to partner as response to
* PD_CTRL_GET_SINK_CAP message
* @nr_snk_pdo: Number of entries in @snk_pdo
- * @max_snk_mv: Maximum acceptable sink voltage in mV
- * @max_snk_ma: Maximum sink current in mA
- * @max_snk_mw: Maximum required sink power in mW
* @operating_snk_mw:
* Required operating sink power in mW
* @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or
@@ -85,9 +83,6 @@ struct tcpc_config {
const u32 *snk_vdo;
unsigned int nr_snk_vdo;
- unsigned int max_snk_mv;
- unsigned int max_snk_ma;
- unsigned int max_snk_mw;
unsigned int operating_snk_mw;
enum typec_port_type type;
@@ -174,9 +169,6 @@ int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
unsigned int nr_pdo);
int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
unsigned int nr_pdo,
- unsigned int max_snk_mv,
- unsigned int max_snk_ma,
- unsigned int max_snk_mw,
unsigned int operating_snk_mw);
void tcpm_vbus_change(struct tcpm_port *port);
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index d641ea1660b7..0c5c3ea8b2d7 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -17,6 +17,7 @@
#define __TEGRA_USB_PHY_H
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/usb/otg.h>
/*
@@ -76,6 +77,7 @@ struct tegra_usb_phy {
bool is_legacy_phy;
bool is_ulpi_phy;
int reset_gpio;
+ struct reset_control *pad_rst;
};
void tegra_usb_phy_preresume(struct usb_phy *phy);
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f2f3b68ba910..e091f0a11b11 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -31,10 +31,12 @@
extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
- unsigned long src_start, unsigned long len);
+ unsigned long src_start, unsigned long len,
+ bool *mmap_changing);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
unsigned long dst_start,
- unsigned long len);
+ unsigned long len,
+ bool *mmap_changing);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index c71def6b310f..a240ed2a0372 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
#define vbg_debug pr_debug
#endif
-/**
- * Allocate memory for generic request and initialize the request header.
- *
- * Return: the allocated memory
- * @len: Size of memory block required for the request.
- * @req_type: The generic request type.
- */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
-
-/**
- * Perform a generic request.
- *
- * Return: VBox status code
- * @gdev: The Guest extension device.
- * @req: Pointer to the request structure.
- */
-int vbg_req_perform(struct vbg_dev *gdev, void *req);
-
int vbg_hgcm_connect(struct vbg_dev *gdev,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status);
@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
u32 parm_count, int *vbox_status);
-int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status);
-
/**
* Convert a VirtualBox status code to a standard Linux kernel return value.
* Return: 0 or negative errno value.
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 988c7355bc22..fa1b5da2804e 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+#define virtio_device_for_each_vq(vdev, vq) \
+ list_for_each_entry(vq, &vdev->vqs, list)
+
/**
* virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
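The new virtio_device_for_each_vq() helper walks vdev->vqs without open-coding list_for_each_entry(). A minimal sketch; my_vdev_quiesce is a hypothetical driver function:

static void my_vdev_quiesce(struct virtio_device *vdev)
{
	struct virtqueue *vq;

	/* suppress callbacks on every queue of this device */
	virtio_device_for_each_vq(vdev, vq)
		virtqueue_disable_cb(vq);
}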
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index f144216febc6..9397628a1967 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian,
- bool has_data_valid)
+ bool has_data_valid,
+ int vlan_hlen)
{
memset(hdr, 0, sizeof(*hdr)); /* no info leak */
@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- if (skb_vlan_tag_present(skb))
- hdr->csum_start = __cpu_to_virtio16(little_endian,
- skb_checksum_start_offset(skb) + VLAN_HLEN);
- else
- hdr->csum_start = __cpu_to_virtio16(little_endian,
- skb_checksum_start_offset(skb));
+ hdr->csum_start = __cpu_to_virtio16(little_endian,
+ skb_checksum_start_offset(skb) + vlan_hlen);
hdr->csum_offset = __cpu_to_virtio16(little_endian,
skb->csum_offset);
} else if (has_data_valid &&
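Callers now pass the VLAN header length explicitly instead of having the helper infer it from the skb. A sketch of how a caller might adapt, assuming linux/if_vlan.h for VLAN_HLEN and skb_vlan_tag_present():

static int my_fill_vnet_hdr(const struct sk_buff *skb,
			    struct virtio_net_hdr *hdr)
{
	return virtio_net_hdr_from_skb(skb, hdr,
				       true,   /* little_endian */
				       false,  /* has_data_valid */
				       skb_vlan_tag_present(skb) ?
						VLAN_HLEN : 0);
}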
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index bbf32524ab27..fab02133a919 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -35,7 +35,7 @@ static inline void virtio_rmb(bool weak_barriers)
if (weak_barriers)
virt_rmb();
else
- rmb();
+ dma_rmb();
}
static inline void virtio_wmb(bool weak_barriers)
@@ -43,7 +43,7 @@ static inline void virtio_wmb(bool weak_barriers)
if (weak_barriers)
virt_wmb();
else
- wmb();
+ dma_wmb();
}
static inline void virtio_store_mb(bool weak_barriers,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1e5d8c392f15..398e9c95cd61 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -8,6 +8,7 @@
#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
+#include <linux/overflow.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
struct notifier_block; /* in notifier.h */
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 9318b2166439..2b0072fa5e92 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -305,4 +305,21 @@ do { \
__ret; \
})
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ *
+ * @bit: the bit of the word being waited on
+ * @word: the word being waited on, a kernel virtual address
+ *
+ * You can use this helper if bitflags are manipulated atomically rather than
+ * non-atomically under a lock.
+ */
+static inline void clear_and_wake_up_bit(int bit, void *word)
+{
+ clear_bit_unlock(bit, word);
+ /* See wake_up_bit() for which memory barrier you need to use. */
+ smp_mb__after_atomic();
+ wake_up_bit(word, bit);
+}
+
#endif /* _LINUX_WAIT_BIT_H */
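A sketch of the intended pairing between a waiter and the new helper; my_obj, its flags word and MY_FLAG_BUSY are hypothetical:

#define MY_FLAG_BUSY	0	/* hypothetical bit number */

struct my_obj {
	unsigned long flags;
};

static int my_wait_until_idle(struct my_obj *obj)
{
	return wait_on_bit(&obj->flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

static void my_mark_idle(struct my_obj *obj)
{
	/* clear-with-release, implied barrier, then wake the waiters */
	clear_and_wake_up_bit(MY_FLAG_BUSY, &obj->flags);
}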
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 39a0e215022a..60d673e15632 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -494,6 +494,7 @@ extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
+extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
/**
* queue_work - queue work on a workqueue
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d70f77a4b62a..6dad031be3c2 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -46,7 +46,6 @@ struct xattr {
size_t value_len;
};
-ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 28e2be5c8a98..f9e73b4a6e89 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -418,6 +418,7 @@
#define USB_PID_SVEON_STV27 0xd3af
#define USB_PID_TURBOX_DTT_2000 0xd3a4
#define USB_PID_WINTV_SOLOHD 0x0264
+#define USB_PID_WINTV_SOLOHD_2 0x8268
#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#define USB_PID_HAMA_DVBT_HYBRID 0x2758
#define USB_PID_XBOX_ONE_TUNER 0x02d5
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index ee91516ad074..881ca461b7bb 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -91,6 +91,7 @@ struct dvb_frontend;
* @mfe_dvbdev: Frontend device in use, in the case of MFE
* @mfe_lock: Lock to prevent using the other frontends when MFE is
* used.
+ * @mdev_lock: Protect access to the mdev pointer.
* @mdev: pointer to struct media_device, used when the media
* controller is used.
* @conn: RF connector. Used only if the device has no separate
@@ -114,6 +115,7 @@ struct dvb_adapter {
struct mutex mfe_lock; /* access lock for thread creation */
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ struct mutex mdev_lock;
struct media_device *mdev;
struct media_entity *conn;
struct media_pad *conn_pads;
diff --git a/include/media/i2c/tvp7002.h b/include/media/i2c/tvp7002.h
index 5ee007c1cead..cb213c136089 100644
--- a/include/media/i2c/tvp7002.h
+++ b/include/media/i2c/tvp7002.h
@@ -5,7 +5,7 @@
* Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com>
*
* This code is partially based upon the TVP5150 driver
- * written by Mauro Carvalho Chehab (mchehab@infradead.org),
+ * written by Mauro Carvalho Chehab <mchehab@kernel.org>,
* the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com>
* and the TVP7002 driver in the TI LSP 2.10.00.14
*
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index a732af1dbba0..3aa3d58d1d58 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -842,7 +842,7 @@ struct media_entity *media_entity_get(struct media_entity *entity);
* a fwnode. This is useful for devices which use more complex
* mappings of media pads.
*
- * If the entity dose not implement the get_fwnode_pad() operation
+ * If the entity does not implement the get_fwnode_pad() operation
* then this function searches the entity for the first pad that
* matches the @direction_flags.
*
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 6742fd86ff65..61571773a98d 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -347,6 +347,7 @@ static inline void ir_raw_event_reset(struct rc_dev *dev)
struct ir_raw_event ev = { .reset = true };
ir_raw_event_store(dev, &ev);
+ dev->idle = true;
ir_raw_event_handle(dev);
}
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index f60cf9cf3b9c..456ac13eca1d 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -30,6 +30,7 @@
* @VFL_TYPE_SUBDEV: for V4L2 subdevices
* @VFL_TYPE_SDR: for Software Defined Radio tuners
* @VFL_TYPE_TOUCH: for touch sensors
+ * @VFL_TYPE_MAX: number of VFL types, must always be last in the enum
*/
enum vfl_devnode_type {
VFL_TYPE_GRABBER = 0,
@@ -237,7 +238,6 @@ struct v4l2_file_operations {
* @ioctl_ops: pointer to &struct v4l2_ioctl_ops with ioctl callbacks
*
* @valid_ioctls: bitmap with the valid ioctls for this device
- * @disable_locking: bitmap with the ioctls that don't require locking
* @lock: pointer to &struct mutex serialization lock
*
* .. note::
@@ -290,7 +290,6 @@ struct video_device
const struct v4l2_ioctl_ops *ioctl_ops;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
- DECLARE_BITMAP(disable_locking, BASE_VIDIOC_PRIVATE);
struct mutex *lock;
};
@@ -437,28 +436,6 @@ void video_device_release(struct video_device *vdev);
void video_device_release_empty(struct video_device *vdev);
/**
- * v4l2_is_known_ioctl - Checks if a given cmd is a known V4L ioctl
- *
- * @cmd: ioctl command
- *
- * returns true if cmd is a known V4L2 ioctl
- */
-bool v4l2_is_known_ioctl(unsigned int cmd);
-
-/** v4l2_disable_ioctl_locking - mark that a given command
- * shouldn't use core locking
- *
- * @vdev: pointer to &struct video_device
- * @cmd: ioctl command
- */
-static inline void v4l2_disable_ioctl_locking(struct video_device *vdev,
- unsigned int cmd)
-{
- if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
- set_bit(_IOC_NR(cmd), vdev->disable_locking);
-}
-
-/**
* v4l2_disable_ioctl- mark that a given command isn't implemented.
* shouldn't use core locking
*
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 0c9e4da55499..b330e4a08a6b 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -33,7 +33,7 @@ struct v4l2_ctrl_handler;
* struct v4l2_device - main struct to for V4L2 device drivers
*
* @dev: pointer to struct device.
- * @mdev: pointer to struct media_device
+ * @mdev: pointer to struct media_device, may be NULL.
* @subdevs: used to keep track of the registered subdevs
* @lock: lock this struct; can be used by the driver as well
* if this struct is embedded into a larger struct.
@@ -58,9 +58,7 @@ struct v4l2_ctrl_handler;
*/
struct v4l2_device {
struct device *dev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_device *mdev;
-#endif
struct list_head subdevs;
spinlock_t lock;
char name[V4L2_DEVICE_NAME_SIZE];
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index c228ec1c77cf..9cccab618b98 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -99,7 +99,7 @@ struct v4l2_fwnode_endpoint {
struct fwnode_endpoint base;
/*
* Fields below this line will be zeroed by
- * v4l2_fwnode_parse_endpoint()
+ * v4l2_fwnode_endpoint_parse()
*/
enum v4l2_mbus_type bus_type;
union {
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index a7b3f7c75d62..a8dbf5b54b5c 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -658,18 +658,6 @@ void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
struct video_device;
-
-/**
- * v4l2_ioctl_get_lock - get the mutex (if any) that it is need to lock for
- * a given command.
- *
- * @vdev: Pointer to struct &video_device.
- * @cmd: Ioctl name.
- *
- * .. note:: Internal use only. Should not be used outside V4L2 core.
- */
-struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned int cmd);
-
/* names for fancy debug output */
extern const char *v4l2_field_names[];
extern const char *v4l2_type_names[];
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 0bda0adc744f..60a664febba0 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -1,11 +1,11 @@
/*
* generic helper functions for handling video4linux capture buffers
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* Highly based on video-buf written originally by:
* (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
* (c) 2006 Ted Walther and John Sokol
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index d8b27854e3bf..01bd142b979d 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -6,11 +6,11 @@
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* Highly based on video-buf written originally by:
* (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
* (c) 2006 Ted Walther and John Sokol
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
deleted file mode 100644
index c9c81990a56c..000000000000
--- a/include/media/videobuf-dvb.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <media/dvbdev.h>
-#include <media/dmxdev.h>
-#include <media/dvb_demux.h>
-#include <media/dvb_net.h>
-#include <media/dvb_frontend.h>
-
-#ifndef _VIDEOBUF_DVB_H_
-#define _VIDEOBUF_DVB_H_
-
-struct videobuf_dvb {
- /* filling that the job of the driver */
- char *name;
- struct dvb_frontend *frontend;
- struct videobuf_queue dvbq;
-
- /* video-buf-dvb state info */
- struct mutex lock;
- struct task_struct *thread;
- int nfeeds;
-
- /* videobuf_dvb_(un)register manges this */
- struct dvb_demux demux;
- struct dmxdev dmxdev;
- struct dmx_frontend fe_hw;
- struct dmx_frontend fe_mem;
- struct dvb_net net;
-};
-
-struct videobuf_dvb_frontend {
- struct list_head felist;
- int id;
- struct videobuf_dvb dvb;
-};
-
-struct videobuf_dvb_frontends {
- struct list_head felist;
- struct mutex lock;
- struct dvb_adapter adapter;
- int active_fe_id; /* Indicates which frontend in the felist is in use */
- int gate; /* Frontend with gate control 0=!MFE,1=fe0,2=fe1 etc */
-};
-
-int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
- struct module *module,
- void *adapter_priv,
- struct device *device,
- short *adapter_nr,
- int mfe_shared);
-
-void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f);
-
-struct videobuf_dvb_frontend * videobuf_dvb_alloc_frontend(struct videobuf_dvb_frontends *f, int id);
-void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
-
-struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
-int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
-
-#endif /* _VIDEOBUF_DVB_H_ */
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
index 486a97efdb56..36c6a4ad3504 100644
--- a/include/media/videobuf-vmalloc.h
+++ b/include/media/videobuf-vmalloc.h
@@ -6,7 +6,7 @@
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 68a8abe4fac5..678c24de1ac6 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -34,13 +34,23 @@ struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
- void (*callback)(void *, bool);
+ void (*callback)(void *data, bool completed, u32 crc);
void *callback_data;
};
int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
const struct vsp1_du_lif_config *cfg);
+/**
+ * struct vsp1_du_atomic_config - VSP atomic configuration parameters
+ * @pixelformat: plane pixel format (V4L2 4CC)
+ * @pitch: line pitch in bytes, for all planes
+ * @mem: DMA memory address for each plane of the frame buffer
+ * @src: source rectangle in the frame buffer (integer coordinates)
+ * @dst: destination rectangle on the display (integer coordinates)
+ * @alpha: alpha value (0: fully transparent, 255: fully opaque)
+ * @zpos: Z position of the plane (from 0 to number of planes minus 1)
+ */
struct vsp1_du_atomic_config {
u32 pixelformat;
unsigned int pitch;
@@ -51,11 +61,42 @@ struct vsp1_du_atomic_config {
unsigned int zpos;
};
+/**
+ * enum vsp1_du_crc_source - Source used for CRC calculation
+ * @VSP1_DU_CRC_NONE: CRC calculation disabled
+ * @VSP1_DU_CRC_PLANE: Perform CRC calculation on an input plane
+ * @VSP1_DU_CRC_OUTPUT: Perform CRC calculation on the composed output
+ */
+enum vsp1_du_crc_source {
+ VSP1_DU_CRC_NONE,
+ VSP1_DU_CRC_PLANE,
+ VSP1_DU_CRC_OUTPUT,
+};
+
+/**
+ * struct vsp1_du_crc_config - VSP CRC computation configuration parameters
+ * @source: source for CRC calculation
+ * @index: index of the CRC source plane (when source is set to plane)
+ */
+struct vsp1_du_crc_config {
+ enum vsp1_du_crc_source source;
+ unsigned int index;
+};
+
+/**
+ * struct vsp1_du_atomic_pipe_config - VSP atomic pipe configuration parameters
+ * @crc: CRC computation configuration
+ */
+struct vsp1_du_atomic_pipe_config {
+ struct vsp1_du_crc_config crc;
+};
+
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index);
int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
unsigned int rpf,
const struct vsp1_du_atomic_config *cfg);
-void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index);
+void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
+ const struct vsp1_du_atomic_pipe_config *cfg);
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
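vsp1_du_atomic_flush() now takes a per-pipe configuration so the DU side can request CRC computation at commit time. A minimal sketch using the types declared above; my_du_commit is a hypothetical caller:

static void my_du_commit(struct device *vsp, unsigned int pipe_index)
{
	struct vsp1_du_atomic_pipe_config pipe_cfg = {
		.crc = {
			.source = VSP1_DU_CRC_OUTPUT, /* CRC of composed output */
		},
	};

	vsp1_du_atomic_flush(vsp, pipe_index, &pipe_cfg);
}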
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
index 51ccf76db293..9ff6ddc28e22 100644
--- a/include/misc/ocxl.h
+++ b/include/misc/ocxl.h
@@ -188,6 +188,15 @@ extern int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data);
+/**
+ * Update values within a Process Element
+ *
+ * link_handle: the link handle associated with the process element
+ * pasid: the PASID for the AFU context
+ * tid: the new thread id for the process element
+ */
+extern int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid);
+
/*
* Remove a Process Element from the Shared Process Area for a link
*/
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 378d601258be..5f43f7a70fe6 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -59,6 +59,19 @@ struct in6_validator_info {
struct netlink_ext_ack *extack;
};
+struct ifa6_config {
+ const struct in6_addr *pfx;
+ unsigned int plen;
+
+ const struct in6_addr *peer_pfx;
+
+ u32 rt_priority;
+ u32 ifa_flags;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u16 scope;
+};
+
int addrconf_init(void);
void addrconf_cleanup(void);
@@ -223,6 +236,22 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
struct dst_entry **dst, struct flowi6 *fl6);
+
+ struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
+ struct fib6_info *(*fib6_lookup)(struct net *net, int oif,
+ struct flowi6 *fl6, int flags);
+ struct fib6_info *(*fib6_table_lookup)(struct net *net,
+ struct fib6_table *table,
+ int oif, struct flowi6 *fl6,
+ int flags);
+ struct fib6_info *(*fib6_multipath_select)(const struct net *net,
+ struct fib6_info *f6i,
+ struct flowi6 *fl6, int oif,
+ const struct sk_buff *skb,
+ int strict);
+ u32 (*ip6_mtu_from_fib6)(struct fib6_info *f6i, struct in6_addr *daddr,
+ struct in6_addr *saddr);
+
void (*udpv6_encap_enable)(void);
void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
const struct in6_addr *solicited_addr,
@@ -308,6 +337,20 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
}
/**
+ * __in6_dev_get_safely - get inet6_dev pointer from netdevice
+ * @dev: network device
+ *
+ * This is a safer version of __in6_dev_get(): it tolerates a NULL @dev and
+ * may be called with either the RCU read lock or the RTNL held.
+ */
+static inline struct inet6_dev *__in6_dev_get_safely(const struct net_device *dev)
+{
+ if (likely(dev))
+ return rcu_dereference_rtnl(dev->ip6_ptr);
+ else
+ return NULL;
+}
+
+/**
* in6_dev_get - get inet6_dev pointer from netdevice
* @dev: network device
*
diff --git a/include/net/ax25.h b/include/net/ax25.h
index c91bc87931c7..3f9aea8087e3 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -15,6 +15,7 @@
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <net/sock.h>
+#include <linux/seq_file.h>
#define AX25_T1CLAMPLO 1
#define AX25_T1CLAMPHI (30 * HZ)
@@ -399,7 +400,7 @@ int ax25_check_iframes_acked(ax25_cb *, unsigned short);
/* ax25_route.c */
void ax25_rt_device_down(struct net_device *);
int ax25_rt_ioctl(unsigned int, void __user *);
-extern const struct file_operations ax25_route_fops;
+extern const struct seq_operations ax25_rt_seqops;
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
int ax25_rt_autobind(ax25_cb *, ax25_address *);
struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
@@ -455,7 +456,7 @@ unsigned long ax25_display_timer(struct timer_list *);
extern int ax25_uid_policy;
ax25_uid_assoc *ax25_findbyuid(kuid_t);
int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
-extern const struct file_operations ax25_uid_fops;
+extern const struct seq_operations ax25_uid_seqops;
void ax25_uid_free(void);
/* sysctl_net_ax25.c */
diff --git a/include/net/ax88796.h b/include/net/ax88796.h
index b9a3beca0ce4..84b3785d0e66 100644
--- a/include/net/ax88796.h
+++ b/include/net/ax88796.h
@@ -12,6 +12,10 @@
#ifndef __NET_AX88796_PLAT_H
#define __NET_AX88796_PLAT_H
+struct sk_buff;
+struct net_device;
+struct platform_device;
+
#define AXFLG_HAS_EEPROM (1<<0)
#define AXFLG_MAC_FROMDEV (1<<1) /* device already has MAC */
#define AXFLG_HAS_93CX6 (1<<2) /* use eeprom_93cx6 driver */
@@ -26,6 +30,16 @@ struct ax_plat_data {
u32 *reg_offsets; /* register offsets */
u8 *mac_addr; /* MAC addr (only used when
AXFLG_MAC_FROMPLATFORM is used */
+
+ /* uses default ax88796 buffer if set to NULL */
+ void (*block_output)(struct net_device *dev, int count,
+ const unsigned char *buf, int star_page);
+ void (*block_input)(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+ /* returns nonzero if a pending interrupt request might be caused by
+ * the ax88796. Handles all interrupts if set to NULL
+ */
+ int (*check_irq)(struct platform_device *pdev);
};
#endif /* __NET_AX88796_PLAT_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index ec9d6bc65855..53ce8176c313 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b619a190ff12..893bbbb5d2fa 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1393,6 +1393,8 @@ struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout);
+int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
const void *param);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f801fc940b29..808f1d167349 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -198,6 +198,7 @@ struct bonding {
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
bool force_primary;
+ u32 nest_level;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
@@ -284,8 +285,15 @@ static inline bool bond_needs_speed_duplex(const struct bonding *bond)
static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
{
- return (BOND_MODE(bond) == BOND_MODE_TLB) &&
- (bond->params.tlb_dynamic_lb == 0);
+ return (bond_is_lb(bond) && bond->params.tlb_dynamic_lb == 0);
+}
+
+static inline bool bond_mode_can_use_xmit_hash(const struct bonding *bond)
+{
+ return (BOND_MODE(bond) == BOND_MODE_8023AD ||
+ BOND_MODE(bond) == BOND_MODE_XOR ||
+ BOND_MODE(bond) == BOND_MODE_TLB ||
+ BOND_MODE(bond) == BOND_MODE_ALB);
}
static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond)
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 71c72a939bf8..c5187438af38 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -121,6 +121,21 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#endif
}
+static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
+{
+ if (sk_can_busy_loop(sock->sk) &&
+ events && (events & POLL_BUSY_LOOP)) {
+ /* once, only if requested by syscall */
+ sk_busy_loop(sock->sk, 1);
+ }
+}
+
+/* if this socket can poll_ll, tell the system call */
+static inline __poll_t sock_poll_busy_flag(struct socket *sock)
+{
+ return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
+}
+
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 250dac390806..5fbfe61f41c6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1080,6 +1080,37 @@ struct sta_bss_parameters {
};
/**
+ * struct cfg80211_txq_stats - TXQ statistics for this TID
+ * @filled: bitmap of flags using the bits of &enum nl80211_txq_stats to
+ * indicate the relevant values in this struct are filled
+ * @backlog_bytes: total number of bytes currently backlogged
+ * @backlog_packets: total number of packets currently backlogged
+ * @flows: number of new flows seen
+ * @drops: total number of packets dropped
+ * @ecn_marks: total number of packets marked with ECN CE
+ * @overlimit: number of drops due to queue space overflow
+ * @overmemory: number of drops due to memory limit overflow
+ * @collisions: number of hash collisions
+ * @tx_bytes: total number of bytes dequeued
+ * @tx_packets: total number of packets dequeued
+ * @max_flows: maximum number of flows supported
+ */
+struct cfg80211_txq_stats {
+ u32 filled;
+ u32 backlog_bytes;
+ u32 backlog_packets;
+ u32 flows;
+ u32 drops;
+ u32 ecn_marks;
+ u32 overlimit;
+ u32 overmemory;
+ u32 collisions;
+ u32 tx_bytes;
+ u32 tx_packets;
+ u32 max_flows;
+};
+
+/**
* struct cfg80211_tid_stats - per-TID statistics
* @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to
* indicate the relevant values in this struct are filled
@@ -1088,6 +1119,7 @@ struct sta_bss_parameters {
* @tx_msdu_retries: number of retries (not counting the first) for
* transmitted MSDUs
* @tx_msdu_failed: number of failed transmitted MSDUs
+ * @txq_stats: TXQ statistics
*/
struct cfg80211_tid_stats {
u32 filled;
@@ -1095,6 +1127,7 @@ struct cfg80211_tid_stats {
u64 tx_msdu;
u64 tx_msdu_retries;
u64 tx_msdu_failed;
+ struct cfg80211_txq_stats txq_stats;
};
#define IEEE80211_MAX_CHAINS 4
@@ -1151,7 +1184,10 @@ struct cfg80211_tid_stats {
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
+ * Note that this doesn't use the @filled bit, but is used if non-NULL.
* @ack_signal: signal strength (in dBm) of the last ACK frame.
+ * @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that
+ * have been sent.
*/
struct station_info {
u64 filled;
@@ -1195,8 +1231,9 @@ struct station_info {
u64 rx_beacon;
u64 rx_duration;
u8 rx_beacon_signal_avg;
- struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
+ struct cfg80211_tid_stats *pertid;
s8 ack_signal;
+ s8 avg_ack_signal;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -2188,9 +2225,14 @@ struct cfg80211_connect_params {
* have to be updated as part of update_connect_params() call.
*
* @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
+ * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm,
+ * username, erp sequence number and rrk) are updated
+ * @UPDATE_AUTH_TYPE: Indicates that authentication type is updated
*/
enum cfg80211_connect_params_changed {
UPDATE_ASSOC_IES = BIT(0),
+ UPDATE_FILS_ERP_INFO = BIT(1),
+ UPDATE_AUTH_TYPE = BIT(2),
};
/**
@@ -2201,6 +2243,9 @@ enum cfg80211_connect_params_changed {
* @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
* @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
* @WIPHY_PARAM_DYN_ACK: dynack has been enabled
+ * @WIPHY_PARAM_TXQ_LIMIT: TXQ packet limit has been changed
+ * @WIPHY_PARAM_TXQ_MEMORY_LIMIT: TXQ memory limit has been changed
+ * @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum has been changed
*/
enum wiphy_params_flags {
WIPHY_PARAM_RETRY_SHORT = 1 << 0,
@@ -2209,6 +2254,9 @@ enum wiphy_params_flags {
WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
WIPHY_PARAM_DYN_ACK = 1 << 5,
+ WIPHY_PARAM_TXQ_LIMIT = 1 << 6,
+ WIPHY_PARAM_TXQ_MEMORY_LIMIT = 1 << 7,
+ WIPHY_PARAM_TXQ_QUANTUM = 1 << 8,
};
/**
@@ -2961,6 +3009,9 @@ struct cfg80211_external_auth_params {
*
* @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
*
+ * @get_txq_stats: Get TXQ stats for interface or phy. If wdev is %NULL, this
+ * function should return phy stats, and interface stats otherwise.
+ *
* @set_pmk: configure the PMK to be used for offloaded 802.1X 4-Way handshake.
* If not deleted through @del_pmk the PMK remains valid until disconnect
* upon which the driver should clear it.
@@ -3262,6 +3313,10 @@ struct cfg80211_ops {
struct net_device *dev,
const bool enabled);
+ int (*get_txq_stats)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_txq_stats *txqstats);
+
int (*set_pmk)(struct wiphy *wiphy, struct net_device *dev,
const struct cfg80211_pmk_conf *conf);
int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev,
@@ -3806,6 +3861,10 @@ struct wiphy_iftype_ext_capab {
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
+ *
+ * @txq_limit: configuration of internal TX queue frame limit
+ * @txq_memory_limit: configuration of internal TX queue memory limit
+ * @txq_quantum: configuration of internal TX queue scheduler quantum
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -3940,6 +3999,10 @@ struct wiphy {
u8 nan_supported_bands;
+ u32 txq_limit;
+ u32 txq_memory_limit;
+ u32 txq_quantum;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -5363,6 +5426,30 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
#endif
/**
+ * struct cfg80211_fils_resp_params - FILS connection response params
+ * @kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ * @erp_next_seq_num is valid.
+ * @erp_next_seq_num: The next sequence number to use in ERP message in
+ * FILS Authentication. This value should be specified irrespective of the
+ * status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ * used for this FILS connection (may be %NULL).
+ */
+struct cfg80211_fils_resp_params {
+ const u8 *kek;
+ size_t kek_len;
+ bool update_erp_next_seq_num;
+ u16 erp_next_seq_num;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *pmkid;
+};
+
+/**
* struct cfg80211_connect_resp_params - Connection response params
* @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
* %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
@@ -5380,17 +5467,7 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
* @req_ie_len: Association request IEs length
* @resp_ie: Association response IEs (may be %NULL)
* @resp_ie_len: Association response IEs length
- * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
- * @fils_kek_len: Length of @fils_kek in octets
- * @update_erp_next_seq_num: Boolean value to specify whether the value in
- * @fils_erp_next_seq_num is valid.
- * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
- * FILS Authentication. This value should be specified irrespective of the
- * status for a FILS connection.
- * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
- * @pmk_len: Length of @pmk in octets
- * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
- * used for this FILS connection (may be %NULL).
+ * @fils: FILS connection response parameters.
* @timeout_reason: Reason for connection timeout. This is used when the
* connection fails due to a timeout instead of an explicit rejection from
* the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
@@ -5406,13 +5483,7 @@ struct cfg80211_connect_resp_params {
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
- const u8 *fils_kek;
- size_t fils_kek_len;
- bool update_erp_next_seq_num;
- u16 fils_erp_next_seq_num;
- const u8 *pmk;
- size_t pmk_len;
- const u8 *pmkid;
+ struct cfg80211_fils_resp_params fils;
enum nl80211_timeout_reason timeout_reason;
};
@@ -5558,6 +5629,7 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
+ * @fils: FILS related roaming information.
*/
struct cfg80211_roam_info {
struct ieee80211_channel *channel;
@@ -5567,6 +5639,7 @@ struct cfg80211_roam_info {
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
+ struct cfg80211_fils_resp_params fils;
};
/**
@@ -5648,6 +5721,26 @@ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
gfp_t gfp);
+/**
+ * cfg80211_sinfo_alloc_tid_stats - allocate per-tid statistics.
+ *
+ * @sinfo: the station information
+ * @gfp: allocation flags
+ */
+int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
+
+/**
+ * cfg80211_sinfo_release_content - release contents of station info
+ * @sinfo: the station information
+ *
+ * Releases any potentially allocated sub-information of the station
+ * information, but not the struct itself (since it's typically on
+ * the stack.)
+ */
+static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
+{
+ kfree(sinfo->pertid);
+}
/**
* cfg80211_new_sta - notify userspace about station
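Since @pertid is now a pointer, drivers that report per-TID statistics allocate it on demand and cfg80211 releases it via cfg80211_sinfo_release_content(). A hedged sketch of a .get_station handler; names and values are placeholders:

static int my_get_station(struct wiphy *wiphy, struct net_device *dev,
			  const u8 *mac, struct station_info *sinfo)
{
	if (cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL))
		return -ENOMEM;

	/* fill TID 0 as an example; cfg80211 frees sinfo->pertid later */
	sinfo->pertid[0].filled = BIT(NL80211_TID_STATS_TX_MSDU);
	sinfo->pertid[0].tx_msdu = 42;	/* placeholder counter */
	return 0;
}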
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 207d9ba1f92c..0e5e91be2d30 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -101,6 +101,10 @@ struct dcbnl_rtnl_ops {
/* CEE peer */
int (*cee_peer_getpg) (struct net_device *, struct cee_pg *);
int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *);
+
+ /* buffer settings */
+ int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *);
+ int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *);
};
#endif /* __NET_DCBNL_H__ */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 2e4f71e16e95..e336ea9c73df 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -35,6 +35,14 @@ struct devlink {
char priv[0] __aligned(NETDEV_ALIGN);
};
+struct devlink_port_attrs {
+ bool set;
+ enum devlink_port_flavour flavour;
+ u32 port_number; /* same value as "split group" */
+ bool split;
+ u32 split_subport_number;
+};
+
struct devlink_port {
struct list_head list;
struct devlink *devlink;
@@ -43,8 +51,7 @@ struct devlink_port {
enum devlink_port_type type;
enum devlink_port_type desired_type;
void *type_dev;
- bool split;
- u32 split_group;
+ struct devlink_port_attrs attrs;
};
struct devlink_sb_pool_info {
@@ -289,12 +296,13 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
struct devlink_ops {
- int (*reload)(struct devlink *devlink);
+ int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
- unsigned int count);
- int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+ unsigned int count, struct netlink_ext_ack *extack);
+ int (*port_unsplit)(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -367,8 +375,12 @@ void devlink_port_type_eth_set(struct devlink_port *devlink_port,
void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev);
void devlink_port_type_clear(struct devlink_port *devlink_port);
-void devlink_port_split_set(struct devlink_port *devlink_port,
- u32 split_group);
+void devlink_port_attrs_set(struct devlink_port *devlink_port,
+ enum devlink_port_flavour flavour,
+ u32 port_number, bool split,
+ u32 split_subport_number);
+int devlink_port_get_phys_port_name(struct devlink_port *devlink_port,
+ char *name, size_t len);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
@@ -466,11 +478,20 @@ static inline void devlink_port_type_clear(struct devlink_port *devlink_port)
{
}
-static inline void devlink_port_split_set(struct devlink_port *devlink_port,
- u32 split_group)
+static inline void devlink_port_attrs_set(struct devlink_port *devlink_port,
+ enum devlink_port_flavour flavour,
+ u32 port_number, bool split,
+ u32 split_subport_number)
{
}
+static inline int
+devlink_port_get_phys_port_name(struct devlink_port *devlink_port,
+ char *name, size_t len)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int devlink_sb_register(struct devlink *devlink,
unsigned int sb_index, u32 size,
u16 ingress_pools_count,
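devlink_port_split_set() is replaced by the richer devlink_port_attrs_set(), which also carries the port flavour and split sub-port. A sketch for a non-split physical port; DEVLINK_PORT_FLAVOUR_PHYSICAL is assumed from the matching uapi change:

static void my_register_port_attrs(struct devlink_port *dl_port,
				   u32 port_number)
{
	devlink_port_attrs_set(dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number,
			       false,	/* not split */
			       0);	/* split_subport_number unused */
}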
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 60fb4ec8ba61..fdbd6082945d 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -20,12 +20,14 @@
#include <linux/of.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
+#include <linux/phy.h>
#include <net/devlink.h>
#include <net/switchdev.h>
struct tc_action;
struct phy_device;
struct fixed_phy_status;
+struct phylink_link_state;
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = 0,
@@ -199,6 +201,7 @@ struct dsa_port {
u8 stp_state;
struct net_device *bridge_dev;
struct devlink_port devlink_port;
+ struct phylink *pl;
/*
* Original copy of the master netdev ethtool_ops
*/
@@ -354,12 +357,36 @@ struct dsa_switch_ops {
struct fixed_phy_status *st);
/*
+ * PHYLINK integration
+ */
+ void (*phylink_validate)(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+ int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ void (*phylink_mac_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
+ void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+ void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev);
+ void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ /*
* ethtool hardware statistics.
*/
- void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
+ void (*get_strings)(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data);
void (*get_ethtool_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
- int (*get_sset_count)(struct dsa_switch *ds, int port);
+ int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
+ void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
+ int port, uint64_t *data);
/*
* ethtool Wake-on-LAN
@@ -588,4 +615,10 @@ static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
+int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
+int dsa_port_get_phy_sset_count(struct dsa_port *dp);
+void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
+
#endif
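The new phylink_* callbacks let a DSA driver hand MAC and link management to phylink instead of the legacy adjust_link path. A skeletal sketch of wiring two of them into dsa_switch_ops; the bodies and my_* names are hypothetical:

static void my_phylink_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	/* clear link modes this port cannot handle, e.g. via bitmap_and()
	 * against a per-port mask of __ETHTOOL_LINK_MODE_MASK_NBITS bits
	 */
}

static void my_phylink_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	/* program the port MAC for state->interface and state->speed */
}

static const struct dsa_switch_ops my_switch_ops = {
	.phylink_validate	= my_phylink_validate,
	.phylink_mac_config	= my_phylink_mac_config,
	/* other mandatory DSA ops elided */
};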
diff --git a/include/net/erspan.h b/include/net/erspan.h
index d044aa60cc76..b39643ef4c95 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -219,6 +219,33 @@ static inline __be32 erspan_get_timestamp(void)
return htonl((u32)h_usecs);
}
+/* ERSPAN BSO (Bad/Short/Oversized), see RFC1757
+ * 00b --> Good frame with no error, or unknown integrity
+ * 01b --> Payload is a Short Frame
+ * 10b --> Payload is an Oversized Frame
+ * 11b --> Payload is a Bad Frame with CRC or Alignment Error
+ */
+enum erspan_bso {
+ BSO_NOERROR = 0x0,
+ BSO_SHORT = 0x1,
+ BSO_OVERSIZED = 0x2,
+ BSO_BAD = 0x3,
+};
+
+static inline u8 erspan_detect_bso(struct sk_buff *skb)
+{
+ /* BSO_BAD is not handled because the frame CRC
+ * or alignment error information is in FCS.
+ */
+ if (skb->len < ETH_ZLEN)
+ return BSO_SHORT;
+
+ if (skb->len > ETH_FRAME_LEN)
+ return BSO_OVERSIZED;
+
+ return BSO_NOERROR;
+}
+
static inline void erspan_build_header_v2(struct sk_buff *skb,
u32 id, u8 direction, u16 hwid,
bool truncate, bool is_ipv4)
@@ -248,6 +275,7 @@ static inline void erspan_build_header_v2(struct sk_buff *skb,
vlan_tci = ntohs(qp->tci);
}
+ bso = erspan_detect_bso(skb);
skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
ershdr = (struct erspan_base_hdr *)skb->data;
memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
diff --git a/include/net/failover.h b/include/net/failover.h
new file mode 100644
index 000000000000..bb15438f39c7
--- /dev/null
+++ b/include/net/failover.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _FAILOVER_H
+#define _FAILOVER_H
+
+#include <linux/netdevice.h>
+
+struct failover_ops {
+ int (*slave_pre_register)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_register)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_pre_unregister)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_unregister)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_link_change)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ int (*slave_name_change)(struct net_device *slave_dev,
+ struct net_device *failover_dev);
+ rx_handler_result_t (*slave_handle_frame)(struct sk_buff **pskb);
+};
+
+struct failover {
+ struct list_head list;
+ struct net_device __rcu *failover_dev;
+ struct failover_ops __rcu *ops;
+};
+
+struct failover *failover_register(struct net_device *dev,
+ struct failover_ops *ops);
+void failover_unregister(struct failover *failover);
+int failover_slave_unregister(struct net_device *slave_dev);
+
+#endif /* _FAILOVER_H */
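A hedged sketch of how a paravirtual driver might plug into the new failover core: it registers a failover_ops with at least a frame handler and keeps the returned handle for teardown (ERR_PTR-style error returns are assumed):

static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	/* frames seen on the enslaved lower device; steer as needed */
	return RX_HANDLER_ANOTHER;
}

static struct failover_ops my_failover_ops = {
	.slave_handle_frame	= my_handle_frame,
};

static struct failover *my_failover;

static int my_setup_failover(struct net_device *failover_dev)
{
	my_failover = failover_register(failover_dev, &my_failover_ops);
	if (IS_ERR(my_failover))
		return PTR_ERR(my_failover);
	return 0;	/* failover_unregister(my_failover) on teardown */
}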
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e5cfcfc7dd93..b473df5b9512 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -75,7 +75,8 @@ struct fib_rules_ops {
int (*configure)(struct fib_rule *,
struct sk_buff *,
struct fib_rule_hdr *,
- struct nlattr **);
+ struct nlattr **,
+ struct netlink_ext_ack *);
int (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 9a074776f70b..adc24df56b90 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -226,6 +226,11 @@ struct flow_dissector {
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};
+struct flow_keys_basic {
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+};
+
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
@@ -244,14 +249,14 @@ __be32 flow_get_u32_src(const struct flow_keys *flow);
__be32 flow_get_u32_dst(const struct flow_keys *flow);
extern struct flow_dissector flow_keys_dissector;
-extern struct flow_dissector flow_keys_buf_dissector;
+extern struct flow_dissector flow_keys_basic_dissector;
/* struct flow_keys_digest:
*
* This structure is used to hold a digest of the full flow keys. This is a
* larger "hash" of a flow to allow definitively matching specific flows where
* the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
- * that it can by used in CB of skb (see sch_choke for an example).
+ * that it can be used in CB of skb (see sch_choke for an example).
*/
#define FLOW_KEYS_DIGEST_LEN 16
struct flow_keys_digest {
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d4088d1a688d..d7578cf49c3a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -42,6 +42,7 @@ enum {
struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
+ __u32 rt_priority;
/* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
@@ -64,7 +65,7 @@ struct inet6_ifaddr {
struct delayed_work dad_work;
struct inet6_dev *idev;
- struct rt6_info *rt;
+ struct fib6_info *rt;
struct hlist_node addr_lst;
struct list_head if_list;
@@ -143,8 +144,7 @@ struct ipv6_ac_socklist {
struct ifacaddr6 {
struct in6_addr aca_addr;
- struct inet6_dev *aca_idev;
- struct rt6_info *aca_rt;
+ struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
int aca_users;
refcount_t aca_refcnt;
diff --git a/include/net/ife.h b/include/net/ife.h
index 44b9c00f7223..e117617e3c34 100644
--- a/include/net/ife.h
+++ b/include/net/ife.h
@@ -12,7 +12,8 @@
void *ife_encode(struct sk_buff *skb, u16 metalen);
void *ife_decode(struct sk_buff *skb, u16 *metalen);
-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen);
+void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
+ u16 *dlen, u16 *totlen);
int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
const void *dval);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b68fea022a82..0a6c9e0f2b5a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -23,8 +23,6 @@
#include <net/inet_sock.h>
#include <net/request_sock.h>
-#define INET_CSK_DEBUG 1
-
/* Cancel timers, when they are not required. */
#undef INET_CSK_CLEAR_TIMERS
@@ -77,6 +75,7 @@ struct inet_connection_sock_af_ops {
* @icsk_af_ops Operations which are AF_INET{4,6} specific
* @icsk_ulp_ops Pluggable ULP control hook
* @icsk_ulp_data ULP private data
+ * @icsk_clean_acked Clean acked data hook
* @icsk_listen_portaddr_node hash to the portaddr listener hashtable
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
@@ -102,6 +101,7 @@ struct inet_connection_sock {
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void *icsk_ulp_data;
+ void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:6,
@@ -194,10 +194,6 @@ static inline void inet_csk_delack_init(struct sock *sk)
void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
-#ifdef INET_CSK_DEBUG
-extern const char inet_csk_timer_bug_msg[];
-#endif
-
static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -212,12 +208,9 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
+ } else {
+ pr_debug("inet_csk BUG: unknown timer value\n");
}
-#ifdef INET_CSK_DEBUG
- else {
- pr_debug("%s", inet_csk_timer_bug_msg);
- }
-#endif
}
/*
@@ -230,10 +223,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
struct inet_connection_sock *icsk = inet_csk(sk);
if (when > max_when) {
-#ifdef INET_CSK_DEBUG
pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
sk, what, when, current_text_addr());
-#endif
when = max_when;
}
@@ -247,12 +238,9 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
icsk->icsk_ack.timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+ } else {
+ pr_debug("inet_csk BUG: unknown timer value\n");
}
-#ifdef INET_CSK_DEBUG
- else {
- pr_debug("%s", inet_csk_timer_bug_msg);
- }
-#endif
}
static inline unsigned long
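
Editorial note: the new icsk_clean_acked member is a per-socket hook meant to be filled in by a ULP-style user so it can react when TCP data is acknowledged. The sketch below only shows installing the callback; when and where TCP actually invokes it is not defined by this header, so treat that wiring as an assumption, and my_clean_acked()/my_ulp_attach() are placeholders.

/* Hedged sketch: a ULP-style module installing the new clean-acked
 * hook on a TCP socket it manages. my_clean_acked() and
 * my_ulp_attach() are hypothetical; when TCP invokes the hook is not
 * defined by this header.
 */
#include <net/inet_connection_sock.h>
#include <net/sock.h>

static void my_clean_acked(struct sock *sk, u32 acked_seq)
{
	/* Release per-socket state that covers sequence numbers up to
	 * acked_seq (record queues, DMA mappings, ...).
	 */
}

static void my_ulp_attach(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_clean_acked = my_clean_acked;
}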
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 0a671c32d6b9..83d5b3c2ac42 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -147,6 +147,7 @@ struct inet_cork {
__u8 ttl;
__s16 tos;
char priority;
+ __u16 gso_size;
};
struct inet_cork_full {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c7be1ca8e562..78775038f011 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -61,7 +61,7 @@ struct inet_timewait_sock {
#define tw_cookie __tw_common.skc_cookie
#define tw_dr __tw_common.skc_tw_dr
- int tw_timeout;
+ __u32 tw_mark;
volatile unsigned char tw_substate;
unsigned char tw_rcv_wscale;
diff --git a/include/net/ip.h b/include/net/ip.h
index ecffd843e7b8..0d2281b4b27a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -76,6 +76,7 @@ struct ipcm_cookie {
__u8 ttl;
__s16 tos;
char priority;
+ __u16 gso_size;
};
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -171,7 +172,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
- unsigned int flags);
+ struct inet_cork *cork, unsigned int flags);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
@@ -396,6 +397,9 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
+int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
+ u32 *metrics);
+
u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
@@ -660,4 +664,7 @@ extern int sysctl_icmp_msgs_burst;
int ip_misc_proc_init(void);
#endif
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+ struct netlink_ext_ack *extack);
+
#endif /* _IP_H */
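
Editorial note: ip_metrics_convert() is now declared here so FIB code outside the IPv4 routing core can convert an RTA_METRICS netlink attribute into a flat u32 metrics array. A hedged caller sketch follows; the wrapper name and its error handling are assumptions.

/* Illustrative caller of ip_metrics_convert(): turn an RTA_METRICS
 * attribute (fc_mx) into a metrics array of RTAX_MAX entries. The
 * wrapper and its error handling are assumptions.
 */
#include <net/ip.h>
#include <net/netlink.h>

static int my_parse_route_metrics(struct net *net, struct nlattr *fc_mx,
				  u32 *metrics)
{
	if (!fc_mx)
		return 0;

	return ip_metrics_convert(net, fc_mx, nla_len(fc_mx), metrics);
}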
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5e86fd9dc857..71b9043aa0e7 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -38,6 +38,7 @@
#endif
struct rt6_info;
+struct fib6_info;
struct fib6_config {
u32 fc_table;
@@ -74,12 +75,12 @@ struct fib6_node {
#ifdef CONFIG_IPV6_SUBTREES
struct fib6_node __rcu *subtree;
#endif
- struct rt6_info __rcu *leaf;
+ struct fib6_info __rcu *leaf;
__u16 fn_bit; /* bit key */
__u16 fn_flags;
int fn_sernum;
- struct rt6_info __rcu *rr_ptr;
+ struct fib6_info __rcu *rr_ptr;
struct rcu_head rcu;
};
@@ -94,11 +95,6 @@ struct fib6_gc_args {
#define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1))
#endif
-struct mx6_config {
- const u32 *mx;
- DECLARE_BITMAP(mx_valid, RTAX_MAX);
-};
-
/*
* routing information
*
@@ -127,92 +123,105 @@ struct rt6_exception {
#define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT)
#define FIB6_MAX_DEPTH 5
-struct rt6_info {
- struct dst_entry dst;
- struct rt6_info __rcu *rt6_next;
- struct rt6_info *from;
+struct fib6_nh {
+ struct in6_addr nh_gw;
+ struct net_device *nh_dev;
+ struct lwtunnel_state *nh_lwtstate;
- /*
- * Tail elements of dst_entry (__refcnt etc.)
- * and these elements (rarely used in hot path) are in
- * the same cache line.
- */
- struct fib6_table *rt6i_table;
- struct fib6_node __rcu *rt6i_node;
+ unsigned int nh_flags;
+ atomic_t nh_upper_bound;
+ int nh_weight;
+};
- struct in6_addr rt6i_gateway;
+struct fib6_info {
+ struct fib6_table *fib6_table;
+ struct fib6_info __rcu *fib6_next;
+ struct fib6_node __rcu *fib6_node;
/* Multipath routes:
- * siblings is a list of rt6_info that have the the same metric/weight,
+ * siblings is a list of fib6_info that have the same metric/weight,
* destination, but not the same gateway. nsiblings is just a cache
* to speed up lookup.
*/
- struct list_head rt6i_siblings;
- unsigned int rt6i_nsiblings;
- atomic_t rt6i_nh_upper_bound;
+ struct list_head fib6_siblings;
+ unsigned int fib6_nsiblings;
- atomic_t rt6i_ref;
+ atomic_t fib6_ref;
+ unsigned long expires;
+ struct dst_metrics *fib6_metrics;
+#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
- unsigned int rt6i_nh_flags;
+ struct rt6key fib6_dst;
+ u32 fib6_flags;
+ struct rt6key fib6_src;
+ struct rt6key fib6_prefsrc;
- /* These are in a separate cache line. */
- struct rt6key rt6i_dst ____cacheline_aligned_in_smp;
- u32 rt6i_flags;
+ struct rt6_info * __percpu *rt6i_pcpu;
+ struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+
+ u32 fib6_metric;
+ u8 fib6_protocol;
+ u8 fib6_type;
+ u8 exception_bucket_flushed:1,
+ should_flush:1,
+ dst_nocount:1,
+ dst_nopolicy:1,
+ dst_host:1,
+ unused:3;
+
+ struct fib6_nh fib6_nh;
+ struct rcu_head rcu;
+};
+
+struct rt6_info {
+ struct dst_entry dst;
+ struct fib6_info __rcu *from;
+
+ struct rt6key rt6i_dst;
struct rt6key rt6i_src;
+ struct in6_addr rt6i_gateway;
+ struct inet6_dev *rt6i_idev;
+ u32 rt6i_flags;
struct rt6key rt6i_prefsrc;
struct list_head rt6i_uncached;
struct uncached_list *rt6i_uncached_list;
- struct inet6_dev *rt6i_idev;
- struct rt6_info * __percpu *rt6i_pcpu;
- struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
-
- u32 rt6i_metric;
- u32 rt6i_pmtu;
/* more non-fragment space at head required */
- int rt6i_nh_weight;
unsigned short rt6i_nfheader_len;
- u8 rt6i_protocol;
- u8 exception_bucket_flushed:1,
- should_flush:1,
- unused:6;
};
#define for_each_fib6_node_rt_rcu(fn) \
for (rt = rcu_dereference((fn)->leaf); rt; \
- rt = rcu_dereference(rt->rt6_next))
+ rt = rcu_dereference(rt->fib6_next))
#define for_each_fib6_walker_rt(w) \
for (rt = (w)->leaf; rt; \
- rt = rcu_dereference_protected(rt->rt6_next, 1))
+ rt = rcu_dereference_protected(rt->fib6_next, 1))
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
}
-static inline void rt6_clean_expires(struct rt6_info *rt)
+static inline void fib6_clean_expires(struct fib6_info *f6i)
{
- rt->rt6i_flags &= ~RTF_EXPIRES;
- rt->dst.expires = 0;
+ f6i->fib6_flags &= ~RTF_EXPIRES;
+ f6i->expires = 0;
}
-static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
+static inline void fib6_set_expires(struct fib6_info *f6i,
+ unsigned long expires)
{
- rt->dst.expires = expires;
- rt->rt6i_flags |= RTF_EXPIRES;
+ f6i->expires = expires;
+ f6i->fib6_flags |= RTF_EXPIRES;
}
-static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
+static inline bool fib6_check_expired(const struct fib6_info *f6i)
{
- struct rt6_info *rt;
-
- for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
- if (rt && rt != rt0)
- rt0->dst.expires = rt->dst.expires;
- dst_set_expires(&rt0->dst, timeout);
- rt0->rt6i_flags |= RTF_EXPIRES;
+ if (f6i->fib6_flags & RTF_EXPIRES)
+ return time_after(jiffies, f6i->expires);
+ return false;
}
/* Function to safely get fn->sernum for passed in rt
@@ -220,14 +229,13 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
* Return true if we can get cookie safely
* Return false if not
*/
-static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
- u32 *cookie)
+static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
+ u32 *cookie)
{
struct fib6_node *fn;
bool status = false;
- rcu_read_lock();
- fn = rcu_dereference(rt->rt6i_node);
+ fn = rcu_dereference(f6i->fib6_node);
if (fn) {
*cookie = fn->fn_sernum;
@@ -236,19 +244,22 @@ static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
status = true;
}
- rcu_read_unlock();
return status;
}
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
+ struct fib6_info *from;
u32 cookie = 0;
- if (rt->rt6i_flags & RTF_PCPU ||
- (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
- rt = rt->from;
+ rcu_read_lock();
- rt6_get_cookie_safe(rt, &cookie);
+ from = rcu_dereference(rt->from);
+ if (from && (rt->rt6i_flags & RTF_PCPU ||
+ unlikely(!list_empty(&rt->rt6i_uncached))))
+ fib6_get_cookie_safe(from, &cookie);
+
+ rcu_read_unlock();
return cookie;
}
@@ -262,20 +273,18 @@ static inline void ip6_rt_put(struct rt6_info *rt)
dst_release(&rt->dst);
}
-void rt6_free_pcpu(struct rt6_info *non_pcpu_rt);
+struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
+void fib6_info_destroy_rcu(struct rcu_head *head);
-static inline void rt6_hold(struct rt6_info *rt)
+static inline void fib6_info_hold(struct fib6_info *f6i)
{
- atomic_inc(&rt->rt6i_ref);
+ atomic_inc(&f6i->fib6_ref);
}
-static inline void rt6_release(struct rt6_info *rt)
+static inline void fib6_info_release(struct fib6_info *f6i)
{
- if (atomic_dec_and_test(&rt->rt6i_ref)) {
- rt6_free_pcpu(rt);
- dst_dev_put(&rt->dst);
- dst_release(&rt->dst);
- }
+ if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
+ call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
enum fib6_walk_state {
@@ -291,7 +300,7 @@ enum fib6_walk_state {
struct fib6_walker {
struct list_head lh;
struct fib6_node *root, *node;
- struct rt6_info *leaf;
+ struct fib6_info *leaf;
enum fib6_walk_state state;
unsigned int skip;
unsigned int count;
@@ -355,7 +364,7 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,
struct fib6_entry_notifier_info {
struct fib_notifier_info info; /* must be first */
- struct rt6_info *rt;
+ struct fib6_info *rt;
};
/*
@@ -368,24 +377,49 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
const struct sk_buff *skb,
int flags, pol_lookup_t lookup);
-struct fib6_node *fib6_lookup(struct fib6_node *root,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr);
+/* called with rcu lock held; can return error pointer
+ * caller needs to select path
+ */
+struct fib6_info *fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
+ int flags);
+
+/* called with rcu lock held; caller needs to select path */
+struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
+ int oif, struct flowi6 *fl6, int strict);
+
+struct fib6_info *fib6_multipath_select(const struct net *net,
+ struct fib6_info *match,
+ struct flowi6 *fl6, int oif,
+ const struct sk_buff *skb, int strict);
+
+struct fib6_node *fib6_node_lookup(struct fib6_node *root,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
struct fib6_node *fib6_locate(struct fib6_node *root,
const struct in6_addr *daddr, int dst_len,
const struct in6_addr *saddr, int src_len,
bool exact_match);
-void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
+void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
void *arg);
-int fib6_add(struct fib6_node *root, struct rt6_info *rt,
- struct nl_info *info, struct mx6_config *mxc,
- struct netlink_ext_ack *extack);
-int fib6_del(struct rt6_info *rt, struct nl_info *info);
+int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ struct nl_info *info, struct netlink_ext_ack *extack);
+int fib6_del(struct fib6_info *rt, struct nl_info *info);
+
+static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i)
+{
+ return f6i->fib6_nh.nh_dev;
+}
+
+static inline
+struct lwtunnel_state *fib6_info_nh_lwt(const struct fib6_info *f6i)
+{
+ return f6i->fib6_nh.nh_lwtstate;
+}
-void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
+void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int flags);
void fib6_run_gc(unsigned long expires, struct net *net, bool force);
@@ -394,7 +428,15 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
-int ipv6_route_open(struct inode *inode, struct file *file);
+struct ipv6_route_iter {
+ struct seq_net_private p;
+ struct fib6_walker w;
+ loff_t skip;
+ struct fib6_table *tbl;
+ int sernum;
+};
+
+extern const struct seq_operations ipv6_route_seq_ops;
int call_fib6_notifier(struct notifier_block *nb, struct net *net,
enum fib_event_type event_type,
@@ -408,8 +450,14 @@ void __net_exit fib6_notifier_exit(struct net *net);
unsigned int fib6_tables_seq_read(struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb);
-void fib6_update_sernum(struct rt6_info *rt);
-void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
+void fib6_update_sernum(struct net *net, struct fib6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
+
+void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val);
+static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
+{
+ return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric));
+}
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
int fib6_rules_init(void);
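
Editorial note: with the rt6_info/fib6_info split above, references on a FIB entry are taken with fib6_info_hold() and dropped with fib6_info_release(), which frees through call_rcu() on the final put. The following sketch shows the hold/use/release pattern using only helpers declared in this header; my_inspect_route() and my_hold_and_use() are placeholders.

/* Hedged sketch of the fib6_info reference pattern: take a reference
 * while the entry is used outside the tree, then drop it; the final
 * put frees the entry after an RCU grace period. my_inspect_route()
 * and my_hold_and_use() are placeholders.
 */
#include <linux/printk.h>
#include <net/ip6_fib.h>

static void my_inspect_route(const struct fib6_info *f6i)
{
	struct net_device *dev = fib6_info_nh_dev(f6i);

	pr_debug("route via %s, metric %u\n",
		 dev ? dev->name : "(none)", f6i->fib6_metric);
}

static void my_hold_and_use(struct fib6_info *f6i)
{
	fib6_info_hold(f6i);
	my_inspect_route(f6i);
	fib6_info_release(f6i);		/* may free via call_rcu() */
}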
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 08b132381984..59656fc580df 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,12 +66,6 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
-static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt)
-{
- return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
- RTF_GATEWAY;
-}
-
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
@@ -100,29 +94,29 @@ void ip6_route_cleanup(void);
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack);
-int ip6_ins_rt(struct rt6_info *);
-int ip6_del_rt(struct rt6_info *);
+int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
+int ip6_del_rt(struct net *net, struct fib6_info *f6i);
-void rt6_flush_exceptions(struct rt6_info *rt);
-int rt6_remove_exception_rt(struct rt6_info *rt);
-void rt6_age_exceptions(struct rt6_info *rt, struct fib6_gc_args *gc_args,
+void rt6_flush_exceptions(struct fib6_info *f6i);
+void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
unsigned long now);
-static inline int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
+static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
const struct in6_addr *daddr,
unsigned int prefs,
struct in6_addr *saddr)
{
- struct inet6_dev *idev =
- rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
int err = 0;
- if (rt && rt->rt6i_prefsrc.plen)
- *saddr = rt->rt6i_prefsrc.addr;
- else
- err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
- daddr, prefs, saddr);
+ if (f6i && f6i->fib6_prefsrc.plen) {
+ *saddr = f6i->fib6_prefsrc.addr;
+ } else {
+ struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+
+ err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
+ }
return err;
}
@@ -137,8 +131,9 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
void fib6_force_start_gc(struct net *net);
-struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
- const struct in6_addr *addr, bool anycast);
+struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev,
+ const struct in6_addr *addr, bool anycast,
+ gfp_t gfp_flags);
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
int flags);
@@ -147,9 +142,11 @@ struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
* support functions for ND
*
*/
-struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr,
+struct fib6_info *rt6_get_dflt_router(struct net *net,
+ const struct in6_addr *addr,
struct net_device *dev);
-struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+struct fib6_info *rt6_add_dflt_router(struct net *net,
+ const struct in6_addr *gwaddr,
struct net_device *dev, unsigned int pref);
void rt6_purge_dflt_routers(struct net *net);
@@ -174,14 +171,14 @@ struct rt6_rtnl_dump_arg {
struct net *net;
};
-int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+int rt6_dump_route(struct fib6_info *f6i, void *p_arg);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
-void rt6_multipath_rebalance(struct rt6_info *rt);
+void rt6_multipath_rebalance(struct fib6_info *f6i);
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
@@ -269,12 +266,38 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
return daddr;
}
-static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b)
{
- return a->dst.dev == b->dst.dev &&
- a->rt6i_idev == b->rt6i_idev &&
- ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
- !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+ return a->fib6_nh.nh_dev == b->fib6_nh.nh_dev &&
+ ipv6_addr_equal(&a->fib6_nh.nh_gw, &b->fib6_nh.nh_gw) &&
+ !lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate);
}
+static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+{
+ struct inet6_dev *idev;
+ unsigned int mtu;
+
+ if (dst_metric_locked(dst, RTAX_MTU)) {
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ return mtu;
+ }
+
+ mtu = IPV6_MIN_MTU;
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
+ if (idev)
+ mtu = idev->cnf.mtu6;
+ rcu_read_unlock();
+
+ return mtu;
+}
+
+u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
+ struct in6_addr *saddr);
+
+struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
+ struct net_device *dev, struct sk_buff *skb,
+ const void *daddr);
#endif
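
Editorial note: ip6_dst_mtu_forward() moves into this header as an inline, and ip6_mtu_from_fib6() adds an MTU lookup that works directly on a fib6_info. The sketch below uses the dst-based helper for a forwarding-style size check; it mirrors common forwarding logic but is only illustrative.

/* Illustrative forwarding-style check built on the now-inline
 * ip6_dst_mtu_forward(): decide whether an skb exceeds the egress MTU.
 * This mirrors common forwarding logic but is only a sketch.
 */
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/ip6_route.h>

static bool my_skb_exceeds_forward_mtu(const struct sk_buff *skb)
{
	unsigned int mtu = ip6_dst_mtu_forward(skb_dst(skb));

	return skb->len > mtu && !skb_is_gso(skb);
}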
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 81d0f2107ff1..69c91d1934c1 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -449,4 +449,6 @@ static inline void fib_proc_exit(struct net *net)
}
#endif
+u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
+
#endif /* _NET_FIB_H */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 540a4b4417bf..90ff430f5e9d 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -379,6 +379,17 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
return 0;
}
+static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return iph->ttl;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return ((const struct ipv6hdr *)iph)->hop_limit;
+ else
+ return 0;
+}
+
/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
const struct sk_buff *skb)
@@ -466,12 +477,12 @@ static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstat
return (struct ip_tunnel_info *)lwtstate->data;
}
-extern struct static_key ip_tunnel_metadata_cnt;
+DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
- return static_key_false(&ip_tunnel_metadata_cnt);
+ return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}
void __init ip_tunnel_core_init(void);
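
Editorial note: ip_tunnel_get_ttl() mirrors ip_tunnel_get_dsfield(): it reads the TTL or hop limit from the inner header, keyed off skb->protocol. A small sketch of the usual "inherit TTL from the inner packet" case follows; struct my_tunnel is hypothetical, and it is an assumption that the code runs before the outer header is pushed, so ip_hdr() still points at the inner header.

/* Sketch of the "inherit TTL from the inner packet" case when
 * building an encapsulating header. struct my_tunnel is hypothetical,
 * and the code assumes it runs before the outer header is pushed, so
 * ip_hdr() still points at the inner header.
 */
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>

struct my_tunnel {
	u8 ttl;		/* 0 means inherit from the inner packet */
};

static u8 my_tunnel_outer_ttl(const struct my_tunnel *tun,
			      const struct sk_buff *skb)
{
	if (tun->ttl)
		return tun->ttl;

	return ip_tunnel_get_ttl(ip_hdr(skb), skb);
}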
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index eb0bec043c96..a0bec23c6d5e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -41,18 +41,6 @@ static inline struct netns_ipvs *net_ipvs(struct net* net)
return net->ipvs;
}
-/* This one needed for single_open_net since net is stored directly in
- * private not as a struct i.e. seq_file_net can't be used.
- */
-static inline struct net *seq_file_single_net(struct seq_file *seq)
-{
-#ifdef CONFIG_NET_NS
- return (struct net *)seq->private;
-#else
- return &init_net;
-#endif
-}
-
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
@@ -643,6 +631,7 @@ struct ip_vs_service {
/* alternate persistence engine */
struct ip_vs_pe __rcu *pe;
+ int conntrack_afmask;
struct rcu_head rcu_head;
};
@@ -668,6 +657,7 @@ struct ip_vs_dest {
volatile unsigned int flags; /* dest status flags */
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
+ atomic_t last_weight; /* server latest weight */
refcount_t refcnt; /* reference counter */
struct ip_vs_stats stats; /* statistics */
@@ -762,14 +752,14 @@ struct ip_vs_app {
* 2=Mangled but checksum was not updated
*/
int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
- struct sk_buff *, int *diff);
+ struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);
/* input hook: Process packet in outin direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
- struct sk_buff *, int *diff);
+ struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);
/* ip_vs_app initializer */
int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);
@@ -1327,8 +1317,10 @@ int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16
int ip_vs_app_inc_get(struct ip_vs_app *inc);
void ip_vs_app_inc_put(struct ip_vs_app *inc);
-int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
-int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
+ struct ip_vs_iphdr *ipvsh);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
+ struct ip_vs_iphdr *ipvsh);
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
@@ -1620,6 +1612,35 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
return false;
}
+static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ int afmask = (svc->af == AF_INET6) ? 2 : 1;
+ int ret = 0;
+
+ if (!(svc->conntrack_afmask & afmask)) {
+ ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
+ if (ret >= 0)
+ svc->conntrack_afmask |= afmask;
+ }
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ int afmask = (svc->af == AF_INET6) ? 2 : 1;
+
+ if (svc->conntrack_afmask & afmask) {
+ nf_ct_netns_put(svc->ipvs->net, svc->af);
+ svc->conntrack_afmask &= ~afmask;
+ }
+#endif
+}
+
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 836f31af1369..16475c269749 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -298,6 +298,7 @@ struct ipcm6_cookie {
__s16 tclass;
__s8 dontfrag;
struct ipv6_txoptions *opt;
+ __u16 gso_size;
};
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
@@ -906,6 +907,11 @@ static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
}
+static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
+{
+ return fl6->flowlabel & IPV6_FLOWLABEL_MASK;
+}
+
/*
* Prototypes exported by ipv6
*/
@@ -950,6 +956,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
+ struct inet_cork_full *cork,
const struct sockcm_cookie *sockc);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
@@ -958,8 +965,6 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
&inet6_sk(sk)->cork);
}
-unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
-
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
@@ -1044,8 +1049,6 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
int inet6_release(struct socket *sock);
-int __inet6_bind(struct sock *sock, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f4c21b5a1242..b0eaeb02d46d 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,8 +153,6 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
- poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 5c40f118c0fa..df528a623548 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d2279b2d61aa..851a5e19ae32 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2080,7 +2080,7 @@ struct ieee80211_txq {
* virtual interface might not be given air time for the transmission of
* the frame, as it is not synced with the AP/P2P GO yet, and thus the
* deauthentication frame might not be transmitted.
- >
+ *
* @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
* support QoS NDP for AP probing - that's most likely a driver bug.
*
@@ -3378,6 +3378,8 @@ enum ieee80211_reconfig_type {
* frame in case that no beacon was heard from the AP/P2P GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
+ * If duration is greater than zero, mac80211 hints to the driver the
+ * duration for which the operation is requested.
* The callback is optional and can (should!) sleep.
*
* @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
@@ -3697,7 +3699,8 @@ struct ieee80211_ops {
u32 sset, u8 *data);
void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ u16 duration);
void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -4450,6 +4453,19 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
/**
+ * ieee80211_csa_set_counter - request mac80211 to set csa counter
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @counter: the new value for the counter
+ *
+ * The csa counter can be changed by the device; this API should be
+ * used by the device driver to update the csa counter in mac80211.
+ *
+ * It should never be used together with ieee80211_csa_update_counter(),
+ * as it will cause a race condition around the counter value.
+ */
+void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
+
+/**
* ieee80211_csa_finish - notify mac80211 about channel switch
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index e421f86af043..6c1eecd56a4d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -246,6 +246,7 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
+#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
@@ -526,5 +527,21 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
} while (read_seqretry(&n->ha_lock, seq));
}
-
+static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
+ int *notify)
+{
+ u8 ndm_flags = 0;
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN))
+ return;
+
+ ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
+ if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
+ if (ndm_flags & NTF_EXT_LEARNED)
+ neigh->flags |= NTF_EXT_LEARNED;
+ else
+ neigh->flags &= ~NTF_EXT_LEARNED;
+ *notify = 1;
+ }
+}
#endif
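
Editorial note: neigh_update_ext_learned() only acts on administrative updates (NEIGH_UPDATE_F_ADMIN) and toggles NTF_EXT_LEARNED on the neighbour, setting *notify when the flag actually changes. A short usage sketch follows; my_neigh_notify() and my_neigh_admin_update() are placeholders.

/* Sketch of wiring neigh_update_ext_learned() into an update path:
 * only notify when the externally-learned bit actually changed.
 * my_neigh_notify() and my_neigh_admin_update() are placeholders.
 */
#include <net/neighbour.h>

static void my_neigh_notify(struct neighbour *neigh)
{
	/* e.g. schedule an RTM_NEWNEIGH notification */
}

static void my_neigh_admin_update(struct neighbour *neigh, u32 flags)
{
	int notify = 0;

	neigh_update_ext_learned(neigh, flags, &notify);

	if (notify)
		my_neigh_notify(neigh);
}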
diff --git a/include/net/net_failover.h b/include/net/net_failover.h
new file mode 100644
index 000000000000..b12a1c469d1c
--- /dev/null
+++ b/include/net/net_failover.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _NET_FAILOVER_H
+#define _NET_FAILOVER_H
+
+#include <net/failover.h>
+
+/* failover state */
+struct net_failover_info {
+ /* primary netdev with same MAC */
+ struct net_device __rcu *primary_dev;
+
+ /* standby netdev */
+ struct net_device __rcu *standby_dev;
+
+ /* primary netdev stats */
+ struct rtnl_link_stats64 primary_stats;
+
+ /* standby netdev stats */
+ struct rtnl_link_stats64 standby_stats;
+
+ /* aggregated stats */
+ struct rtnl_link_stats64 failover_stats;
+
+ /* spinlock while updating stats */
+ spinlock_t stats_lock;
+};
+
+struct failover *net_failover_create(struct net_device *standby_dev);
+void net_failover_destroy(struct failover *failover);
+
+#define FAILOVER_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+#define FAILOVER_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+
+#endif /* _NET_FAILOVER_H */
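
Editorial note: net_failover.h layers the concrete 3-netdev (primary/standby/failover) model on top of the generic failover core. The probe/remove sketch below pairs net_failover_create() with net_failover_destroy(); treating the return value as ERR_PTR-style is an assumption based on the usual kernel convention, and the my_* names are placeholders.

/* Sketch of pairing net_failover_create()/net_failover_destroy() in a
 * driver's probe/remove path. Treating the return value as an
 * ERR_PTR-style pointer is an assumption; the my_* names are
 * placeholders.
 */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/net_failover.h>

static struct failover *my_failover;

static int my_probe_failover(struct net_device *standby_dev)
{
	my_failover = net_failover_create(standby_dev);
	if (IS_ERR(my_failover))
		return PTR_ERR(my_failover);

	return 0;
}

static void my_remove_failover(void)
{
	net_failover_destroy(my_failover);
	my_failover = NULL;
}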
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index ebd869473603..cd24be4c4a99 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -6,7 +6,7 @@
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
- const struct nf_nat_range *range,
+ const struct nf_nat_range2 *range,
const struct net_device *out);
void nf_nat_masquerade_ipv4_register_notifier(void);
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 1ed4f2631ed6..0c3b5ebf0bb8 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -3,7 +3,7 @@
#define _NF_NAT_MASQUERADE_IPV6_H_
unsigned int
-nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
const struct net_device *out);
void nf_nat_masquerade_ipv6_register_notifier(void);
void nf_nat_masquerade_ipv6_unregister_notifier(void);
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index e61184fbfb71..3a188a0923a3 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -13,4 +13,16 @@ unsigned int nf_conncount_count(struct net *net,
const u32 *key,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
+
+unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone,
+ bool *addit);
+
+bool nf_conncount_add(struct hlist_head *head,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
+
+void nf_conncount_cache_free(struct hlist_head *hhead);
+
#endif
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 833752dd0c58..ba9fa4592f2b 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/dst.h>
struct nf_flowtable;
@@ -13,25 +14,24 @@ struct nf_flowtable;
struct nf_flowtable_type {
struct list_head list;
int family;
- void (*gc)(struct work_struct *work);
+ int (*init)(struct nf_flowtable *ft);
void (*free)(struct nf_flowtable *ft);
- const struct rhashtable_params *params;
nf_hookfn *hook;
struct module *owner;
};
struct nf_flowtable {
+ struct list_head list;
struct rhashtable rhashtable;
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
};
enum flow_offload_tuple_dir {
- FLOW_OFFLOAD_DIR_ORIGINAL,
- FLOW_OFFLOAD_DIR_REPLY,
- __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY,
+ FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
+ FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
+ FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
};
-#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1)
struct flow_offload_tuple {
union {
@@ -55,6 +55,8 @@ struct flow_offload_tuple {
int oifidx;
+ u16 mtu;
+
struct dst_entry *dst_cache;
};
@@ -66,6 +68,7 @@ struct flow_offload_tuple_rhash {
#define FLOW_OFFLOAD_SNAT 0x1
#define FLOW_OFFLOAD_DNAT 0x2
#define FLOW_OFFLOAD_DYING 0x4
+#define FLOW_OFFLOAD_TEARDOWN 0x8
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
@@ -98,11 +101,14 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
+int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);
-void nf_flow_offload_work_gc(struct work_struct *work);
-extern const struct rhashtable_params nf_flow_offload_rhash_params;
-void flow_offload_dead(struct flow_offload *flow);
+void flow_offload_teardown(struct flow_offload *flow);
+static inline void flow_offload_dead(struct flow_offload *flow)
+{
+ flow->flags |= FLOW_OFFLOAD_DYING;
+}
int nf_flow_snat_port(const struct flow_offload *flow,
struct sk_buff *skb, unsigned int thoff,
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 207a467e7ca6..a17eb2f8d40e 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -39,7 +39,7 @@ struct nf_conn_nat {
/* Set up the info structure to map into this range. */
unsigned int nf_nat_setup_info(struct nf_conn *ct,
- const struct nf_nat_range *range,
+ const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype);
extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
@@ -75,4 +75,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
#endif
}
+int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
+ const struct nf_hook_ops *nat_ops, unsigned int ops_count);
+void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
+ unsigned int ops_count);
#endif
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 235bd0e9a5aa..dc7cd0440229 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -11,6 +11,10 @@
unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned int hooknum, struct sk_buff *skb);
+unsigned int
+nf_nat_inet_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family);
static inline int nf_nat_initialized(struct nf_conn *ct,
@@ -22,11 +26,4 @@ static inline int nf_nat_initialized(struct nf_conn *ct,
return ct->status & IPS_DST_NAT_DONE;
}
-struct nlattr;
-
-extern int
-(*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
- enum nf_nat_manip_type manip,
- const struct nlattr *attr);
-
#endif /* _NF_NAT_CORE_H */
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index ce7c2b4e64bb..d300b8f03972 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -7,7 +7,7 @@ struct nf_nat_l3proto {
u8 l3proto;
bool (*in_range)(const struct nf_conntrack_tuple *t,
- const struct nf_nat_range *range);
+ const struct nf_nat_range2 *range);
u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
@@ -33,7 +33,7 @@ struct nf_nat_l3proto {
struct flowi *fl);
int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range *range);
+ struct nf_nat_range2 *range);
};
int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
@@ -44,66 +44,14 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum);
-unsigned int nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
-
-unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
-
-unsigned int nf_nat_ipv4_local_fn(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
-
-unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
-
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum, unsigned int hdrlen);
-unsigned int nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
-
-unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
-
-unsigned int nf_nat_ipv6_local_fn(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
+int nf_nat_l3proto_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_l3proto_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
-unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state,
- unsigned int (*do_chain)(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct nf_conn *ct));
+int nf_nat_l3proto_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_l3proto_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index 67835ff8a2d9..b4d6b29bca62 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -34,12 +34,12 @@ struct nf_nat_l4proto {
*/
void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct);
int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range *range);
+ struct nf_nat_range2 *range);
};
/* Protocol registration. */
@@ -72,11 +72,11 @@ bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct, u16 *rover);
int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range *range);
+ struct nf_nat_range2 *range);
#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
index 5ddabb08c472..c129aacc8ae8 100644
--- a/include/net/netfilter/nf_nat_redirect.h
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -7,7 +7,7 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
const struct nf_nat_ipv4_multi_range_compat *mr,
unsigned int hooknum);
unsigned int
-nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum);
#endif /* _NF_NAT_REDIRECT_H_ */
diff --git a/include/net/netfilter/nf_socket.h b/include/net/netfilter/nf_socket.h
index 8230fefff9f5..f9d7bee9bd4e 100644
--- a/include/net/netfilter/nf_socket.h
+++ b/include/net/netfilter/nf_socket.h
@@ -2,22 +2,7 @@
#ifndef _NF_SOCK_H_
#define _NF_SOCK_H_
-struct net_device;
-struct sk_buff;
-struct sock;
-struct net;
-
-static inline bool nf_sk_is_transparent(struct sock *sk)
-{
- switch (sk->sk_state) {
- case TCP_TIME_WAIT:
- return inet_twsk(sk)->tw_transparent;
- case TCP_NEW_SYN_RECV:
- return inet_rsk(inet_reqsk(sk))->no_srccheck;
- default:
- return inet_sk(sk)->transparent;
- }
-}
+#include <net/sock.h>
struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
const struct net_device *indev);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index cd368d1b8cb8..08c005ce56e9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -9,6 +9,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/u64_stats_sync.h>
+#include <linux/rhashtable.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
@@ -170,6 +171,7 @@ struct nft_data_desc {
int nft_data_init(const struct nft_ctx *ctx,
struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
void nft_data_release(const struct nft_data *data, enum nft_data_types type);
int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
enum nft_data_types type, unsigned int len);
@@ -275,23 +277,6 @@ struct nft_set_estimate {
enum nft_set_class space;
};
-/**
- * struct nft_set_type - nf_tables set type
- *
- * @select_ops: function to select nft_set_ops
- * @ops: default ops, used when no select_ops functions is present
- * @list: used internally
- * @owner: module reference
- */
-struct nft_set_type {
- const struct nft_set_ops *(*select_ops)(const struct nft_ctx *,
- const struct nft_set_desc *desc,
- u32 flags);
- const struct nft_set_ops *ops;
- struct list_head list;
- struct module *owner;
-};
-
struct nft_set_ext;
struct nft_expr;
@@ -310,7 +295,6 @@ struct nft_expr;
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
* @elemsize: element private size
- * @features: features supported by the implementation
*/
struct nft_set_ops {
bool (*lookup)(const struct net *net,
@@ -359,11 +343,26 @@ struct nft_set_ops {
const struct nft_set_desc *desc,
const struct nlattr * const nla[]);
void (*destroy)(const struct nft_set *set);
+ void (*gc_init)(const struct nft_set *set);
unsigned int elemsize;
+};
+
+/**
+ * struct nft_set_type - nf_tables set type
+ *
+ * @ops: set ops for this type
+ * @list: used internally
+ * @owner: module reference
+ * @features: features supported by the implementation
+ */
+struct nft_set_type {
+ const struct nft_set_ops ops;
+ struct list_head list;
+ struct module *owner;
u32 features;
- const struct nft_set_type *type;
};
+#define to_set_type(o) container_of(o, struct nft_set_type, ops)
int nft_register_set(struct nft_set_type *type);
void nft_unregister_set(struct nft_set_type *type);
@@ -373,6 +372,8 @@ void nft_unregister_set(struct nft_set_type *type);
*
* @list: table set list node
* @bindings: list of set bindings
+ * @table: table this set belongs to
+ * @net: netnamespace this set belongs to
* @name: name of the set
* @handle: unique handle of the set
* @ktype: key type (numeric type defined by userspace, not used in the kernel)
@@ -396,6 +397,8 @@ void nft_unregister_set(struct nft_set_type *type);
struct nft_set {
struct list_head list;
struct list_head bindings;
+ struct nft_table *table;
+ possible_net_t net;
char *name;
u64 handle;
u32 ktype;
@@ -589,7 +592,7 @@ static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
}
-static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext)
+static inline u64 *nft_set_ext_expiration(const struct nft_set_ext *ext)
{
return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
}
@@ -607,7 +610,7 @@ static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
{
return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_is_before_eq_jiffies(*nft_set_ext_expiration(ext));
+ time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
}
static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
@@ -711,6 +714,7 @@ struct nft_expr_type {
};
#define NFT_EXPR_STATEFUL 0x1
+#define NFT_EXPR_GC 0x2
/**
* struct nft_expr_ops - nf_tables expression operations
@@ -736,13 +740,21 @@ struct nft_expr_ops {
int (*init)(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[]);
+ void (*activate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ void (*deactivate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
void (*destroy)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
+ void (*destroy_clone)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
int (*dump)(struct sk_buff *skb,
const struct nft_expr *expr);
int (*validate)(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
+ bool (*gc)(struct net *net,
+ const struct nft_expr *expr);
const struct nft_expr_type *type;
void *data;
};
@@ -849,6 +861,7 @@ enum nft_chain_flags {
*
* @rules: list of rules in the chain
* @list: used internally
+ * @rhlhead: used internally
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
@@ -857,8 +870,11 @@ enum nft_chain_flags {
* @name: name of the chain
*/
struct nft_chain {
+ struct nft_rule *__rcu *rules_gen_0;
+ struct nft_rule *__rcu *rules_gen_1;
struct list_head rules;
struct list_head list;
+ struct rhlist_head rhlhead;
struct nft_table *table;
u64 handle;
u32 use;
@@ -866,8 +882,13 @@ struct nft_chain {
u8 flags:6,
genmask:2;
char *name;
+
+ /* Only used during control plane commit phase: */
+ struct nft_rule **rules_next;
};
+int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
+
enum nft_chain_types {
NFT_CHAIN_T_DEFAULT = 0,
NFT_CHAIN_T_ROUTE,
@@ -884,8 +905,8 @@ enum nft_chain_types {
* @owner: module owner
* @hook_mask: mask of valid hooks
* @hooks: array of hook functions
- * @init: chain initialization function
- * @free: chain release function
+ * @ops_register: base chain register function
+ * @ops_unregister: base chain unregister function
*/
struct nft_chain_type {
const char *name;
@@ -894,8 +915,8 @@ struct nft_chain_type {
struct module *owner;
unsigned int hook_mask;
nf_hookfn *hooks[NF_MAX_HOOKS];
- int (*init)(struct nft_ctx *ctx);
- void (*free)(struct nft_ctx *ctx);
+ int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
+ void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
};
int nft_chain_validate_dependency(const struct nft_chain *chain,
@@ -947,7 +968,8 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* struct nft_table - nf_tables table
*
* @list: used internally
- * @chains: chains in the table
+ * @chains_ht: chains in the table
+ * @chains: same, for stable walks
* @sets: sets in the table
* @objects: stateful objects in the table
* @flowtables: flow tables in the table
@@ -961,6 +983,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
*/
struct nft_table {
struct list_head list;
+ struct rhltable chains_ht;
struct list_head chains;
struct list_head sets;
struct list_head objects;
@@ -1015,9 +1038,9 @@ static inline void *nft_obj_data(const struct nft_object *obj)
#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr))
-struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
- const struct nlattr *nla, u32 objtype,
- u8 genmask);
+struct nft_object *nft_obj_lookup(const struct nft_table *table,
+ const struct nlattr *nla, u32 objtype,
+ u8 genmask);
void nft_obj_notify(struct net *net, struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
@@ -1062,7 +1085,8 @@ struct nft_object_ops {
int (*init)(const struct nft_ctx *ctx,
const struct nlattr *const tb[],
struct nft_object *obj);
- void (*destroy)(struct nft_object *obj);
+ void (*destroy)(const struct nft_ctx *ctx,
+ struct nft_object *obj);
int (*dump)(struct sk_buff *skb,
struct nft_object *obj,
bool reset);
@@ -1106,12 +1130,9 @@ struct nft_flowtable {
struct nf_flowtable data;
};
-struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
- const struct nlattr *nla,
- u8 genmask);
-void nft_flow_table_iterate(struct net *net,
- void (*iter)(struct nf_flowtable *flowtable, void *data),
- void *data);
+struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+ const struct nlattr *nla,
+ u8 genmask);
void nft_register_flowtable_type(struct nf_flowtable_type *type);
void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
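
Editorial note: struct nft_set_type now embeds its nft_set_ops directly (select_ops is gone) and carries the feature flags itself, with to_set_type() recovering the type from the ops. The sketch below shows the shape of a backend declaration and registration under this layout; all my_set_* names are placeholders and most ops are deliberately omitted, so this is not a working set backend.

/* Hedged sketch of the reworked set-type layout: ops and features now
 * live inside struct nft_set_type. The my_set_* names are
 * placeholders, and the ops block is intentionally incomplete, so this
 * is not a working backend.
 */
#include <linux/module.h>
#include <net/netfilter/nf_tables.h>

static void my_set_destroy(const struct nft_set *set)
{
	/* free per-set backend state here */
}

static struct nft_set_type my_set_type = {
	.owner		= THIS_MODULE,
	.features	= NFT_SET_MAP,	/* example feature flag */
	.ops		= {
		.destroy	= my_set_destroy,
		/* .lookup, .insert, .remove, ... omitted in this sketch */
	},
};

static int __init my_set_module_init(void)
{
	return nft_register_set(&my_set_type);
}

static void __exit my_set_module_exit(void)
{
	nft_unregister_set(&my_set_type);
}

module_init(my_set_module_init);
module_exit(my_set_module_exit);
MODULE_LICENSE("GPL");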
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index ea5aab568be8..e0c0c2558ec4 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -2,6 +2,8 @@
#ifndef _NET_NF_TABLES_CORE_H
#define _NET_NF_TABLES_CORE_H
+#include <net/netfilter/nf_tables.h>
+
extern struct nft_expr_type nft_imm_type;
extern struct nft_expr_type nft_cmp_type;
extern struct nft_expr_type nft_lookup_type;
@@ -10,6 +12,9 @@ extern struct nft_expr_type nft_byteorder_type;
extern struct nft_expr_type nft_payload_type;
extern struct nft_expr_type nft_dynset_type;
extern struct nft_expr_type nft_range_type;
+extern struct nft_expr_type nft_meta_type;
+extern struct nft_expr_type nft_rt_type;
+extern struct nft_expr_type nft_exthdr_type;
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
@@ -20,6 +25,12 @@ struct nft_cmp_fast_expr {
u8 len;
};
+struct nft_immediate_expr {
+ struct nft_data data;
+ enum nft_registers dreg:8;
+ u8 dlen;
+};
+
/* Calculate the mask for the nft_cmp_fast expression. On big endian the
* mask needs to include the *upper* bytes when interpreting that data as
* something smaller than the full u32, therefore a cpu_to_le32 is done.
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
new file mode 100644
index 000000000000..9754a50ecde9
--- /dev/null
+++ b/include/net/netfilter/nf_tproxy.h
@@ -0,0 +1,113 @@
+#ifndef _NF_TPROXY_H_
+#define _NF_TPROXY_H_
+
+#include <net/tcp.h>
+
+enum nf_tproxy_lookup_t {
+ NF_TPROXY_LOOKUP_LISTENER,
+ NF_TPROXY_LOOKUP_ESTABLISHED,
+};
+
+static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
+{
+ if (inet_sk_transparent(sk))
+ return true;
+
+ sock_gen_put(sk);
+ return false;
+}
+
+__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
+
+/**
+ * nf_tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
+ * @skb: The skb being processed.
+ * @laddr: IPv4 address to redirect to or zero.
+ * @lport: TCP port to redirect to or zero.
+ * @sk: The TIME_WAIT TCP socket found by the lookup.
+ *
+ * We have to handle SYN packets arriving at TIME_WAIT sockets
+ * differently: instead of reopening the connection, we should
+ * redirect the new connection to the proxy if there's a listener
+ * socket present.
+ *
+ * nf_tproxy_handle_time_wait4() consumes the socket reference passed in.
+ *
+ * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * no such listener is found, or NULL if the TCP header is incomplete.
+ */
+struct sock *
+nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
+ __be32 laddr, __be16 lport, struct sock *sk);
+
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the sockets are matched in
+ * the following preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ *   _packet_ tuple, it is returned, on the assumption that the
+ *   redirection already took place and we are processing a packet
+ *   belonging to an established connection
+ *
+ * - match: if there's a listening socket matching the redirection
+ *   (i.e. the port and IP of the connection), it is returned,
+ *   regardless of whether it was bound to 0.0.0.0 or an explicit
+ *   address. The reasoning is that if there's an explicit rule, it
+ *   does not really matter whether the listener is bound to an
+ *   interface or to 0. The user already stated that they want
+ *   redirection (since they added the rule).
+ *
+ * Please note that there's an overlap between what a TPROXY target
+ * and a socket match will match. Normally, if you have both rules,
+ * the "socket" match will be the first one, effectively sending all
+ * packets belonging to established connections through that one.
+ */
+struct sock *
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+ const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type);
+
+const struct in6_addr *
+nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
+ const struct in6_addr *daddr);
+
+/**
+ * nf_tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections
+ * @skb: The skb being processed.
+ * @tproto: Transport protocol.
+ * @thoff: Transport protocol header offset.
+ * @net: Network namespace.
+ * @laddr: IPv6 address to redirect to.
+ * @lport: TCP port to redirect to or zero.
+ * @sk: The TIME_WAIT TCP socket found by the lookup.
+ *
+ * We have to handle SYN packets arriving at TIME_WAIT sockets
+ * differently: instead of reopening the connection, we should
+ * redirect the new connection to the proxy if there's a listener
+ * socket present.
+ *
+ * nf_tproxy_handle_time_wait6() consumes the socket reference passed in.
+ *
+ * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * no such listener is found, or NULL if the TCP header is incomplete.
+ */
+struct sock *
+nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
+ struct net *net,
+ const struct in6_addr *laddr,
+ const __be16 lport,
+ struct sock *sk);
+
+struct sock *
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+ const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type);
+
+#endif /* _NF_TPROXY_H_ */
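The prototypes above are bare declarations, so a brief illustration may help. The sketch below combines nf_tproxy_get_sock_v4(), nf_tproxy_handle_time_wait4() and nf_tproxy_sk_is_transparent() the way a TPROXY-style hook might; the hook function itself, its parameters and the NF_ACCEPT/NF_DROP handling are illustrative assumptions, not part of this patch.

#include <linux/ip.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_tproxy.h>

/* Hypothetical hook fragment: find a socket for an IPv4 TCP packet. */
static unsigned int example_tproxy_v4(struct net *net, struct sk_buff *skb,
				      const struct iphdr *iph,
				      struct tcphdr *tcph,
				      __be32 laddr, __be16 lport)
{
	struct sock *sk;

	/* Prefer a fully established socket matching the packet tuple. */
	sk = nf_tproxy_get_sock_v4(net, skb, tcph, iph->protocol,
				   iph->saddr, iph->daddr,
				   tcph->source, tcph->dest,
				   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);

	/* A SYN hitting a TIME_WAIT socket is redirected to a listener,
	 * if one exists; the helper consumes the TIME_WAIT reference.
	 */
	if (sk && sk->sk_state == TCP_TIME_WAIT && tcph->syn && !tcph->rst)
		sk = nf_tproxy_handle_time_wait4(net, skb, laddr, lport, sk);

	/* nf_tproxy_sk_is_transparent() drops the reference on failure. */
	if (sk && nf_tproxy_sk_is_transparent(sk))
		return NF_ACCEPT;	/* a real hook would assign sk to the skb */

	return NF_DROP;
}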
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
index 612cfb63ac68..ea32a7d3cf1b 100644
--- a/include/net/netfilter/nfnetlink_log.h
+++ b/include/net/netfilter/nfnetlink_log.h
@@ -1,18 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _KER_NFNETLINK_LOG_H
-#define _KER_NFNETLINK_LOG_H
-
-void
-nfulnl_log_packet(struct net *net,
- u_int8_t pf,
- unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *li_user,
- const char *prefix);
-
-#define NFULNL_COPY_DISABLED 0xff
-
-#endif /* _KER_NFNETLINK_LOG_H */
-
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
deleted file mode 100644
index 4d9d512984b2..000000000000
--- a/include/net/netfilter/nft_dup.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NFT_DUP_H_
-#define _NFT_DUP_H_
-
-struct nft_dup_inet {
- enum nft_registers sreg_addr:8;
- enum nft_registers sreg_dev:8;
-};
-
-#endif /* _NFT_DUP_H_ */
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
deleted file mode 100644
index 5c69e9b09388..000000000000
--- a/include/net/netfilter/nft_meta.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NFT_META_H_
-#define _NFT_META_H_
-
-struct nft_meta {
- enum nft_meta_keys key:8;
- union {
- enum nft_registers dreg:8;
- enum nft_registers sreg:8;
- };
-};
-
-extern const struct nla_policy nft_meta_policy[];
-
-int nft_meta_get_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[]);
-
-int nft_meta_set_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[]);
-
-int nft_meta_get_dump(struct sk_buff *skb,
- const struct nft_expr *expr);
-
-int nft_meta_set_dump(struct sk_buff *skb,
- const struct nft_expr *expr);
-
-void nft_meta_get_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt);
-
-void nft_meta_set_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt);
-
-void nft_meta_set_destroy(const struct nft_ctx *ctx,
- const struct nft_expr *expr);
-
-int nft_meta_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
-
-#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 8491bc9c86b1..661348f23ea5 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -160,6 +160,8 @@ struct netns_ipv4 {
int sysctl_tcp_pacing_ca_ratio;
int sysctl_tcp_wmem[3];
int sysctl_tcp_rmem[3];
+ int sysctl_tcp_comp_sack_nr;
+ unsigned long sysctl_tcp_comp_sack_delay_ns;
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
int sysctl_tcp_fastopen;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c29f09cfc9d7..c978a31b0f84 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -43,6 +43,7 @@ struct netns_sysctl_ipv6 {
int max_hbh_opts_cnt;
int max_dst_opts_len;
int max_hbh_opts_len;
+ int seg6_flowlabel;
};
struct netns_ipv6 {
@@ -60,7 +61,8 @@ struct netns_ipv6 {
#endif
struct xt_table *ip6table_nat;
#endif
- struct rt6_info *ip6_null_entry;
+ struct fib6_info *fib6_null_entry;
+ struct rt6_info *ip6_null_entry;
struct rt6_statistics *rt6_stats;
struct timer_list ip6_fib_timer;
struct hlist_head *fib_table_hash;
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 48134353411d..94767ea3a490 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -4,13 +4,12 @@
#include <linux/list.h>
-struct nft_af_info;
-
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
unsigned int base_seq;
u8 gencursor;
+ u8 validate_state;
};
#endif
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 0dad2dd5f9d7..5a0714ff500f 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/refcount.h>
+#include <linux/seq_file.h>
#define NR_NETWORK_LEN 15
#define NR_TRANSPORT_LEN 5
@@ -216,8 +217,8 @@ struct net_device *nr_dev_get(ax25_address *);
int nr_rt_ioctl(unsigned int, void __user *);
void nr_link_failed(ax25_cb *, int);
int nr_route_frame(struct sk_buff *, ax25_cb *);
-extern const struct file_operations nr_nodes_fops;
-extern const struct file_operations nr_neigh_fops;
+extern const struct seq_operations nr_node_seqops;
+extern const struct seq_operations nr_neigh_seqops;
void nr_rt_free(void);
/* nr_subr.c */
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
new file mode 100644
index 000000000000..694d055e01ef
--- /dev/null
+++ b/include/net/page_pool.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * page_pool.h
+ * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
+ * Copyright (C) 2016 Red Hat, Inc.
+ */
+
+/**
+ * DOC: page_pool allocator
+ *
+ * This page_pool allocator is optimized for the XDP mode that
+ * uses one frame per page, but it has fallbacks that act like the
+ * regular page allocator APIs.
+ *
+ * Basic use involves replacing alloc_pages() calls with the
+ * page_pool_alloc_pages() call. Drivers should likely use
+ * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
+ *
+ * If page_pool handles DMA mapping (using page->private), then the API
+ * user is responsible for invoking page_pool_put_page() once. In case
+ * of an elevated refcnt, the DMA state is released, assuming other
+ * users of the page will eventually call put_page().
+ *
+ * If no DMA mapping is done, then it can act as a shim layer that
+ * falls through to alloc_pages(). As no state is kept on the page, the
+ * regular put_page() call is sufficient.
+ */
+#ifndef _NET_PAGE_POOL_H
+#define _NET_PAGE_POOL_H
+
+#include <linux/mm.h> /* Needed by ptr_ring */
+#include <linux/ptr_ring.h>
+#include <linux/dma-direction.h>
+
+#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
+#define PP_FLAG_ALL PP_FLAG_DMA_MAP
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark are related to the networking
+ * use case. The NAPI budget is 64 packets. After a NAPI poll the RX
+ * ring is usually refilled, so at most 64 elements will be consumed,
+ * which is a natural maximum for the number of objects in the cache.
+ *
+ * Room is kept for more objects to serve the XDP_DROP use case, since
+ * XDP_DROP can recycle objects directly into this array, as it shares
+ * the same softirq/NAPI protection. If the cache is already full (or
+ * partly full), those XDP_DROP recycles would have to take a slower
+ * code path.
+ */
+#define PP_ALLOC_CACHE_SIZE 128
+#define PP_ALLOC_CACHE_REFILL 64
+struct pp_alloc_cache {
+ u32 count;
+ void *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+struct page_pool_params {
+ unsigned int flags;
+ unsigned int order;
+ unsigned int pool_size;
+ int nid; /* NUMA node id to allocate pages from */
+ struct device *dev; /* device, for DMA pre-mapping purposes */
+ enum dma_data_direction dma_dir; /* DMA mapping direction */
+};
+
+struct page_pool {
+ struct rcu_head rcu;
+ struct page_pool_params p;
+
+ /*
+ * Data structure for allocation side
+ *
+ * The driver's allocation side usually already performs some
+ * kind of resource protection. Piggyback on this protection,
+ * and require the driver to protect the allocation side.
+ *
+ * For NIC drivers this means allocating a page_pool per
+ * RX-queue, as the RX-queue is already protected by
+ * softirq/BH scheduling and napi_schedule(), which
+ * guarantees that a single napi_struct will only be
+ * scheduled on a single CPU (see napi_schedule).
+ */
+ struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+ /* Data structure for storing recycled pages.
+ *
+ * Returning/freeing pages is more complicated synchronization-
+ * wise, because frees can happen on remote CPUs, with no
+ * association with the allocation resource.
+ *
+ * Use ptr_ring, as it separates consumer and producer
+ * efficiently, in a way that doesn't bounce cache lines.
+ *
+ * TODO: Implement bulk return pages into this structure.
+ */
+ struct ptr_ring ring;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+
+static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_pages(pool, gfp);
+}
+
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+
+void page_pool_destroy(struct page_pool *pool);
+
+/* Never call this directly, use helpers below */
+void __page_pool_put_page(struct page_pool *pool,
+ struct page *page, bool allow_direct);
+
+static inline void page_pool_put_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
+{
+ /* When page_pool isn't compiled in, net/core/xdp.c doesn't allow
+ * registering MEM_TYPE_PAGE_POOL; this stub shields the linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ __page_pool_put_page(pool, page, allow_direct);
+#endif
+}
+/* Only very limited use cases allow direct recycling */
+static inline void page_pool_recycle_direct(struct page_pool *pool,
+ struct page *page)
+{
+ __page_pool_put_page(pool, page, true);
+}
+
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+ return true;
+#else
+ return false;
+#endif
+}
+
+#endif /* _NET_PAGE_POOL_H */
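Since the header only declares the API, a short usage sketch may be useful. It shows a per-RX-queue pool as the DOC block above recommends; the example_rx_queue structure, the function names and the pool_size value are assumptions chosen for illustration.

#include <linux/err.h>
#include <net/page_pool.h>

struct example_rx_queue {
	struct device *dev;		/* device used for DMA mapping */
	struct page_pool *pool;
};

static int example_rxq_create_pool(struct example_rx_queue *rxq, int nid)
{
	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP,	/* let page_pool map/unmap */
		.order     = 0,			/* one frame per page */
		.pool_size = 256,		/* recycle ring entries */
		.nid       = nid,
		.dev       = rxq->dev,
		.dma_dir   = DMA_FROM_DEVICE,
	};

	rxq->pool = page_pool_create(&pp_params);
	return IS_ERR(rxq->pool) ? PTR_ERR(rxq->pool) : 0;
}

static struct page *example_rxq_refill(struct example_rx_queue *rxq)
{
	/* GFP_ATOMIC allocation, suitable from the NAPI/RX path. */
	return page_pool_dev_alloc_pages(rxq->pool);
}

static void example_rxq_drop(struct example_rx_queue *rxq, struct page *page)
{
	/* Direct recycle: only safe from this pool's own NAPI context. */
	page_pool_recycle_direct(rxq->pool, page);
}

Pages handed further up the stack would instead go through page_pool_put_page(), or the regular put_page() when no DMA mapping was requested, matching the rules in the DOC comment.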
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 8639de5750f6..cbee32be1d9c 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -56,7 +56,7 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr);
#define PN_NO_ADDR 0xff
-extern const struct file_operations pn_sock_seq_fops;
-extern const struct file_operations pn_res_seq_fops;
+extern const struct seq_operations pn_sock_seq_ops;
+extern const struct seq_operations pn_res_seq_ops;
#endif
diff --git a/include/net/ping.h b/include/net/ping.h
index 4cd90d6b5c25..fd080e043a6e 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -83,20 +83,9 @@ int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
#ifdef CONFIG_PROC_FS
-struct ping_seq_afinfo {
- char *name;
- sa_family_t family;
- const struct file_operations *seq_fops;
- const struct seq_operations seq_ops;
-};
-
-extern const struct file_operations ping_seq_fops;
-
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void ping_seq_stop(struct seq_file *seq, void *v);
-int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
-void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
int __init ping_proc_init(void);
void ping_proc_exit(void);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e828d31be5da..a3c1a2c47cd4 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -33,7 +33,7 @@ struct tcf_block_ext_info {
};
struct tcf_block_cb;
-bool tcf_queue_work(struct work_struct *work);
+bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
@@ -683,9 +683,11 @@ static inline bool tc_skip_sw(u32 flags)
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
- if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
+ if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
+ TCA_CLS_FLAGS_VERBOSE))
return false;
+ flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
return false;
@@ -705,7 +707,7 @@ tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol;
cls_common->prio = tp->prio;
- if (tc_skip_sw(flags))
+ if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
cls_common->extack = extack;
}
@@ -776,6 +778,18 @@ struct tc_qopt_offload_stats {
struct gnet_stats_queue *qstats;
};
+enum tc_mq_command {
+ TC_MQ_CREATE,
+ TC_MQ_DESTROY,
+ TC_MQ_STATS,
+};
+
+struct tc_mq_qopt_offload {
+ enum tc_mq_command command;
+ u32 handle;
+ struct tc_qopt_offload_stats stats;
+};
+
enum tc_red_command {
TC_RED_REPLACE,
TC_RED_DESTROY,
diff --git a/include/net/raw.h b/include/net/raw.h
index 99d26d0c4a19..9c9fa98a91a4 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -48,7 +48,6 @@ void raw_proc_exit(void);
struct raw_iter_state {
struct seq_net_private p;
int bucket;
- struct raw_hashinfo *h;
};
static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq)
@@ -58,9 +57,6 @@ static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq)
void *raw_seq_start(struct seq_file *seq, loff_t *pos);
void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void raw_seq_stop(struct seq_file *seq, void *v);
-int raw_seq_open(struct inode *ino, struct file *file,
- struct raw_hashinfo *h, const struct seq_operations *ops);
-
#endif
int raw_hash_sk(struct sock *sk);
diff --git a/include/net/rose.h b/include/net/rose.h
index 04b72681f2ab..cf517d306a28 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -200,9 +200,9 @@ void rose_enquiry_response(struct sock *);
/* rose_route.c */
extern struct rose_neigh *rose_loopback_neigh;
-extern const struct file_operations rose_neigh_fops;
-extern const struct file_operations rose_nodes_fops;
-extern const struct file_operations rose_routes_fops;
+extern const struct seq_operations rose_neigh_seqops;
+extern const struct seq_operations rose_node_seqops;
+extern struct seq_operations rose_route_seqops;
void rose_add_loopback_neigh(void);
int __must_check rose_add_loopback_node(rose_address *);
diff --git a/include/net/route.h b/include/net/route.h
index dbb032d5921b..bb53cdba38dc 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -225,6 +225,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric);
void rt_add_uncached_list(struct rtable *rt);
void rt_del_uncached_list(struct rtable *rt);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 14b6b3af8918..0bbaa5488423 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -64,7 +64,7 @@ struct rtnl_link_ops {
size_t priv_size;
void (*setup)(struct net_device *dev);
- int maxtype;
+ unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
struct nlattr *data[],
@@ -92,7 +92,7 @@ struct rtnl_link_ops {
unsigned int (*get_num_tx_queues)(void);
unsigned int (*get_num_rx_queues)(void);
- int slave_maxtype;
+ unsigned int slave_maxtype;
const struct nla_policy *slave_policy;
int (*slave_changelink)(struct net_device *dev,
struct net_device *slave_dev,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 5154c8300262..6488daa32f82 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -30,7 +30,6 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
- __QDISC_STATE_RUNNING,
};
struct qdisc_size_table {
@@ -86,6 +85,8 @@ struct Qdisc {
struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
+ int padded;
+ refcount_t refcnt;
/*
* For performance sake on SMP, we put highly modified fields at the end
@@ -98,10 +99,9 @@ struct Qdisc {
unsigned long state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
- int padded;
- refcount_t refcnt;
spinlock_t busylock ____cacheline_aligned_in_smp;
+ spinlock_t seqlock;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -111,15 +111,21 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
refcount_inc(&qdisc->refcnt);
}
-static inline bool qdisc_is_running(const struct Qdisc *qdisc)
+static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
+ if (qdisc->flags & TCQ_F_NOLOCK)
+ return spin_is_locked(&qdisc->seqlock);
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
- if (qdisc_is_running(qdisc))
+ if (qdisc->flags & TCQ_F_NOLOCK) {
+ if (!spin_trylock(&qdisc->seqlock))
+ return false;
+ } else if (qdisc_is_running(qdisc)) {
return false;
+ }
/* Variant of write_seqcount_begin() telling lockdep a trylock
* was attempted.
*/
@@ -131,6 +137,8 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
write_seqcount_end(&qdisc->running);
+ if (qdisc->flags & TCQ_F_NOLOCK)
+ spin_unlock(&qdisc->seqlock);
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -342,14 +350,14 @@ static inline int qdisc_qlen(const struct Qdisc *q)
static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
- __u32 qlen = 0;
+ __u32 qlen = q->qstats.qlen;
int i;
if (q->flags & TCQ_F_NOLOCK) {
for_each_possible_cpu(i)
qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
} else {
- qlen = q->q.qlen;
+ qlen += q->q.qlen;
}
return qlen;
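For reference, the begin/end pair above is intended to bracket a dequeue run, whether the qdisc serializes on the seqcount or, with TCQ_F_NOLOCK, on the new seqlock. A minimal sketch of the expected calling pattern (the wrapper name is illustrative):

/* Sketch of the pairing; the body of the run loop is elided. */
static void example_run_qdisc(struct Qdisc *q)
{
	if (!qdisc_run_begin(q))
		return;		/* another CPU is already running this qdisc */

	/* ... dequeue packets and hand them to the driver here ... */

	qdisc_run_end(q);
}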
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 20ff237c5eb2..86f034b524d4 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -254,11 +254,10 @@ enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
#define SCTP_TSN_MAP_SIZE 4096
/* We will not record more than this many duplicate TSNs between two
- * SACKs. The minimum PMTU is 576. Remove all the headers and there
- * is enough room for 131 duplicate reports. Round down to the
+ * SACKs. The minimum PMTU is 512. Remove all the headers and there
+ * is enough room for 117 duplicate reports. Round down to the
* nearest power of 2.
*/
-enum { SCTP_MIN_PMTU = 576 };
enum { SCTP_MAX_DUP_TSNS = 16 };
enum { SCTP_MAX_GABS = 16 };
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 28b996d63490..30b3e2fe240a 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -103,12 +103,13 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
/*
* sctp/socket.c
*/
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll(struct file *file, struct socket *sock,
- poll_table *wait);
+__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
@@ -428,32 +429,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
return (head->next != head) && (head->next == head->prev);
}
-/* Break down data chunks at this point. */
-static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
-{
- struct sctp_sock *sp = sctp_sk(asoc->base.sk);
- struct sctp_af *af = sp->pf->af;
- int frag = pmtu;
-
- frag -= af->ip_options_len(asoc->base.sk);
- frag -= af->net_header_len;
- frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
-
- if (asoc->user_frag)
- frag = min_t(int, frag, asoc->user_frag);
-
- frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
- sctp_datachk_len(&asoc->stream)));
-
- return frag;
-}
-
-static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
-{
- sctp_assoc_sync_pmtu(asoc);
- asoc->pmtu_pending = 0;
-}
-
static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
{
return !list_empty(&chunk->list);
@@ -607,17 +582,29 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
return t->dst;
}
-static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+/* Calculate the max payload size for a given MTU, or the total
+ * overhead if the given MTU is zero
+ */
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+ __u32 mtu, __u32 extra)
{
- __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
- SCTP_DEFAULT_MINSEGMENT);
+ __u32 overhead = sizeof(struct sctphdr) + extra;
- if (t->pathmtu == pmtu)
- return true;
+ if (sp)
+ overhead += sp->pf->af->net_header_len;
+ else
+ overhead += sizeof(struct ipv6hdr);
- t->pathmtu = pmtu;
+ if (WARN_ON_ONCE(mtu && mtu <= overhead))
+ mtu = overhead;
- return false;
+ return mtu ? mtu - overhead : overhead;
+}
+
+static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
+{
+ return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
+ SCTP_DEFAULT_MINSEGMENT));
}
#endif /* __net_sctp_h__ */
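To make the replacement helpers concrete, the old sctp_frag_point() logic removed above can be approximated with them; the wrapper below is only an illustration, and datachk_len stands in for sctp_datachk_len(&asoc->stream).

/* Illustrative only: payload budget derived from a cached route's MTU. */
static __u32 example_sctp_payload(struct sctp_sock *sp,
				  const struct dst_entry *dst,
				  __u32 datachk_len)
{
	/* At least SCTP_DEFAULT_MINSEGMENT, truncated to a 4-byte multiple. */
	__u32 mtu = sctp_dst_mtu(dst);

	/* MTU minus sctphdr, network header and the data chunk header. */
	return sctp_mtu_payload(sp, mtu, datachk_len);
}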
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2d0e782c9055..5ef1bad81ef5 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -207,7 +207,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
int len, __u8 flags, gfp_t gfp);
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
const __u32 lowest_tsn);
-struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_sack(struct sctp_association *asoc);
struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
@@ -215,7 +215,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
struct sctp_chunk *sctp_make_shutdown_complete(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
-void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
+int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
const size_t hint);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a0ec462bc1a9..dbe1b911a24d 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1133,6 +1133,11 @@ struct sctp_input_cb {
};
#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
+struct sctp_output_cb {
+ struct sk_buff *last;
+};
+#define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0]))
+
static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb)
{
const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
@@ -2091,16 +2096,14 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
enum sctp_transport_cmd command,
sctp_sn_error_t error);
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
-struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
- struct net *,
- const union sctp_addr *,
- const union sctp_addr *);
void sctp_assoc_migrate(struct sctp_association *, struct sock *);
int sctp_assoc_update(struct sctp_association *old,
struct sctp_association *new);
__u32 sctp_association_get_next_tsn(struct sctp_association *);
+void sctp_assoc_update_frag_point(struct sctp_association *asoc);
+void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu);
void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 099bad59dc90..e029e301faa5 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -49,7 +49,11 @@ struct seg6_pernet_data {
static inline struct seg6_pernet_data *seg6_pernet(struct net *net)
{
+#if IS_ENABLED(CONFIG_IPV6)
return net->ipv6.seg6_data;
+#else
+ return NULL;
+#endif
}
extern int seg6_init(void);
@@ -63,5 +67,6 @@ extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
-
+extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
+ u32 tbl_id);
#endif
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
new file mode 100644
index 000000000000..661fd5b4d3e0
--- /dev/null
+++ b/include/net/seg6_local.h
@@ -0,0 +1,32 @@
+/*
+ * SR-IPv6 implementation
+ *
+ * Authors:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ * eBPF support: Mathieu Xhonneux <m.xhonneux@gmail.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_SEG6_LOCAL_H
+#define _NET_SEG6_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/net.h>
+#include <linux/ipv6.h>
+
+extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
+ u32 tbl_id);
+
+struct seg6_bpf_srh_state {
+ bool valid;
+ u16 hdrlen;
+};
+
+DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
+
+#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 74d725fdbe0f..b3b75419eafe 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -481,6 +481,11 @@ struct sock {
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
struct rcu_head sk_rcu;
@@ -803,10 +808,10 @@ static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
}
#ifdef CONFIG_NET
-extern struct static_key memalloc_socks;
+DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
static inline int sk_memalloc_socks(void)
{
- return static_key_false(&memalloc_socks);
+ return static_branch_unlikely(&memalloc_socks_key);
}
#else
@@ -1591,8 +1596,6 @@ int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int);
-__poll_t sock_no_poll(struct file *, struct socket *,
- struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
@@ -2332,6 +2335,22 @@ static inline bool sk_fullsock(const struct sock *sk)
return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}
+/* Checks if this SKB belongs to an HW offloaded socket
+ * and whether any SW fallbacks are required based on dev.
+ */
+static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev)
+{
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sock *sk = skb->sk;
+
+ if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
+ skb = sk->sk_validate_xmit_skb(sk, dev, skb);
+#endif
+
+ return skb;
+}
+
/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
* SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
*/
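The new sk_validate_xmit_skb() helper is meant to be called on the transmit path before the skb reaches the device; a minimal sketch of such a call site (the function name is illustrative) follows.

/* Sketch: let an offloaded socket replace or drop the skb first. */
static struct sk_buff *example_prepare_for_device(struct sk_buff *skb,
						  struct net_device *dev)
{
	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		return NULL;	/* the callback dropped the skb (e.g. failed SW fallback) */

	return skb;
}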
diff --git a/include/net/strparser.h b/include/net/strparser.h
index d96b59f45eba..f177c87ce38b 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -90,6 +90,8 @@ static inline void strp_pause(struct strparser *strp)
/* May be called without holding lock for attached socket */
void strp_unpause(struct strparser *strp);
+/* Must be called with process lock held (lock_sock) */
+void __strp_unpause(struct strparser *strp);
static inline void save_strp_stats(struct strparser *strp,
struct strp_aggr_stats *agg_stats)
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 39bc855d7fee..d574ce63bf22 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -155,6 +155,7 @@ struct switchdev_notifier_fdb_info {
struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
+ bool added_by_user;
};
static inline struct net_device *
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9c9b3768b350..0448e7c5d2b4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -245,6 +245,7 @@ extern long sysctl_tcp_mem[3];
#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
+#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -333,8 +334,7 @@ void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
-void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
@@ -388,8 +388,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -402,6 +401,10 @@ void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
+int tcp_set_rcvlowat(struct sock *sk, int val);
+void tcp_data_ready(struct sock *sk);
+int tcp_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma);
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
@@ -553,7 +556,12 @@ void tcp_fin(struct sock *sk);
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
- hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
+ if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
+ __sock_put(sk);
+
+ if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
+ __sock_put(sk);
+
inet_csk_clear_xmit_timers(sk);
}
@@ -810,9 +818,8 @@ struct tcp_skb_cb {
#endif
} header; /* For incoming skbs */
struct {
- __u32 key;
__u32 flags;
- struct bpf_map *map;
+ struct sock *sk_redir;
void *data_end;
} bpf;
};
@@ -1747,27 +1754,22 @@ enum tcp_seq_states {
TCP_SEQ_STATE_ESTABLISHED,
};
-int tcp_seq_open(struct inode *inode, struct file *file);
+void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
+void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void tcp_seq_stop(struct seq_file *seq, void *v);
struct tcp_seq_afinfo {
- char *name;
sa_family_t family;
- const struct file_operations *seq_fops;
- struct seq_operations seq_ops;
};
struct tcp_iter_state {
struct seq_net_private p;
- sa_family_t family;
enum tcp_seq_states state;
struct sock *syn_wait_sk;
int bucket, offset, sbucket, num;
loff_t last_pos;
};
-int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
-void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
-
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;
@@ -1871,6 +1873,10 @@ void tcp_v4_init(void);
void tcp_init(void);
/* tcp_recovery.c */
+void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
+void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
+extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
+ u32 reo_wnd);
extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
@@ -2101,4 +2107,12 @@ static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+void clean_acked_data_enable(struct inet_connection_sock *icsk,
+ void (*cad)(struct sock *sk, u32 ack_seq));
+void clean_acked_data_disable(struct inet_connection_sock *icsk);
+
+#endif
+
#endif /* _TCP_H */
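The clean_acked_data_enable()/clean_acked_data_disable() pair above (available only with CONFIG_TLS_DEVICE) registers a per-connection callback that runs as data is ACKed; a sketch of how a ULP might hook it up, with an illustrative callback name:

#if IS_ENABLED(CONFIG_TLS_DEVICE)
/* Illustrative callback: release state fully covered by acked_seq. */
static void example_clean_acked(struct sock *sk, u32 acked_seq)
{
	/* e.g. free transmit records whose end sequence precedes acked_seq */
}

static void example_enable(struct sock *sk)
{
	clean_acked_data_enable(inet_csk(sk), example_clean_acked);
}

static void example_disable(struct sock *sk)
{
	clean_acked_data_disable(inet_csk(sk));
}
#endif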
diff --git a/include/net/tipc.h b/include/net/tipc.h
index 07670ec022a7..f0e7e6bc1bef 100644
--- a/include/net/tipc.h
+++ b/include/net/tipc.h
@@ -44,11 +44,11 @@ struct tipc_basic_hdr {
__be32 w[4];
};
-static inline u32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
+static inline __be32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
{
u32 w0 = ntohl(hdr->w[0]);
bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK;
- int key;
+ __be32 key;
/* Return source node identity as key */
if (likely(!keepalive_msg))
diff --git a/include/net/tls.h b/include/net/tls.h
index 3da8e13a6d96..7f84ea3e217c 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,21 +83,10 @@ struct tls_device {
void (*unhash)(struct tls_device *device, struct sock *sk);
};
-struct tls_sw_context {
+struct tls_sw_context_tx {
struct crypto_aead *aead_send;
- struct crypto_aead *aead_recv;
struct crypto_wait async_wait;
- /* Receive context */
- struct strparser strp;
- void (*saved_data_ready)(struct sock *sk);
- unsigned int (*sk_poll)(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
- struct sk_buff *recv_pkt;
- u8 control;
- bool decrypted;
-
- /* Sending context */
char aad_space[TLS_AAD_SPACE_SIZE];
unsigned int sg_plaintext_size;
@@ -114,6 +103,53 @@ struct tls_sw_context {
struct scatterlist sg_aead_out[2];
};
+struct tls_sw_context_rx {
+ struct crypto_aead *aead_recv;
+ struct crypto_wait async_wait;
+
+ struct strparser strp;
+ void (*saved_data_ready)(struct sock *sk);
+ __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+ struct sk_buff *recv_pkt;
+ u8 control;
+ bool decrypted;
+
+ char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
+ char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
+
+};
+
+struct tls_record_info {
+ struct list_head list;
+ u32 end_seq;
+ int len;
+ int num_frags;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+struct tls_offload_context {
+ struct crypto_aead *aead_send;
+ spinlock_t lock; /* protects records list */
+ struct list_head records_list;
+ struct tls_record_info *open_record;
+ struct tls_record_info *retransmit_hint;
+ u64 hint_record_sn;
+ u64 unacked_record_sn;
+
+ struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
+ void (*sk_destruct)(struct sock *sk);
+ u8 driver_state[];
+ /* The TLS layer reserves room for driver-specific state.
+ * Currently the belief is that there is not enough
+ * driver-specific state to justify another layer of indirection.
+ */
+#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE \
+ (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
+ TLS_DRIVER_STATE_SIZE)
+
enum {
TLS_PENDING_CLOSED_RECORD
};
@@ -138,9 +174,15 @@ struct tls_context {
struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
};
- void *priv_ctx;
+ struct list_head list;
+ struct net_device *netdev;
+ refcount_t refcount;
- u8 conf:3;
+ void *priv_ctx_tx;
+ void *priv_ctx_rx;
+
+ u8 tx_conf:3;
+ u8 rx_conf:3;
struct cipher_context tx;
struct cipher_context rx;
@@ -148,6 +190,7 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
unsigned long flags;
+ bool in_tcp_sendpages;
u16 pending_open_record_frags;
int (*push_pending_record)(struct sock *sk, int flags);
@@ -177,18 +220,37 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
void tls_sw_close(struct sock *sk, long timeout);
-void tls_sw_free_resources(struct sock *sk);
+void tls_sw_free_resources_tx(struct sock *sk);
+void tls_sw_free_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-unsigned int tls_sw_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
-void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
-void tls_icsk_clean_acked(struct sock *sk);
+int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
+int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_device_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+void tls_device_sk_destruct(struct sock *sk);
+void tls_device_init(void);
+void tls_device_cleanup(void);
+
+struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+ u32 seq, u64 *p_record_sn);
+
+static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
+{
+ return rec->len == 0;
+}
+
+static inline u32 tls_record_start_seq(struct tls_record_info *rec)
+{
+ return rec->end_seq - rec->len;
+}
+void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
struct scatterlist *sg, u16 first_offset,
int flags);
@@ -225,6 +287,13 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+{
+ return sk_fullsock(sk) &&
+ /* matches smp_store_release in tls_set_device_offload */
+ smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+}
+
static inline void tls_err_abort(struct sock *sk, int err)
{
sk->sk_err = err;
@@ -297,16 +366,22 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
return icsk->icsk_ulp_data;
}
-static inline struct tls_sw_context *tls_sw_ctx(
+static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
const struct tls_context *tls_ctx)
{
- return (struct tls_sw_context *)tls_ctx->priv_ctx;
+ return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
+}
+
+static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
+ const struct tls_context *tls_ctx)
+{
+ return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline struct tls_offload_context *tls_offload_ctx(
const struct tls_context *tls_ctx)
{
- return (struct tls_offload_context *)tls_ctx->priv_ctx;
+ return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
@@ -314,4 +389,12 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
+struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+
+int tls_sw_fallback_init(struct sock *sk,
+ struct tls_offload_context *offload_ctx,
+ struct tls_crypto_info *crypto_info);
+
#endif /* _TLS_OFFLOAD_H */
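To illustrate the new TX-offload plumbing, the sketch below shows how a driver-side lookup of a retransmitted TLS record might use tls_is_sk_tx_device_offloaded(), tls_get_ctx(), tls_offload_ctx() and tls_get_record(); the function name and the omitted locking discipline are assumptions.

#include <net/tls.h>

/* Sketch: find the TLS record covering a retransmitted TCP sequence. */
static struct tls_record_info *
example_find_tls_record(struct sock *sk, u32 tcp_seq, u64 *record_sn)
{
	struct tls_offload_context *off_ctx;

	if (!tls_is_sk_tx_device_offloaded(sk))
		return NULL;

	off_ctx = tls_offload_ctx(tls_get_ctx(sk));

	/* The caller is expected to hold off_ctx->lock, which protects
	 * the records_list that tls_get_record() walks.
	 */
	return tls_get_record(off_ctx, tcp_seq, record_sn);
}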
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index c4f5caaf3778..f6a3543e5247 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
struct sockcm_cookie *sockc);
-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
- __u16 srcp, __u16 destp, int bucket);
+void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+ __u16 srcp, __u16 destp, int rqueue, int bucket);
+static inline void
+ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
+ __u16 destp, int bucket)
+{
+ __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
+ bucket);
+}
#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
diff --git a/include/net/udp.h b/include/net/udp.h
index 0676b272f6ac..b1ea8b0f5e6a 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -174,6 +174,9 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ netdev_features_t features);
+
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
struct udphdr *uh;
@@ -244,6 +247,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
return htons((((u64) hash * (max - min)) >> 32) + min);
}
+static inline int udp_rqueue_get(struct sock *sk)
+{
+ return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
+}
+
/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
@@ -269,6 +277,7 @@ int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
+int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
@@ -276,7 +285,7 @@ int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
@@ -408,31 +417,27 @@ do { \
#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
#endif
-/* /proc */
-int udp_seq_open(struct inode *inode, struct file *file);
-
+#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
- char *name;
sa_family_t family;
struct udp_table *udp_table;
- const struct file_operations *seq_fops;
- struct seq_operations seq_ops;
};
struct udp_iter_state {
struct seq_net_private p;
- sa_family_t family;
int bucket;
- struct udp_table *udp_table;
};
-#ifdef CONFIG_PROC_FS
-int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
-void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
+void *udp_seq_start(struct seq_file *seq, loff_t *pos);
+void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void udp_seq_stop(struct seq_file *seq, void *v);
+
+extern const struct seq_operations udp_seq_ops;
+extern const struct seq_operations udp6_seq_ops;
int udp4_proc_init(void);
void udp4_proc_exit(void);
-#endif
+#endif /* CONFIG_PROC_FS */
int udpv4_offload_init(void);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index ad73d8b3fcc2..b99a02ae3934 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -262,6 +262,7 @@ struct vxlan_dev {
#define VXLAN_F_COLLECT_METADATA 0x2000
#define VXLAN_F_GPE 0x4000
#define VXLAN_F_IPV6_LINKLOCAL 0x8000
+#define VXLAN_F_TTL_INHERIT 0x10000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
diff --git a/include/net/xdp.h b/include/net/xdp.h
index b2362ddfa694..2deea7166a34 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -33,16 +33,115 @@
* also mandatory during RX-ring setup.
*/
+enum xdp_mem_type {
+ MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
+ MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
+ MEM_TYPE_PAGE_POOL,
+ MEM_TYPE_ZERO_COPY,
+ MEM_TYPE_MAX,
+};
+
+/* XDP flags for ndo_xdp_xmit */
+#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */
+#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH
+
+struct xdp_mem_info {
+ u32 type; /* enum xdp_mem_type, but known size type */
+ u32 id;
+};
+
+struct page_pool;
+
+struct zero_copy_allocator {
+ void (*free)(struct zero_copy_allocator *zca, unsigned long handle);
+};
+
struct xdp_rxq_info {
struct net_device *dev;
u32 queue_index;
u32 reg_state;
+ struct xdp_mem_info mem;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+struct xdp_buff {
+ void *data;
+ void *data_end;
+ void *data_meta;
+ void *data_hard_start;
+ unsigned long handle;
+ struct xdp_rxq_info *rxq;
+};
+
+struct xdp_frame {
+ void *data;
+ u16 len;
+ u16 headroom;
+ u16 metasize;
+ /* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
+ * while mem info is valid on remote CPU.
+ */
+ struct xdp_mem_info mem;
+ struct net_device *dev_rx; /* used by cpumap */
+};
+
+/* Convert xdp_buff to xdp_frame */
+static inline
+struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdp_frame;
+ int metasize;
+ int headroom;
+
+ /* TODO: implement clone, copy, use "native" MEM_TYPE */
+ if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
+ return NULL;
+
+ /* Assure headroom is available for storing info */
+ headroom = xdp->data - xdp->data_hard_start;
+ metasize = xdp->data - xdp->data_meta;
+ metasize = metasize > 0 ? metasize : 0;
+ if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
+ return NULL;
+
+ /* Store info in top of packet */
+ xdp_frame = xdp->data_hard_start;
+
+ xdp_frame->data = xdp->data;
+ xdp_frame->len = xdp->data_end - xdp->data;
+ xdp_frame->headroom = headroom - sizeof(*xdp_frame);
+ xdp_frame->metasize = metasize;
+
+ /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
+ xdp_frame->mem = xdp->rxq->mem;
+
+ return xdp_frame;
+}
+
+void xdp_return_frame(struct xdp_frame *xdpf);
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
+void xdp_return_buff(struct xdp_buff *xdp);
+
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
struct net_device *dev, u32 queue_index);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
+ enum xdp_mem_type type, void *allocator);
+
+/* Drivers not supporting XDP metadata can use this helper, which
+ * rejects any room expansion for metadata as a result.
+ */
+static __always_inline void
+xdp_set_data_meta_invalid(struct xdp_buff *xdp)
+{
+ xdp->data_meta = xdp->data + 1;
+}
+
+static __always_inline bool
+xdp_data_meta_unsupported(const struct xdp_buff *xdp)
+{
+ return unlikely(xdp->data_meta > xdp->data);
+}
#endif /* __LINUX_NET_XDP_H__ */
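Finally, a short sketch of how a driver might use convert_to_xdp_frame() and xdp_return_frame() on a transmit/redirect path; example_enqueue_for_tx() is a hypothetical driver queueing routine, not part of this API.

#include <linux/errno.h>
#include <net/xdp.h>

/* Hypothetical driver routine that queues a frame for transmission. */
int example_enqueue_for_tx(struct net_device *dev, struct xdp_frame *xdpf);

static int example_xmit_xdp_buff(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

	if (unlikely(!xdpf))
		return -EOVERFLOW;	/* too little headroom, or zero-copy buffer */

	if (example_enqueue_for_tx(dev, xdpf) < 0) {
		/* Returns the page via the memory model saved in xdpf->mem. */
		xdp_return_frame(xdpf);
		return -ENOSPC;
	}
	return 0;
}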
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
new file mode 100644
index 000000000000..9fe472f2ac95
--- /dev/null
+++ b/include/net/xdp_sock.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* AF_XDP internal functions
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_SOCK_H
+#define _LINUX_XDP_SOCK_H
+
+#include <linux/workqueue.h>
+#include <linux/if_xdp.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <net/sock.h>
+
+struct net_device;
+struct xsk_queue;
+
+struct xdp_umem_props {
+ u64 chunk_mask;
+ u64 size;
+};
+
+struct xdp_umem_page {
+ void *addr;
+ dma_addr_t dma;
+};
+
+struct xdp_umem {
+ struct xsk_queue *fq;
+ struct xsk_queue *cq;
+ struct xdp_umem_page *pages;
+ struct xdp_umem_props props;
+ u32 headroom;
+ u32 chunk_size_nohr;
+ struct user_struct *user;
+ struct pid *pid;
+ unsigned long address;
+ refcount_t users;
+ struct work_struct work;
+ struct page **pgs;
+ u32 npgs;
+ struct net_device *dev;
+ u16 queue_id;
+ bool zc;
+ spinlock_t xsk_list_lock;
+ struct list_head xsk_list;
+};
+
+struct xdp_sock {
+ /* struct sock must be the first member of struct xdp_sock */
+ struct sock sk;
+ struct xsk_queue *rx;
+ struct net_device *dev;
+ struct xdp_umem *umem;
+ struct list_head flush_node;
+ u16 queue_id;
+ struct xsk_queue *tx ____cacheline_aligned_in_smp;
+ struct list_head list;
+ bool zc;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
+ u64 rx_dropped;
+};
+
+struct xdp_buff;
+#ifdef CONFIG_XDP_SOCKETS
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
+int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
+void xsk_flush(struct xdp_sock *xs);
+bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
+/* Used from netdev driver */
+u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
+void xsk_umem_discard_addr(struct xdp_umem *umem);
+void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
+bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+#else
+static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+ return -ENOTSUPP;
+}
+
+static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+ return -ENOTSUPP;
+}
+
+static inline void xsk_flush(struct xdp_sock *xs)
+{
+}
+
+static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
+{
+ return false;
+}
+#endif /* CONFIG_XDP_SOCKETS */
+
+#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index a872379b69da..557122846e0e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -375,6 +375,7 @@ struct xfrm_input_afinfo {
int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
+void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
@@ -736,7 +737,7 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
if (audit_enabled == 0)
return NULL;
- audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
+ audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
AUDIT_MAC_IPSEC_EVENT);
if (audit_buf == NULL)
return NULL;
@@ -751,7 +752,7 @@ static inline void xfrm_audit_helper_usrinfo(bool task_valid,
audit_get_loginuid(current) :
INVALID_UID);
const unsigned int ses = task_valid ? audit_get_sessionid(current) :
- (unsigned int) -1;
+ AUDIT_SID_UNSET;
audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
audit_log_task_context(audit_buf);
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 9c689868eb4d..a0794632fd01 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -298,30 +298,44 @@ TRACE_EVENT(non_standard_event,
TRACE_EVENT(aer_event,
TP_PROTO(const char *dev_name,
const u32 status,
- const u8 severity),
+ const u8 severity,
+ const u8 tlp_header_valid,
+ struct aer_header_log_regs *tlp),
- TP_ARGS(dev_name, status, severity),
+ TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp),
TP_STRUCT__entry(
__string( dev_name, dev_name )
__field( u32, status )
__field( u8, severity )
+ __field( u8, tlp_header_valid)
+ __array( u32, tlp_header, 4 )
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__entry->status = status;
__entry->severity = severity;
+ __entry->tlp_header_valid = tlp_header_valid;
+ if (tlp_header_valid) {
+ __entry->tlp_header[0] = tlp->dw0;
+ __entry->tlp_header[1] = tlp->dw1;
+ __entry->tlp_header[2] = tlp->dw2;
+ __entry->tlp_header[3] = tlp->dw3;
+ }
),
- TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
+ TP_printk("%s PCIe Bus Error: severity=%s, %s, TLP Header=%s\n",
__get_str(dev_name),
__entry->severity == AER_CORRECTABLE ? "Corrected" :
__entry->severity == AER_FATAL ?
"Fatal" : "Uncorrected, non-fatal",
__entry->severity == AER_CORRECTABLE ?
__print_flags(__entry->status, "|", aer_correctable_errors) :
- __print_flags(__entry->status, "|", aer_uncorrectable_errors))
+ __print_flags(__entry->status, "|", aer_uncorrectable_errors),
+ __entry->tlp_header_valid ?
+ __print_array(__entry->tlp_header, 4, 4) :
+ "Not available")
);
/*
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4f71d6a073ba..6c003995347a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2093,10 +2093,7 @@ struct ib_flow_attr {
u32 flags;
u8 num_of_specs;
u8 port;
- /* Following are the optional layers according to user request
- * struct ib_flow_spec_xxx
- * struct ib_flow_spec_yyy
- */
+ union ib_flow_spec flows[];
};
struct ib_flow {
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index a29d3086eb56..86a569d008b2 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -148,7 +148,6 @@ struct osd_request {
u8 *pad_buff;
} out, in;
- gfp_t alloc_flags;
unsigned timeout;
unsigned retries;
unsigned sense_len;
@@ -202,14 +201,11 @@ static inline bool osd_req_is_ver1(struct osd_request *or)
*
* @osd_dev: OSD device that holds the scsi-device and default values
* that the request is associated with.
- * @gfp: The allocation flags to use for request allocation, and all
- * subsequent allocations. This will be stored at
- * osd_request->alloc_flags, can be changed by user later
*
* Allocate osd_request and initialize all members to the
* default/initial state.
*/
-struct osd_request *osd_start_request(struct osd_dev *od, gfp_t gfp);
+struct osd_request *osd_start_request(struct osd_dev *od);
enum osd_req_options {
OSD_REQ_FUA = 0x08, /* Force Unit Access */
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 04e0679767f6..e03bd9d41fa8 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -11,8 +11,6 @@ struct scsi_sense_hdr;
extern void scsi_print_command(struct scsi_cmnd *);
extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
-extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
- unsigned char, unsigned char);
extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
const struct scsi_sense_hdr *);
extern void scsi_print_sense(const struct scsi_cmnd *);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 7ae177c8e399..4c36af6edd79 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -15,7 +15,7 @@ struct scsi_cmnd;
struct scsi_lun;
struct scsi_sense_hdr;
-typedef unsigned int __bitwise blist_flags_t;
+typedef __u64 __bitwise blist_flags_t;
struct scsi_mode_data {
__u32 length;
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index ea67c32e870e..3fdb322d4c4b 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -6,55 +6,80 @@
*/
/* Only scan LUN 0 */
-#define BLIST_NOLUN ((__force blist_flags_t)(1 << 0))
+#define BLIST_NOLUN ((__force blist_flags_t)(1ULL << 0))
/* Known to have LUNs, force scanning.
* DEPRECATED: Use max_luns=N */
-#define BLIST_FORCELUN ((__force blist_flags_t)(1 << 1))
+#define BLIST_FORCELUN ((__force blist_flags_t)(1ULL << 1))
/* Flag for broken handshaking */
-#define BLIST_BORKEN ((__force blist_flags_t)(1 << 2))
+#define BLIST_BORKEN ((__force blist_flags_t)(1ULL << 2))
/* unlock by special command */
-#define BLIST_KEY ((__force blist_flags_t)(1 << 3))
+#define BLIST_KEY ((__force blist_flags_t)(1ULL << 3))
/* Do not use LUNs in parallel */
-#define BLIST_SINGLELUN ((__force blist_flags_t)(1 << 4))
+#define BLIST_SINGLELUN ((__force blist_flags_t)(1ULL << 4))
/* Buggy Tagged Command Queuing */
-#define BLIST_NOTQ ((__force blist_flags_t)(1 << 5))
+#define BLIST_NOTQ ((__force blist_flags_t)(1ULL << 5))
/* Non consecutive LUN numbering */
-#define BLIST_SPARSELUN ((__force blist_flags_t)(1 << 6))
+#define BLIST_SPARSELUN ((__force blist_flags_t)(1ULL << 6))
/* Avoid LUNS >= 5 */
-#define BLIST_MAX5LUN ((__force blist_flags_t)(1 << 7))
+#define BLIST_MAX5LUN ((__force blist_flags_t)(1ULL << 7))
/* Treat as (removable) CD-ROM */
-#define BLIST_ISROM ((__force blist_flags_t)(1 << 8))
+#define BLIST_ISROM ((__force blist_flags_t)(1ULL << 8))
/* LUNs past 7 on a SCSI-2 device */
-#define BLIST_LARGELUN ((__force blist_flags_t)(1 << 9))
+#define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
/* override additional length field */
-#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1 << 10))
+#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1ULL << 10))
+#define __BLIST_UNUSED_11 ((__force blist_flags_t)(1ULL << 11))
/* do not do automatic start on add */
-#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1 << 12))
+#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
+#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
+#define __BLIST_UNUSED_14 ((__force blist_flags_t)(1ULL << 14))
+#define __BLIST_UNUSED_15 ((__force blist_flags_t)(1ULL << 15))
+#define __BLIST_UNUSED_16 ((__force blist_flags_t)(1ULL << 16))
/* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
-#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1 << 17))
+#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1ULL << 17))
/* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1 << 18))
+#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1ULL << 18))
/* don't use PREVENT-ALLOW commands */
-#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1 << 19))
+#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1ULL << 19))
/* device is actually for RAID config */
-#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1 << 20))
+#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1ULL << 20))
/* select without ATN */
-#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1 << 21))
+#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1ULL << 21))
/* retry HARDWARE_ERROR */
-#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1 << 22))
+#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1ULL << 22))
/* maximum 512 sector cdb length */
-#define BLIST_MAX_512 ((__force blist_flags_t)(1 << 23))
+#define BLIST_MAX_512 ((__force blist_flags_t)(1ULL << 23))
+#define __BLIST_UNUSED_24 ((__force blist_flags_t)(1ULL << 24))
/* Disable T10 PI (DIF) */
-#define BLIST_NO_DIF ((__force blist_flags_t)(1 << 25))
+#define BLIST_NO_DIF ((__force blist_flags_t)(1ULL << 25))
/* Ignore SBC-3 VPD pages */
-#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1 << 26))
+#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1ULL << 26))
+#define __BLIST_UNUSED_27 ((__force blist_flags_t)(1ULL << 27))
/* Attempt to read VPD pages */
-#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1 << 28))
+#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1ULL << 28))
/* don't try to issue RSOC */
-#define BLIST_NO_RSOC ((__force blist_flags_t)(1 << 29))
+#define BLIST_NO_RSOC ((__force blist_flags_t)(1ULL << 29))
/* maximum 1024 sector cdb length */
-#define BLIST_MAX_1024 ((__force blist_flags_t)(1 << 30))
+#define BLIST_MAX_1024 ((__force blist_flags_t)(1ULL << 30))
/* Use UNMAP limit for WRITE SAME */
-#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1 << 31))
+#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1ULL << 31))
+/* Always retry ABORTED_COMMAND with Internal Target Failure */
+#define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32))
+/* Always retry ABORTED_COMMAND with ASC 0xc1 */
+#define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33))
+
+#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+
+#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+ (__force blist_flags_t) \
+ ((__force __u64)__BLIST_LAST_USED - 1ULL)))
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
+ __BLIST_UNUSED_13 | \
+ __BLIST_UNUSED_14 | \
+ __BLIST_UNUSED_15 | \
+ __BLIST_UNUSED_16 | \
+ __BLIST_UNUSED_24 | \
+ __BLIST_UNUSED_27 | \
+ __BLIST_HIGH_UNUSED)
#endif
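The new unused-bit masks make it possible to sanity-check devinfo entries against bits that are not (or no longer) assigned. A minimal sketch of such a check, using a hypothetical helper that is not part of this patch:

	/* Hypothetical validation helper: a blacklist entry is suspect if it
	 * sets any bit covered by __BLIST_UNUSED_MASK. */
	static inline bool example_blist_flags_valid(blist_flags_t flags)
	{
		return (flags & __BLIST_UNUSED_MASK) == 0;
	}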
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 12f454cb6f61..53b485fe9b67 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -307,7 +307,7 @@ struct scsi_host_template {
* EH_HANDLED: I fixed the error, please complete the command
* EH_RESET_TIMER: I need more time, reset the timer and
* begin counting again
- * EH_NOT_HANDLED Begin normal error recovery
+ * EH_DONE: Begin normal error recovery
*
* Status: OPTIONAL
*/
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 50df5b28d2c9..8ee8991aa099 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -143,13 +143,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
void *data, size_t len)
{
- return 0;
+ return -ENOSYS;
}
static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size)
{
- return 0;
+ return -ENOSYS;
}
static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
new file mode 100644
index 000000000000..578180cbc134
--- /dev/null
+++ b/include/soc/qcom/cmd-db.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+
+#ifndef __QCOM_COMMAND_DB_H__
+#define __QCOM_COMMAND_DB_H__
+
+
+enum cmd_db_hw_type {
+ CMD_DB_HW_INVALID = 0,
+ CMD_DB_HW_MIN = 3,
+ CMD_DB_HW_ARC = CMD_DB_HW_MIN,
+ CMD_DB_HW_VRM = 4,
+ CMD_DB_HW_BCM = 5,
+ CMD_DB_HW_MAX = CMD_DB_HW_BCM,
+ CMD_DB_HW_ALL = 0xff,
+};
+
+#if IS_ENABLED(CONFIG_QCOM_COMMAND_DB)
+u32 cmd_db_read_addr(const char *resource_id);
+
+int cmd_db_read_aux_data(const char *resource_id, u8 *data, size_t len);
+
+size_t cmd_db_read_aux_data_len(const char *resource_id);
+
+enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
+
+int cmd_db_ready(void);
+#else
+static inline u32 cmd_db_read_addr(const char *resource_id)
+{ return 0; }
+
+static inline int cmd_db_read_aux_data(const char *resource_id, u8 *data,
+ size_t len)
+{ return -ENODEV; }
+
+static inline size_t cmd_db_read_aux_data_len(const char *resource_id)
+{ return -ENODEV; }
+
+static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
+{ return -ENODEV; }
+
+static inline int cmd_db_ready(void)
+{ return -ENODEV; }
+#endif /* CONFIG_QCOM_COMMAND_DB */
+#endif /* __QCOM_COMMAND_DB_H__ */
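A hedged usage sketch for the new interface; the resource name "ldoa1" is invented for illustration, and the ready/not-found conventions are inferred from the declarations and stubs above (non-zero from cmd_db_ready() meaning the DB is not usable yet, 0 from cmd_db_read_addr() meaning not found):

	static int example_cmd_db_lookup(void)
	{
		u32 addr;
		int ret;

		ret = cmd_db_ready();		/* non-zero: command DB not parsed yet */
		if (ret)
			return ret;

		addr = cmd_db_read_addr("ldoa1");
		if (!addr)			/* treat 0 as "resource not found" */
			return -ENODEV;

		if (cmd_db_read_slave_id("ldoa1") != CMD_DB_HW_VRM)
			return -EINVAL;

		return 0;
	}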
diff --git a/include/soc/tegra/cpuidle.h b/include/soc/tegra/cpuidle.h
index 1fae9c7800d1..b6cf32211520 100644
--- a/include/soc/tegra/cpuidle.h
+++ b/include/soc/tegra/cpuidle.h
@@ -14,7 +14,7 @@
#ifndef __SOC_TEGRA_CPUIDLE_H__
#define __SOC_TEGRA_CPUIDLE_H__
-#if defined(CONFIG_ARM) && defined(CONFIG_CPU_IDLE)
+#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_CPU_IDLE)
void tegra_cpuidle_pcie_irqs_in_use(void);
#else
static inline void tegra_cpuidle_pcie_irqs_in_use(void)
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 233bae954970..b43f37fea096 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -9,6 +9,7 @@
#ifndef __SOC_TEGRA_MC_H__
#define __SOC_TEGRA_MC_H__
+#include <linux/reset-controller.h>
#include <linux/types.h>
struct clk;
@@ -95,6 +96,30 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
}
#endif
+struct tegra_mc_reset {
+ const char *name;
+ unsigned long id;
+ unsigned int control;
+ unsigned int status;
+ unsigned int reset;
+ unsigned int bit;
+};
+
+struct tegra_mc_reset_ops {
+ int (*hotreset_assert)(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst);
+ int (*hotreset_deassert)(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst);
+ int (*block_dma)(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst);
+ bool (*dma_idling)(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst);
+ int (*unblock_dma)(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst);
+ int (*reset_status)(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst);
+};
+
struct tegra_mc_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -108,12 +133,18 @@ struct tegra_mc_soc {
u8 client_id_mask;
const struct tegra_smmu_soc *smmu;
+
+ u32 intmask;
+
+ const struct tegra_mc_reset_ops *reset_ops;
+ const struct tegra_mc_reset *resets;
+ unsigned int num_resets;
};
struct tegra_mc {
struct device *dev;
struct tegra_smmu *smmu;
- void __iomem *regs;
+ void __iomem *regs, *regs2;
struct clk *clk;
int irq;
@@ -122,6 +153,10 @@ struct tegra_mc {
struct tegra_mc_timing *timings;
unsigned int num_timings;
+
+ struct reset_controller_dev reset;
+
+ spinlock_t lock;
};
void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
diff --git a/include/sound/control.h b/include/sound/control.h
index ca13a44ae9d4..6011a58d3e20 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -23,6 +23,7 @@
*/
#include <linux/wait.h>
+#include <linux/nospec.h>
#include <sound/asound.h>
#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -148,12 +149,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->numid - kctl->id.numid;
+ unsigned int ioff = id->numid - kctl->id.numid;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->index - kctl->id.index;
+ unsigned int ioff = id->index - kctl->id.index;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
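Both helpers now clamp the computed offset with array_index_nospec() so a bounds check mispredicted during speculative execution cannot be used to index past kctl->count. The same hardening pattern in a generic, ALSA-independent form:

	/* Spectre-v1 hardening pattern (needs <linux/nospec.h>): bounds-check the
	 * untrusted index first, then clamp it before using it as an array index. */
	static int example_read_entry(const int *table, size_t nr, size_t idx)
	{
		if (idx >= nr)
			return -EINVAL;
		idx = array_index_nospec(idx, nr);
		return table[idx];
	}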
diff --git a/include/sound/core.h b/include/sound/core.h
index 5f181b875c2f..36a5934cf4b1 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -51,7 +51,6 @@ struct completion;
*/
enum snd_device_type {
SNDRV_DEV_LOWLEVEL,
- SNDRV_DEV_CONTROL,
SNDRV_DEV_INFO,
SNDRV_DEV_BUS,
SNDRV_DEV_CODEC,
@@ -62,6 +61,7 @@ enum snd_device_type {
SNDRV_DEV_SEQUENCER,
SNDRV_DEV_HWDEP,
SNDRV_DEV_JACK,
+ SNDRV_DEV_CONTROL, /* NOTE: this must be the last one */
};
enum snd_device_state {
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 5ebcc51c0a6a..8c1572de44c5 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1610,7 +1610,7 @@ struct snd_emu10k1_fx8010_pcm {
struct snd_pcm_indirect pcm_rec;
unsigned int tram_pos;
unsigned int tram_shift;
- struct snd_emu10k1_fx8010_irq *irq;
+ struct snd_emu10k1_fx8010_irq irq;
};
struct snd_emu10k1_fx8010 {
@@ -1902,7 +1902,7 @@ int snd_emu10k1_fx8010_register_irq_handler(struct snd_emu10k1 *emu,
snd_fx8010_irq_handler_t *handler,
unsigned char gpr_running,
void *private_data,
- struct snd_emu10k1_fx8010_irq **r_irq);
+ struct snd_emu10k1_fx8010_irq *irq);
int snd_emu10k1_fx8010_unregister_irq_handler(struct snd_emu10k1 *emu,
struct snd_emu10k1_fx8010_irq *irq);
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 06536e01ed94..c052afc27547 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -571,4 +571,9 @@ static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
return (unsigned long)(ptr - array->list) / array->elem_size;
}
+/* a helper macro to iterate over each snd_array element */
+#define snd_array_for_each(array, idx, ptr) \
+ for ((idx) = 0, (ptr) = (array)->list; (idx) < (array)->used; \
+ (ptr) = snd_array_elem(array, ++(idx)))
+
#endif /* __SOUND_HDAUDIO_H */
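A usage sketch for the new iterator; "struct my_entry" and the "entries" argument are placeholders for the caller's own element type and snd_array instance:

	/* Walks every used element; snd_array_for_each() assigns ptr from the
	 * array's element storage, so a typed pointer can be used directly. */
	static void example_dump_entries(struct snd_array *entries)
	{
		struct my_entry *ent;
		int idx;

		snd_array_for_each(entries, idx, ent)
			pr_debug("entry %d at %p\n", idx, ent);
	}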
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 782d1df34208..9c3db3dce32b 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -34,11 +34,9 @@ struct snd_dma_device {
struct device *dev; /* generic device */
};
-#ifndef snd_dma_pci_data
#define snd_dma_pci_data(pci) (&(pci)->dev)
#define snd_dma_isa_data() NULL
#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
-#endif
/*
diff --git a/include/sound/omap-pcm.h b/include/sound/omap-pcm.h
deleted file mode 100644
index c1d2f31d71e9..000000000000
--- a/include/sound/omap-pcm.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * omap-pcm.h - OMAP PCM driver
- *
- * Copyright (C) 2014 Texas Instruments, Inc.
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef __OMAP_PCM_H__
-#define __OMAP_PCM_H__
-
-#if IS_ENABLED(CONFIG_SND_OMAP_SOC)
-int omap_pcm_platform_register(struct device *dev);
-#else
-static inline int omap_pcm_platform_register(struct device *dev)
-{
- return 0;
-}
-#endif /* CONFIG_SND_OMAP_SOC */
-
-#endif /* __OMAP_PCM_H__ */
diff --git a/include/sound/rt5640.h b/include/sound/rt5640.h
deleted file mode 100644
index e3c84b92ff70..000000000000
--- a/include/sound/rt5640.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * linux/sound/rt5640.h -- Platform data for RT5640
- *
- * Copyright 2011 Realtek Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_SND_RT5640_H
-#define __LINUX_SND_RT5640_H
-
-struct rt5640_platform_data {
- /* IN1 & IN2 & IN3 can optionally be differential */
- bool in1_diff;
- bool in2_diff;
- bool in3_diff;
-
- bool dmic_en;
- bool dmic1_data_pin; /* 0 = IN1P; 1 = GPIO3 */
- bool dmic2_data_pin; /* 0 = IN1N; 1 = GPIO4 */
-
- int ldo1_en; /* GPIO for LDO1_EN */
-};
-
-#endif
diff --git a/include/sound/rt5668.h b/include/sound/rt5668.h
new file mode 100644
index 000000000000..f907b78696cf
--- /dev/null
+++ b/include/sound/rt5668.h
@@ -0,0 +1,40 @@
+/*
+ * linux/sound/rt5668.h -- Platform data for RT5668
+ *
+ * Copyright 2018 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5668_H
+#define __LINUX_SND_RT5668_H
+
+enum rt5668_dmic1_data_pin {
+ RT5668_DMIC1_NULL,
+ RT5668_DMIC1_DATA_GPIO2,
+ RT5668_DMIC1_DATA_GPIO5,
+};
+
+enum rt5668_dmic1_clk_pin {
+ RT5668_DMIC1_CLK_GPIO1,
+ RT5668_DMIC1_CLK_GPIO3,
+};
+
+enum rt5668_jd_src {
+ RT5668_JD_NULL,
+ RT5668_JD1,
+};
+
+struct rt5668_platform_data {
+
+ int ldo1_en; /* GPIO for LDO1_EN */
+
+ enum rt5668_dmic1_data_pin dmic1_data_pin;
+ enum rt5668_dmic1_clk_pin dmic1_clk_pin;
+ enum rt5668_jd_src jd_src;
+};
+
+#endif
+
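For illustration only, a hypothetical instantiation of the new platform data; the pin routing and jack-detect source below are made-up example values, not recommendations:

	static const struct rt5668_platform_data example_rt5668_pdata = {
		.ldo1_en	= -1,			/* no LDO1_EN GPIO wired up */
		.dmic1_data_pin	= RT5668_DMIC1_DATA_GPIO2,
		.dmic1_clk_pin	= RT5668_DMIC1_CLK_GPIO3,
		.jd_src		= RT5668_JD1,
	};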
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 8ad11669e4d8..e6f8c40ed43c 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -170,6 +170,8 @@ struct snd_soc_dai_ops {
unsigned int rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
+ int (*set_sdw_stream)(struct snd_soc_dai *dai,
+ void *stream, int direction);
/*
* DAI digital mute - optional.
* Called by soc-core to minimise any pops.
@@ -294,8 +296,8 @@ struct snd_soc_dai {
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int capture_active:1; /* stream is in use */
- unsigned int playback_active:1; /* stream is in use */
+ unsigned int capture_active; /* stream usage count */
+ unsigned int playback_active; /* stream usage count */
unsigned int probed:1;
unsigned int active;
@@ -313,7 +315,6 @@ struct snd_soc_dai {
unsigned int sample_bits;
/* parent platform/codec */
- struct snd_soc_codec *codec;
struct snd_soc_component *component;
/* CODEC TDM slot masks and params (for fixup) */
@@ -358,4 +359,25 @@ static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai)
return dev_get_drvdata(dai->dev);
}
+/**
+ * snd_soc_dai_set_sdw_stream() - Configures a DAI for SDW stream operation
+ * @dai: DAI
+ * @stream: stream handle (opaque pointer, simply forwarded to the DAI driver)
+ * @direction: Stream direction (Playback/Capture)
+ * The SoundWire subsystem doesn't have a notion of direction, so we reuse
+ * the ASoC stream direction to configure sink/source ports.
+ * Playback maps to source ports and Capture to sink ports.
+ *
+ * This should be invoked with NULL to clear the stream set previously.
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ if (dai->driver->ops->set_sdw_stream)
+ return dai->driver->ops->set_sdw_stream(dai, stream, direction);
+ else
+ return -ENOTSUPP;
+}
+
#endif
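A hedged sketch of a machine-driver call site for the new helper; "sdw_stream" stands in for whatever stream handle the machine driver allocated earlier, and error handling follows the -ENOTSUPP convention documented above:

	/* Typically invoked from a DAI-link hw_params callback; substream->stream
	 * is already SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE. */
	static int example_set_stream(struct snd_pcm_substream *substream,
				      void *sdw_stream)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		int ret;

		ret = snd_soc_dai_set_sdw_stream(rtd->codec_dai, sdw_stream,
						 substream->stream);
		if (ret && ret != -ENOTSUPP)	/* DAIs without the hook return -ENOTSUPP */
			return ret;

		return 0;
	}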
diff --git a/include/sound/soc.h b/include/sound/soc.h
index ad266d7e9553..1378dcd2128a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -401,11 +401,7 @@ struct snd_soc_ops;
struct snd_soc_pcm_runtime;
struct snd_soc_dai;
struct snd_soc_dai_driver;
-struct snd_soc_platform;
struct snd_soc_dai_link;
-struct snd_soc_platform_driver;
-struct snd_soc_codec;
-struct snd_soc_codec_driver;
struct snd_soc_component;
struct snd_soc_component_driver;
struct soc_enum;
@@ -430,13 +426,6 @@ enum snd_soc_card_subclass {
SND_SOC_CARD_CLASS_RUNTIME = 1,
};
-int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
- int source, unsigned int freq, int dir);
-int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
- unsigned int freq_in, unsigned int freq_out);
-int snd_soc_codec_set_jack(struct snd_soc_codec *codec,
- struct snd_soc_jack *jack, void *data);
-
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
@@ -455,19 +444,6 @@ static inline int snd_soc_resume(struct device *dev)
}
#endif
int snd_soc_poweroff(struct device *dev);
-int snd_soc_register_platform(struct device *dev,
- const struct snd_soc_platform_driver *platform_drv);
-int devm_snd_soc_register_platform(struct device *dev,
- const struct snd_soc_platform_driver *platform_drv);
-void snd_soc_unregister_platform(struct device *dev);
-int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
- const struct snd_soc_platform_driver *platform_drv);
-void snd_soc_remove_platform(struct snd_soc_platform *platform);
-struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev);
-int snd_soc_register_codec(struct device *dev,
- const struct snd_soc_codec_driver *codec_drv,
- struct snd_soc_dai_driver *dai_drv, int num_dai);
-void snd_soc_unregister_codec(struct device *dev);
int snd_soc_add_component(struct device *dev,
struct snd_soc_component *component,
const struct snd_soc_component_driver *component_driver,
@@ -482,16 +458,15 @@ int devm_snd_soc_register_component(struct device *dev,
void snd_soc_unregister_component(struct device *dev);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name);
-int snd_soc_cache_init(struct snd_soc_codec *codec);
-int snd_soc_cache_exit(struct snd_soc_codec *codec);
-int snd_soc_platform_read(struct snd_soc_platform *platform,
- unsigned int reg);
-int snd_soc_platform_write(struct snd_soc_platform *platform,
- unsigned int reg, unsigned int val);
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
#ifdef CONFIG_SND_SOC_COMPRESS
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
+#else
+static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+{
+ return 0;
+}
#endif
void snd_soc_disconnect_sync(struct device *dev);
@@ -576,23 +551,7 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
}
#endif
-/* codec register bit access */
-int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int mask, unsigned int value);
-int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int mask,
- unsigned int value);
-int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int mask, unsigned int value);
-
#ifdef CONFIG_SND_SOC_AC97_BUS
-#define snd_soc_alloc_ac97_codec(codec) \
- snd_soc_alloc_ac97_component(&codec->component)
-#define snd_soc_new_ac97_codec(codec, id, id_mask) \
- snd_soc_new_ac97_component(&codec->component, id, id_mask)
-#define snd_soc_free_ac97_codec(ac97) \
- snd_soc_free_ac97_component(ac97)
-
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
@@ -626,10 +585,6 @@ struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
int snd_soc_add_component_controls(struct snd_soc_component *component,
const struct snd_kcontrol_new *controls, unsigned int num_controls);
-int snd_soc_add_codec_controls(struct snd_soc_codec *codec,
- const struct snd_kcontrol_new *controls, unsigned int num_controls);
-int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
- const struct snd_kcontrol_new *controls, unsigned int num_controls);
int snd_soc_add_card_controls(struct snd_soc_card *soc_card,
const struct snd_kcontrol_new *controls, int num_controls);
int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
@@ -862,8 +817,6 @@ struct snd_soc_component {
unsigned int active;
- unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
- unsigned int registered_as_component:1;
unsigned int suspended:1; /* is in suspend PM state */
struct list_head list;
@@ -875,9 +828,6 @@ struct snd_soc_component {
struct list_head dai_list;
int num_dai;
- int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
- int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
-
struct regmap *regmap;
int val_bytes;
@@ -886,10 +836,6 @@ struct snd_soc_component {
/* attached dynamic objects */
struct list_head dobj_list;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_root;
-#endif
-
/*
* DO NOT use any of the fields below in drivers, they are temporary and
* are going to be removed again soon. If you use them in driver code the
@@ -899,29 +845,11 @@ struct snd_soc_component {
/* Don't use these, use snd_soc_component_get_dapm() */
struct snd_soc_dapm_context dapm;
- struct snd_soc_codec *codec;
-
- int (*probe)(struct snd_soc_component *);
- void (*remove)(struct snd_soc_component *);
- int (*suspend)(struct snd_soc_component *);
- int (*resume)(struct snd_soc_component *);
- int (*pcm_new)(struct snd_soc_component *, struct snd_soc_pcm_runtime *);
- void (*pcm_free)(struct snd_soc_component *, struct snd_pcm *);
-
- int (*set_sysclk)(struct snd_soc_component *component,
- int clk_id, int source, unsigned int freq, int dir);
- int (*set_pll)(struct snd_soc_component *component, int pll_id,
- int source, unsigned int freq_in, unsigned int freq_out);
- int (*set_jack)(struct snd_soc_component *component,
- struct snd_soc_jack *jack, void *data);
- int (*set_bias_level)(struct snd_soc_component *component,
- enum snd_soc_bias_level level);
-
/* machine specific init */
int (*init)(struct snd_soc_component *component);
#ifdef CONFIG_DEBUG_FS
- void (*init_debugfs)(struct snd_soc_component *component);
+ struct dentry *debugfs_root;
const char *debugfs_prefix;
#endif
};
@@ -938,97 +866,12 @@ snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
#define for_each_rtdcom_safe(rtd, rtdcom1, rtdcom2) \
list_for_each_entry_safe(rtdcom1, rtdcom2, &(rtd)->component_list, list)
-/* SoC Audio Codec device */
-struct snd_soc_codec {
- struct device *dev;
- const struct snd_soc_codec_driver *driver;
-
- struct list_head list;
-
- /* runtime */
- unsigned int cache_init:1; /* codec cache has been initialized */
-
- /* codec IO */
- void *control_data; /* codec control (i2c/3wire) data */
- hw_write_t hw_write;
- void *reg_cache;
-
- /* component */
- struct snd_soc_component component;
-};
-
-/* codec driver */
-struct snd_soc_codec_driver {
-
- /* driver ops */
- int (*probe)(struct snd_soc_codec *);
- int (*remove)(struct snd_soc_codec *);
- int (*suspend)(struct snd_soc_codec *);
- int (*resume)(struct snd_soc_codec *);
- struct snd_soc_component_driver component_driver;
-
- /* codec wide operations */
- int (*set_sysclk)(struct snd_soc_codec *codec,
- int clk_id, int source, unsigned int freq, int dir);
- int (*set_pll)(struct snd_soc_codec *codec, int pll_id, int source,
- unsigned int freq_in, unsigned int freq_out);
- int (*set_jack)(struct snd_soc_codec *codec,
- struct snd_soc_jack *jack, void *data);
-
- /* codec IO */
- struct regmap *(*get_regmap)(struct device *);
- unsigned int (*read)(struct snd_soc_codec *, unsigned int);
- int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
- unsigned int reg_cache_size;
- short reg_cache_step;
- short reg_word_size;
- const void *reg_cache_default;
-
- /* codec bias level */
- int (*set_bias_level)(struct snd_soc_codec *,
- enum snd_soc_bias_level level);
- bool idle_bias_off;
- bool suspend_bias_off;
-
- void (*seq_notifier)(struct snd_soc_dapm_context *,
- enum snd_soc_dapm_type, int);
-
- bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
-};
-
-/* SoC platform interface */
-struct snd_soc_platform_driver {
-
- int (*probe)(struct snd_soc_platform *);
- int (*remove)(struct snd_soc_platform *);
- struct snd_soc_component_driver component_driver;
-
- /* pcm creation and destruction */
- int (*pcm_new)(struct snd_soc_pcm_runtime *);
- void (*pcm_free)(struct snd_pcm *);
-
- /* platform stream pcm ops */
- const struct snd_pcm_ops *ops;
-
- /* platform stream compress ops */
- const struct snd_compr_ops *compr_ops;
-};
-
struct snd_soc_dai_link_component {
const char *name;
struct device_node *of_node;
const char *dai_name;
};
-struct snd_soc_platform {
- struct device *dev;
- const struct snd_soc_platform_driver *driver;
-
- struct list_head list;
-
- struct snd_soc_component component;
-};
-
struct snd_soc_dai_link {
/* config - must be set by machine driver */
const char *name; /* Codec name */
@@ -1276,8 +1119,6 @@ struct snd_soc_pcm_runtime {
/* runtime devices */
struct snd_pcm *pcm;
struct snd_compr *compr;
- struct snd_soc_codec *codec;
- struct snd_soc_platform *platform; /* will be removed */
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai;
@@ -1346,32 +1187,6 @@ struct soc_enum {
};
/**
- * snd_soc_component_to_codec() - Casts a component to the CODEC it is embedded in
- * @component: The component to cast to a CODEC
- *
- * This function must only be used on components that are known to be CODECs.
- * Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_codec *snd_soc_component_to_codec(
- struct snd_soc_component *component)
-{
- return container_of(component, struct snd_soc_codec, component);
-}
-
-/**
- * snd_soc_component_to_platform() - Casts a component to the platform it is embedded in
- * @component: The component to cast to a platform
- *
- * This function must only be used on components that are known to be platforms.
- * Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_platform *snd_soc_component_to_platform(
- struct snd_soc_component *component)
-{
- return container_of(component, struct snd_soc_platform, component);
-}
-
-/**
* snd_soc_dapm_to_component() - Casts a DAPM context to the component it is
* embedded in
* @dapm: The DAPM context to cast to the component
@@ -1387,33 +1202,6 @@ static inline struct snd_soc_component *snd_soc_dapm_to_component(
}
/**
- * snd_soc_dapm_to_codec() - Casts a DAPM context to the CODEC it is embedded in
- * @dapm: The DAPM context to cast to the CODEC
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a CODEC (e.g. in a CODEC driver). Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
- struct snd_soc_dapm_context *dapm)
-{
- return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
-}
-
-/**
- * snd_soc_dapm_to_platform() - Casts a DAPM context to the platform it is
- * embedded in
- * @dapm: The DAPM context to cast to the platform.
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a platform (e.g. in a platform driver). Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_platform *snd_soc_dapm_to_platform(
- struct snd_soc_dapm_context *dapm)
-{
- return snd_soc_component_to_platform(snd_soc_dapm_to_component(dapm));
-}
-
-/**
* snd_soc_component_get_dapm() - Returns the DAPM context associated with a
* component
* @component: The component for which to get the DAPM context
@@ -1425,31 +1213,6 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
}
/**
- * snd_soc_codec_get_dapm() - Returns the DAPM context for the CODEC
- * @codec: The CODEC for which to get the DAPM context
- *
- * Note: Use this function instead of directly accessing the CODEC's dapm field
- */
-static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
- struct snd_soc_codec *codec)
-{
- return snd_soc_component_get_dapm(&codec->component);
-}
-
-/**
- * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
- * @codec: The CODEC for which to initialize the DAPM bias level
- * @level: The DAPM level to initialize to
- *
- * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
- */
-static inline void snd_soc_codec_init_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- snd_soc_dapm_init_bias_level(snd_soc_codec_get_dapm(codec), level);
-}
-
-/**
* snd_soc_component_init_bias_level() - Initialize COMPONENT DAPM bias level
* @component: The COMPONENT for which to initialize the DAPM bias level
* @level: The DAPM level to initialize to
@@ -1465,18 +1228,6 @@ snd_soc_component_init_bias_level(struct snd_soc_component *component,
}
/**
- * snd_soc_dapm_get_bias_level() - Get current CODEC DAPM bias level
- * @codec: The CODEC for which to get the DAPM bias level
- *
- * Returns: The current DAPM bias level of the CODEC.
- */
-static inline enum snd_soc_bias_level snd_soc_codec_get_bias_level(
- struct snd_soc_codec *codec)
-{
- return snd_soc_dapm_get_bias_level(snd_soc_codec_get_dapm(codec));
-}
-
-/**
* snd_soc_component_get_bias_level() - Get current COMPONENT DAPM bias level
* @component: The COMPONENT for which to get the DAPM bias level
*
@@ -1490,21 +1241,6 @@ snd_soc_component_get_bias_level(struct snd_soc_component *component)
}
/**
- * snd_soc_codec_force_bias_level() - Set the CODEC DAPM bias level
- * @codec: The CODEC for which to set the level
- * @level: The level to set to
- *
- * Forces the CODEC bias level to a specific state. See
- * snd_soc_dapm_force_bias_level().
- */
-static inline int snd_soc_codec_force_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- return snd_soc_dapm_force_bias_level(snd_soc_codec_get_dapm(codec),
- level);
-}
-
-/**
* snd_soc_component_force_bias_level() - Set the COMPONENT DAPM bias level
* @component: The COMPONENT for which to set the level
* @level: The level to set to
@@ -1522,19 +1258,6 @@ snd_soc_component_force_bias_level(struct snd_soc_component *component,
}
/**
- * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
- * @kcontrol: The kcontrol
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a CODEC (e.g. in a CODEC driver). Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(
- struct snd_kcontrol *kcontrol)
-{
- return snd_soc_dapm_to_codec(snd_soc_dapm_kcontrol_dapm(kcontrol));
-}
-
-/**
* snd_soc_dapm_kcontrol_component() - Returns the component associated to a kcontrol
* @kcontrol: The kcontrol
*
@@ -1547,22 +1270,6 @@ static inline struct snd_soc_component *snd_soc_dapm_kcontrol_component(
return snd_soc_dapm_to_component(snd_soc_dapm_kcontrol_dapm(kcontrol));
}
-/* codec IO */
-unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
-int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val);
-
-/**
- * snd_soc_cache_sync() - Sync the register cache with the hardware
- * @codec: CODEC to sync
- *
- * Note: This function will call regcache_sync()
- */
-static inline int snd_soc_cache_sync(struct snd_soc_codec *codec)
-{
- return regcache_sync(codec->component.regmap);
-}
-
/**
* snd_soc_component_cache_sync() - Sync the register cache with the hardware
* @component: COMPONENT to sync
@@ -1605,37 +1312,6 @@ void snd_soc_component_init_regmap(struct snd_soc_component *component,
struct regmap *regmap);
void snd_soc_component_exit_regmap(struct snd_soc_component *component);
-/**
- * snd_soc_codec_init_regmap() - Initialize regmap instance for the CODEC
- * @codec: The CODEC for which to initialize the regmap instance
- * @regmap: The regmap instance that should be used by the CODEC
- *
- * This function allows deferred assignment of the regmap instance that is
- * associated with the CODEC. Only use this if the regmap instance is not yet
- * ready when the CODEC is registered. The function must also be called before
- * the first IO attempt of the CODEC.
- */
-static inline void snd_soc_codec_init_regmap(struct snd_soc_codec *codec,
- struct regmap *regmap)
-{
- snd_soc_component_init_regmap(&codec->component, regmap);
-}
-
-/**
- * snd_soc_codec_exit_regmap() - De-initialize regmap instance for the CODEC
- * @codec: The CODEC for which to de-initialize the regmap instance
- *
- * Calls regmap_exit() on the regmap instance associated to the CODEC and
- * removes the regmap instance from the CODEC.
- *
- * This function should only be used if snd_soc_codec_init_regmap() was used to
- * initialize the regmap instance.
- */
-static inline void snd_soc_codec_exit_regmap(struct snd_soc_codec *codec)
-{
- snd_soc_component_exit_regmap(&codec->component);
-}
-
#endif
/* device driver data */
@@ -1662,28 +1338,6 @@ static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
return dev_get_drvdata(c->dev);
}
-static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
- void *data)
-{
- snd_soc_component_set_drvdata(&codec->component, data);
-}
-
-static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec)
-{
- return snd_soc_component_get_drvdata(&codec->component);
-}
-
-static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform,
- void *data)
-{
- snd_soc_component_set_drvdata(&platform->component, data);
-}
-
-static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform)
-{
- return snd_soc_component_get_drvdata(&platform->component);
-}
-
static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
{
INIT_LIST_HEAD(&card->widgets);
@@ -1735,20 +1389,15 @@ static inline bool snd_soc_component_is_active(
return component->active != 0;
}
-static inline bool snd_soc_codec_is_active(struct snd_soc_codec *codec)
-{
- return snd_soc_component_is_active(&codec->component);
-}
-
/**
* snd_soc_kcontrol_component() - Returns the component that registered the
* control
* @kcontrol: The control for which to get the component
*
* Note: This function will work correctly if the control has been registered
- * for a component. Either with snd_soc_add_codec_controls() or
- * snd_soc_add_platform_controls() or via table based setup for either a
- * CODEC, a platform or component driver. Otherwise the behavior is undefined.
+ * for a component. With snd_soc_add_codec_controls() or via table based
+ * setup for either a CODEC or component driver. Otherwise the behavior is
+ * undefined.
*/
static inline struct snd_soc_component *snd_soc_kcontrol_component(
struct snd_kcontrol *kcontrol)
@@ -1756,34 +1405,6 @@ static inline struct snd_soc_component *snd_soc_kcontrol_component(
return snd_kcontrol_chip(kcontrol);
}
-/**
- * snd_soc_kcontrol_codec() - Returns the CODEC that registered the control
- * @kcontrol: The control for which to get the CODEC
- *
- * Note: This function will only work correctly if the control has been
- * registered with snd_soc_add_codec_controls() or via table based setup of
- * snd_soc_codec_driver. Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_codec *snd_soc_kcontrol_codec(
- struct snd_kcontrol *kcontrol)
-{
- return snd_soc_component_to_codec(snd_soc_kcontrol_component(kcontrol));
-}
-
-/**
- * snd_soc_kcontrol_platform() - Returns the platform that registered the control
- * @kcontrol: The control for which to get the platform
- *
- * Note: This function will only work correctly if the control has been
- * registered with snd_soc_add_platform_controls() or via table based setup of
- * a snd_soc_platform_driver. Otherwise the behavior is undefined.
- */
-static inline struct snd_soc_platform *snd_soc_kcontrol_platform(
- struct snd_kcontrol *kcontrol)
-{
- return snd_soc_component_to_platform(snd_soc_kcontrol_component(kcontrol));
-}
-
int snd_soc_util_init(void);
void snd_soc_util_exit(void);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9f9f5902af38..922a39f45abc 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -143,6 +143,7 @@ enum se_cmd_flags_table {
SCF_ACK_KREF = 0x00400000,
SCF_USE_CPUID = 0x00800000,
SCF_TASK_ATTR_SET = 0x01000000,
+ SCF_TREAT_READ_AS_NORMAL = 0x02000000,
};
/*
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index f0820554caa9..d0a341bc4540 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -575,6 +575,48 @@ TRACE_EVENT(afs_protocol_error,
__entry->call, __entry->error, __entry->where)
);
+TRACE_EVENT(afs_cm_no_server,
+ TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
+
+ TP_ARGS(call, srx),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(unsigned int, op_id )
+ __field_struct(struct sockaddr_rxrpc, srx )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op_id = call->operation_ID;
+ memcpy(&__entry->srx, srx, sizeof(__entry->srx));
+ ),
+
+ TP_printk("c=%08x op=%u %pISpc",
+ __entry->call, __entry->op_id, &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_cm_no_server_u,
+ TP_PROTO(struct afs_call *call, const uuid_t *uuid),
+
+ TP_ARGS(call, uuid),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(unsigned int, op_id )
+ __field_struct(uuid_t, uuid )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op_id = call->operation_ID;
+ memcpy(&__entry->uuid, uuid, sizeof(__entry->uuid));
+ ),
+
+ TP_printk("c=%08x op=%u %pU",
+ __entry->call, __entry->op_id, &__entry->uuid)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index ccd1a3bdff46..40c300fe704d 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -12,7 +12,6 @@
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
struct snd_soc_jack;
-struct snd_soc_codec;
struct snd_soc_card;
struct snd_soc_dapm_widget;
struct snd_soc_dapm_path;
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
deleted file mode 100644
index 150185647e6b..000000000000
--- a/include/trace/events/bpf.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM bpf
-
-#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_BPF_H
-
-/* These are only used within the BPF_SYSCALL code */
-#ifdef CONFIG_BPF_SYSCALL
-
-#include <linux/filter.h>
-#include <linux/bpf.h>
-#include <linux/fs.h>
-#include <linux/tracepoint.h>
-
-#define __PROG_TYPE_MAP(FN) \
- FN(SOCKET_FILTER) \
- FN(KPROBE) \
- FN(SCHED_CLS) \
- FN(SCHED_ACT) \
- FN(TRACEPOINT) \
- FN(XDP) \
- FN(PERF_EVENT) \
- FN(CGROUP_SKB) \
- FN(CGROUP_SOCK) \
- FN(LWT_IN) \
- FN(LWT_OUT) \
- FN(LWT_XMIT)
-
-#define __MAP_TYPE_MAP(FN) \
- FN(HASH) \
- FN(ARRAY) \
- FN(PROG_ARRAY) \
- FN(PERF_EVENT_ARRAY) \
- FN(PERCPU_HASH) \
- FN(PERCPU_ARRAY) \
- FN(STACK_TRACE) \
- FN(CGROUP_ARRAY) \
- FN(LRU_HASH) \
- FN(LRU_PERCPU_HASH) \
- FN(LPM_TRIE)
-
-#define __PROG_TYPE_TP_FN(x) \
- TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x);
-#define __PROG_TYPE_SYM_FN(x) \
- { BPF_PROG_TYPE_##x, #x },
-#define __PROG_TYPE_SYM_TAB \
- __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 }
-__PROG_TYPE_MAP(__PROG_TYPE_TP_FN)
-
-#define __MAP_TYPE_TP_FN(x) \
- TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x);
-#define __MAP_TYPE_SYM_FN(x) \
- { BPF_MAP_TYPE_##x, #x },
-#define __MAP_TYPE_SYM_TAB \
- __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 }
-__MAP_TYPE_MAP(__MAP_TYPE_TP_FN)
-
-DECLARE_EVENT_CLASS(bpf_prog_event,
-
- TP_PROTO(const struct bpf_prog *prg),
-
- TP_ARGS(prg),
-
- TP_STRUCT__entry(
- __array(u8, prog_tag, 8)
- __field(u32, type)
- ),
-
- TP_fast_assign(
- BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
- memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
- __entry->type = prg->type;
- ),
-
- TP_printk("prog=%s type=%s",
- __print_hex_str(__entry->prog_tag, 8),
- __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB))
-);
-
-DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type,
-
- TP_PROTO(const struct bpf_prog *prg),
-
- TP_ARGS(prg)
-);
-
-DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu,
-
- TP_PROTO(const struct bpf_prog *prg),
-
- TP_ARGS(prg)
-);
-
-TRACE_EVENT(bpf_prog_load,
-
- TP_PROTO(const struct bpf_prog *prg, int ufd),
-
- TP_ARGS(prg, ufd),
-
- TP_STRUCT__entry(
- __array(u8, prog_tag, 8)
- __field(u32, type)
- __field(int, ufd)
- ),
-
- TP_fast_assign(
- BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
- memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
- __entry->type = prg->type;
- __entry->ufd = ufd;
- ),
-
- TP_printk("prog=%s type=%s ufd=%d",
- __print_hex_str(__entry->prog_tag, 8),
- __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB),
- __entry->ufd)
-);
-
-TRACE_EVENT(bpf_map_create,
-
- TP_PROTO(const struct bpf_map *map, int ufd),
-
- TP_ARGS(map, ufd),
-
- TP_STRUCT__entry(
- __field(u32, type)
- __field(u32, size_key)
- __field(u32, size_value)
- __field(u32, max_entries)
- __field(u32, flags)
- __field(int, ufd)
- ),
-
- TP_fast_assign(
- __entry->type = map->map_type;
- __entry->size_key = map->key_size;
- __entry->size_value = map->value_size;
- __entry->max_entries = map->max_entries;
- __entry->flags = map->map_flags;
- __entry->ufd = ufd;
- ),
-
- TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x",
- __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
- __entry->ufd, __entry->size_key, __entry->size_value,
- __entry->max_entries, __entry->flags)
-);
-
-DECLARE_EVENT_CLASS(bpf_obj_prog,
-
- TP_PROTO(const struct bpf_prog *prg, int ufd,
- const struct filename *pname),
-
- TP_ARGS(prg, ufd, pname),
-
- TP_STRUCT__entry(
- __array(u8, prog_tag, 8)
- __field(int, ufd)
- __string(path, pname->name)
- ),
-
- TP_fast_assign(
- BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
- memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
- __assign_str(path, pname->name);
- __entry->ufd = ufd;
- ),
-
- TP_printk("prog=%s path=%s ufd=%d",
- __print_hex_str(__entry->prog_tag, 8),
- __get_str(path), __entry->ufd)
-);
-
-DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog,
-
- TP_PROTO(const struct bpf_prog *prg, int ufd,
- const struct filename *pname),
-
- TP_ARGS(prg, ufd, pname)
-);
-
-DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog,
-
- TP_PROTO(const struct bpf_prog *prg, int ufd,
- const struct filename *pname),
-
- TP_ARGS(prg, ufd, pname)
-);
-
-DECLARE_EVENT_CLASS(bpf_obj_map,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const struct filename *pname),
-
- TP_ARGS(map, ufd, pname),
-
- TP_STRUCT__entry(
- __field(u32, type)
- __field(int, ufd)
- __string(path, pname->name)
- ),
-
- TP_fast_assign(
- __assign_str(path, pname->name);
- __entry->type = map->map_type;
- __entry->ufd = ufd;
- ),
-
- TP_printk("map type=%s ufd=%d path=%s",
- __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
- __entry->ufd, __get_str(path))
-);
-
-DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const struct filename *pname),
-
- TP_ARGS(map, ufd, pname)
-);
-
-DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const struct filename *pname),
-
- TP_ARGS(map, ufd, pname)
-);
-
-DECLARE_EVENT_CLASS(bpf_map_keyval,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const void *key, const void *val),
-
- TP_ARGS(map, ufd, key, val),
-
- TP_STRUCT__entry(
- __field(u32, type)
- __field(u32, key_len)
- __dynamic_array(u8, key, map->key_size)
- __field(bool, key_trunc)
- __field(u32, val_len)
- __dynamic_array(u8, val, map->value_size)
- __field(bool, val_trunc)
- __field(int, ufd)
- ),
-
- TP_fast_assign(
- memcpy(__get_dynamic_array(key), key, map->key_size);
- memcpy(__get_dynamic_array(val), val, map->value_size);
- __entry->type = map->map_type;
- __entry->key_len = min(map->key_size, 16U);
- __entry->key_trunc = map->key_size != __entry->key_len;
- __entry->val_len = min(map->value_size, 16U);
- __entry->val_trunc = map->value_size != __entry->val_len;
- __entry->ufd = ufd;
- ),
-
- TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]",
- __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
- __entry->ufd,
- __print_hex(__get_dynamic_array(key), __entry->key_len),
- __entry->key_trunc ? " ..." : "",
- __print_hex(__get_dynamic_array(val), __entry->val_len),
- __entry->val_trunc ? " ..." : "")
-);
-
-DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const void *key, const void *val),
-
- TP_ARGS(map, ufd, key, val)
-);
-
-DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const void *key, const void *val),
-
- TP_ARGS(map, ufd, key, val)
-);
-
-TRACE_EVENT(bpf_map_delete_elem,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const void *key),
-
- TP_ARGS(map, ufd, key),
-
- TP_STRUCT__entry(
- __field(u32, type)
- __field(u32, key_len)
- __dynamic_array(u8, key, map->key_size)
- __field(bool, key_trunc)
- __field(int, ufd)
- ),
-
- TP_fast_assign(
- memcpy(__get_dynamic_array(key), key, map->key_size);
- __entry->type = map->map_type;
- __entry->key_len = min(map->key_size, 16U);
- __entry->key_trunc = map->key_size != __entry->key_len;
- __entry->ufd = ufd;
- ),
-
- TP_printk("map type=%s ufd=%d key=[%s%s]",
- __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
- __entry->ufd,
- __print_hex(__get_dynamic_array(key), __entry->key_len),
- __entry->key_trunc ? " ..." : "")
-);
-
-TRACE_EVENT(bpf_map_next_key,
-
- TP_PROTO(const struct bpf_map *map, int ufd,
- const void *key, const void *key_next),
-
- TP_ARGS(map, ufd, key, key_next),
-
- TP_STRUCT__entry(
- __field(u32, type)
- __field(u32, key_len)
- __dynamic_array(u8, key, map->key_size)
- __dynamic_array(u8, nxt, map->key_size)
- __field(bool, key_trunc)
- __field(bool, key_null)
- __field(int, ufd)
- ),
-
- TP_fast_assign(
- if (key)
- memcpy(__get_dynamic_array(key), key, map->key_size);
- __entry->key_null = !key;
- memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
- __entry->type = map->map_type;
- __entry->key_len = min(map->key_size, 16U);
- __entry->key_trunc = map->key_size != __entry->key_len;
- __entry->ufd = ufd;
- ),
-
- TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
- __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
- __entry->ufd,
- __entry->key_null ? "NULL" : __print_hex(__get_dynamic_array(key),
- __entry->key_len),
- __entry->key_trunc && !__entry->key_null ? " ..." : "",
- __print_hex(__get_dynamic_array(nxt), __entry->key_len),
- __entry->key_trunc ? " ..." : "")
-);
-#endif /* CONFIG_BPF_SYSCALL */
-#endif /* _TRACE_BPF_H */
-
-#include <trace/define_trace.h>
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 965c650a5273..39b94ec965be 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -121,9 +121,9 @@ TRACE_EVENT(btrfs_transaction_commit,
__entry->root_objectid = root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), gen = %llu",
+ TP_printk_btrfs("root=%llu(%s) gen=%llu",
show_root_type(__entry->root_objectid),
- (unsigned long long)__entry->generation)
+ __entry->generation)
);
DECLARE_EVENT_CLASS(btrfs__inode,
@@ -133,7 +133,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
TP_ARGS(inode),
TP_STRUCT__entry_btrfs(
- __field( ino_t, ino )
+ __field( u64, ino )
__field( blkcnt_t, blocks )
__field( u64, disk_i_size )
__field( u64, generation )
@@ -143,7 +143,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->ino = inode->i_ino;
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->blocks = inode->i_blocks;
__entry->disk_i_size = BTRFS_I(inode)->disk_i_size;
__entry->generation = BTRFS_I(inode)->generation;
@@ -153,15 +153,15 @@ DECLARE_EVENT_CLASS(btrfs__inode,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+ TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu "
"disk_i_size=%llu last_trans=%llu logged_trans=%llu",
show_root_type(__entry->root_objectid),
- (unsigned long long)__entry->generation,
- (unsigned long)__entry->ino,
+ __entry->generation,
+ __entry->ino,
(unsigned long long)__entry->blocks,
- (unsigned long long)__entry->disk_i_size,
- (unsigned long long)__entry->last_trans,
- (unsigned long long)__entry->logged_trans)
+ __entry->disk_i_size,
+ __entry->last_trans,
+ __entry->logged_trans)
);
DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
@@ -244,23 +244,25 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
"block_len=%llu flags=%s refs=%u "
"compress_type=%u",
show_root_type(__entry->root_objectid),
- (unsigned long long)__entry->ino,
- (unsigned long long)__entry->start,
- (unsigned long long)__entry->len,
- (unsigned long long)__entry->orig_start,
+ __entry->ino,
+ __entry->start,
+ __entry->len,
+ __entry->orig_start,
show_map_type(__entry->block_start),
- (unsigned long long)__entry->block_len,
+ __entry->block_len,
show_map_flags(__entry->flags),
__entry->refs, __entry->compress_type)
);
TRACE_EVENT(btrfs_handle_em_exist,
- TP_PROTO(const struct extent_map *existing, const struct extent_map *map, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct extent_map *existing, const struct extent_map *map,
+ u64 start, u64 len),
- TP_ARGS(existing, map, start, len),
+ TP_ARGS(fs_info, existing, map, start, len),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, e_start )
__field( u64, e_len )
__field( u64, map_start )
@@ -269,7 +271,7 @@ TRACE_EVENT(btrfs_handle_em_exist,
__field( u64, len )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->e_start = existing->start;
__entry->e_len = existing->len;
__entry->map_start = map->start;
@@ -278,15 +280,15 @@ TRACE_EVENT(btrfs_handle_em_exist,
__entry->len = len;
),
- TP_printk("start=%llu len=%llu "
+ TP_printk_btrfs("start=%llu len=%llu "
"existing(start=%llu len=%llu) "
"em(start=%llu len=%llu)",
- (unsigned long long)__entry->start,
- (unsigned long long)__entry->len,
- (unsigned long long)__entry->e_start,
- (unsigned long long)__entry->e_len,
- (unsigned long long)__entry->map_start,
- (unsigned long long)__entry->map_len)
+ __entry->start,
+ __entry->len,
+ __entry->e_start,
+ __entry->e_len,
+ __entry->map_start,
+ __entry->map_len)
);
/* file extent item */
@@ -443,7 +445,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
TP_ARGS(inode, ordered),
TP_STRUCT__entry_btrfs(
- __field( ino_t, ino )
+ __field( u64, ino )
__field( u64, file_offset )
__field( u64, start )
__field( u64, len )
@@ -457,7 +459,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->ino = inode->i_ino;
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->file_offset = ordered->file_offset;
__entry->start = ordered->start;
__entry->len = ordered->len;
@@ -477,13 +479,13 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
"bytes_left=%llu flags=%s compress_type=%d "
"refs=%d",
show_root_type(__entry->root_objectid),
- (unsigned long long)__entry->ino,
- (unsigned long long)__entry->file_offset,
- (unsigned long long)__entry->start,
- (unsigned long long)__entry->len,
- (unsigned long long)__entry->disk_len,
- (unsigned long long)__entry->truncated_len,
- (unsigned long long)__entry->bytes_left,
+ __entry->ino,
+ __entry->file_offset,
+ __entry->start,
+ __entry->len,
+ __entry->disk_len,
+ __entry->truncated_len,
+ __entry->bytes_left,
show_ordered_flags(__entry->flags),
__entry->compress_type, __entry->refs)
);
@@ -528,7 +530,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_ARGS(page, inode, wbc),
TP_STRUCT__entry_btrfs(
- __field( ino_t, ino )
+ __field( u64, ino )
__field( pgoff_t, index )
__field( long, nr_to_write )
__field( long, pages_skipped )
@@ -542,7 +544,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->ino = inode->i_ino;
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->index = page->index;
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
@@ -556,12 +558,12 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+ TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu "
"nr_to_write=%ld pages_skipped=%ld range_start=%llu "
"range_end=%llu for_kupdate=%d "
"for_reclaim=%d range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
- (unsigned long)__entry->ino, __entry->index,
+ __entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
__entry->range_start, __entry->range_end,
__entry->for_kupdate,
@@ -584,7 +586,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
TP_ARGS(page, start, end, uptodate),
TP_STRUCT__entry_btrfs(
- __field( ino_t, ino )
+ __field( u64, ino )
__field( pgoff_t, index )
__field( u64, start )
__field( u64, end )
@@ -593,7 +595,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
),
TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb),
- __entry->ino = page->mapping->host->i_ino;
+ __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host));
__entry->index = page->index;
__entry->start = start;
__entry->end = end;
@@ -602,12 +604,12 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
BTRFS_I(page->mapping->host)->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
+ TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
"end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
- (unsigned long)__entry->ino, (unsigned long)__entry->index,
- (unsigned long long)__entry->start,
- (unsigned long long)__entry->end, __entry->uptodate)
+ __entry->ino, (unsigned long)__entry->index,
+ __entry->start,
+ __entry->end, __entry->uptodate)
);
TRACE_EVENT(btrfs_sync_file,
@@ -617,8 +619,8 @@ TRACE_EVENT(btrfs_sync_file,
TP_ARGS(file, datasync),
TP_STRUCT__entry_btrfs(
- __field( ino_t, ino )
- __field( ino_t, parent )
+ __field( u64, ino )
+ __field( u64, parent )
__field( int, datasync )
__field( u64, root_objectid )
),
@@ -628,16 +630,17 @@ TRACE_EVENT(btrfs_sync_file,
const struct inode *inode = d_inode(dentry);
TP_fast_assign_fsid(btrfs_sb(file->f_path.dentry->d_sb));
- __entry->ino = inode->i_ino;
- __entry->parent = d_inode(dentry->d_parent)->i_ino;
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
+ __entry->parent = btrfs_ino(BTRFS_I(d_inode(dentry->d_parent)));
__entry->datasync = datasync;
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu parent=%llu datasync=%d",
show_root_type(__entry->root_objectid),
- (unsigned long)__entry->ino, (unsigned long)__entry->parent,
+ __entry->ino,
+ __entry->parent,
__entry->datasync)
);
@@ -655,7 +658,7 @@ TRACE_EVENT(btrfs_sync_fs,
__entry->wait = wait;
),
- TP_printk_btrfs("wait = %d", __entry->wait)
+ TP_printk_btrfs("wait=%d", __entry->wait)
);
TRACE_EVENT(btrfs_add_block_group,
@@ -665,8 +668,7 @@ TRACE_EVENT(btrfs_add_block_group,
TP_ARGS(fs_info, block_group, create),
- TP_STRUCT__entry(
- __array( u8, fsid, BTRFS_FSID_SIZE )
+ TP_STRUCT__entry_btrfs(
__field( u64, offset )
__field( u64, size )
__field( u64, flags )
@@ -675,8 +677,7 @@ TRACE_EVENT(btrfs_add_block_group,
__field( int, create )
),
- TP_fast_assign(
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE);
+ TP_fast_assign_btrfs(fs_info,
__entry->offset = block_group->key.objectid;
__entry->size = block_group->key.offset;
__entry->flags = block_group->flags;
@@ -686,16 +687,16 @@ TRACE_EVENT(btrfs_add_block_group,
__entry->create = create;
),
- TP_printk("%pU: block_group offset=%llu size=%llu "
+ TP_printk_btrfs("block_group offset=%llu size=%llu "
"flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
- "create=%d", __entry->fsid,
- (unsigned long long)__entry->offset,
- (unsigned long long)__entry->size,
- (unsigned long long)__entry->flags,
+ "create=%d",
+ __entry->offset,
+ __entry->size,
+ __entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
- (unsigned long long)__entry->bytes_used,
- (unsigned long long)__entry->bytes_super, __entry->create)
+ __entry->bytes_used,
+ __entry->bytes_super, __entry->create)
);
#define show_ref_action(action) \
@@ -740,13 +741,13 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
"parent=%llu(%s) ref_root=%llu(%s) level=%d "
"type=%s seq=%llu",
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->num_bytes,
+ __entry->bytenr,
+ __entry->num_bytes,
show_ref_action(__entry->action),
show_root_type(__entry->parent),
show_root_type(__entry->ref_root),
__entry->level, show_ref_type(__entry->type),
- (unsigned long long)__entry->seq)
+ __entry->seq)
);
DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref,
@@ -805,15 +806,15 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
"parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
"offset=%llu type=%s seq=%llu",
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->num_bytes,
+ __entry->bytenr,
+ __entry->num_bytes,
show_ref_action(__entry->action),
show_root_type(__entry->parent),
show_root_type(__entry->ref_root),
- (unsigned long long)__entry->owner,
- (unsigned long long)__entry->offset,
+ __entry->owner,
+ __entry->offset,
show_ref_type(__entry->type),
- (unsigned long long)__entry->seq)
+ __entry->seq)
);
DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref,
@@ -859,8 +860,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
),
TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->num_bytes,
+ __entry->bytenr,
+ __entry->num_bytes,
show_ref_action(__entry->action),
__entry->is_data)
);
@@ -923,8 +924,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
"num_stripes=%d sub_stripes=%d type=%s",
show_root_type(__entry->root_objectid),
- (unsigned long long)__entry->offset,
- (unsigned long long)__entry->size,
+ __entry->offset,
+ __entry->size,
__entry->num_stripes, __entry->sub_stripes,
show_chunk_type(__entry->type))
);
@@ -974,9 +975,9 @@ TRACE_EVENT(btrfs_cow_block,
"(orig_level=%d) cow_buf=%llu (cow_level=%d)",
show_root_type(__entry->root_objectid),
__entry->refs,
- (unsigned long long)__entry->buf_start,
+ __entry->buf_start,
__entry->buf_level,
- (unsigned long long)__entry->cow_start,
+ __entry->cow_start,
__entry->cow_level)
);
@@ -1001,7 +1002,7 @@ TRACE_EVENT(btrfs_space_reservation,
__entry->reserve = reserve;
),
- TP_printk_btrfs("%s: %Lu %s %Lu", __get_str(type), __entry->val,
+ TP_printk_btrfs("%s: %llu %s %llu", __get_str(type), __entry->val,
__entry->reserve ? "reserve" : "release",
__entry->bytes)
);
@@ -1019,29 +1020,27 @@ TRACE_EVENT(btrfs_trigger_flush,
TP_ARGS(fs_info, flags, bytes, flush, reason),
- TP_STRUCT__entry(
- __array( u8, fsid, BTRFS_FSID_SIZE )
+ TP_STRUCT__entry_btrfs(
__field( u64, flags )
__field( u64, bytes )
__field( int, flush )
__string( reason, reason )
),
- TP_fast_assign(
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE);
+ TP_fast_assign_btrfs(fs_info,
__entry->flags = flags;
__entry->bytes = bytes;
__entry->flush = flush;
__assign_str(reason, reason)
),
- TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
- __entry->fsid, __get_str(reason), __entry->flush,
+ TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
+ __get_str(reason), __entry->flush,
show_flush_action(__entry->flush),
- (unsigned long long)__entry->flags,
+ __entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
- (unsigned long long)__entry->bytes)
+ __entry->bytes)
);
#define show_flush_state(state) \
@@ -1060,29 +1059,27 @@ TRACE_EVENT(btrfs_flush_space,
TP_ARGS(fs_info, flags, num_bytes, state, ret),
- TP_STRUCT__entry(
- __array( u8, fsid, BTRFS_FSID_SIZE )
+ TP_STRUCT__entry_btrfs(
__field( u64, flags )
__field( u64, num_bytes )
__field( int, state )
__field( int, ret )
),
- TP_fast_assign(
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE);
+ TP_fast_assign_btrfs(fs_info,
__entry->flags = flags;
__entry->num_bytes = num_bytes;
__entry->state = state;
__entry->ret = ret;
),
- TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d",
- __entry->fsid, __entry->state,
+ TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d",
+ __entry->state,
show_flush_state(__entry->state),
- (unsigned long long)__entry->flags,
+ __entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
- (unsigned long long)__entry->num_bytes, __entry->ret)
+ __entry->num_bytes, __entry->ret)
);
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
@@ -1103,8 +1100,8 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
- (unsigned long long)__entry->start,
- (unsigned long long)__entry->len)
+ __entry->start,
+ __entry->len)
);
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
@@ -1140,7 +1137,7 @@ TRACE_EVENT(find_free_extent,
__entry->data = data;
),
- TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
+ TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
@@ -1149,11 +1146,10 @@ TRACE_EVENT(find_free_extent,
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(fs_info, block_group, start, len),
+ TP_ARGS(block_group, start, len),
TP_STRUCT__entry_btrfs(
__field( u64, bg_objectid )
@@ -1162,15 +1158,15 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
__field( u64, len )
),
- TP_fast_assign_btrfs(fs_info,
+ TP_fast_assign_btrfs(block_group->fs_info,
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
__entry->start = start;
__entry->len = len;
),
- TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
- "start=%Lu len=%Lu",
+ TP_printk_btrfs("root=%llu(%s) block_group=%llu flags=%llu(%s) "
+ "start=%llu len=%llu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1180,20 +1176,18 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(fs_info, block_group, start, len)
+ TP_ARGS(block_group, start, len)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(fs_info, block_group, start, len)
+ TP_ARGS(block_group, start, len)
);
TRACE_EVENT(btrfs_find_cluster,
@@ -1221,8 +1215,8 @@ TRACE_EVENT(btrfs_find_cluster,
__entry->min_bytes = min_bytes;
),
- TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
- "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
+ TP_printk_btrfs("block_group=%llu flags=%llu(%s) start=%llu len=%llu "
+ "empty_size=%llu min_bytes=%llu", __entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS), __entry->start,
@@ -1243,7 +1237,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
__entry->bg_objectid = block_group->key.objectid;
),
- TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
+ TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
);
TRACE_EVENT(btrfs_setup_cluster,
@@ -1272,8 +1266,8 @@ TRACE_EVENT(btrfs_setup_cluster,
__entry->bitmap = bitmap;
),
- TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
- "size=%Lu max_size=%Lu bitmap=%d",
+ TP_printk_btrfs("block_group=%llu flags=%llu(%s) window_start=%llu "
+ "size=%llu max_size=%llu bitmap=%d",
__entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
@@ -1476,7 +1470,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
TP_STRUCT__entry_btrfs(
__field( u64, rootid )
- __field( unsigned long, ino )
+ __field( u64, ino )
__field( u64, start )
__field( u64, len )
__field( u64, reserved )
@@ -1485,14 +1479,14 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->rootid = BTRFS_I(inode)->root->objectid;
- __entry->ino = inode->i_ino;
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->start = start;
__entry->len = len;
__entry->reserved = reserved;
__entry->op = op;
),
- TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
+ TP_printk_btrfs("root=%llu ino=%llu start=%llu len=%llu reserved=%llu op=%s",
__entry->rootid, __entry->ino, __entry->start, __entry->len,
__entry->reserved,
__print_flags((unsigned long)__entry->op, "",
@@ -1584,12 +1578,14 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TRACE_EVENT(btrfs_qgroup_account_extent,
- TP_PROTO(const struct btrfs_fs_info *fs_info, u64 bytenr,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid, u64 bytenr,
u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
- TP_ARGS(fs_info, bytenr, num_bytes, nr_old_roots, nr_new_roots),
+ TP_ARGS(fs_info, transid, bytenr, num_bytes, nr_old_roots,
+ nr_new_roots),
TP_STRUCT__entry_btrfs(
+ __field( u64, transid )
__field( u64, bytenr )
__field( u64, num_bytes )
__field( u64, nr_old_roots )
@@ -1597,43 +1593,49 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
),
TP_fast_assign_btrfs(fs_info,
+ __entry->transid = transid;
__entry->bytenr = bytenr;
__entry->num_bytes = num_bytes;
__entry->nr_old_roots = nr_old_roots;
__entry->nr_new_roots = nr_new_roots;
),
- TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
- "nr_new_roots=%llu",
- __entry->bytenr,
- __entry->num_bytes,
- __entry->nr_old_roots,
- __entry->nr_new_roots)
+ TP_printk_btrfs(
+"transid=%llu bytenr=%llu num_bytes=%llu nr_old_roots=%llu nr_new_roots=%llu",
+ __entry->transid,
+ __entry->bytenr,
+ __entry->num_bytes,
+ __entry->nr_old_roots,
+ __entry->nr_new_roots)
);
TRACE_EVENT(qgroup_update_counters,
- TP_PROTO(const struct btrfs_fs_info *fs_info, u64 qgid,
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *qgroup,
u64 cur_old_count, u64 cur_new_count),
- TP_ARGS(fs_info, qgid, cur_old_count, cur_new_count),
+ TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count),
TP_STRUCT__entry_btrfs(
__field( u64, qgid )
+ __field( u64, old_rfer )
+ __field( u64, old_excl )
__field( u64, cur_old_count )
__field( u64, cur_new_count )
),
TP_fast_assign_btrfs(fs_info,
- __entry->qgid = qgid;
+ __entry->qgid = qgroup->qgroupid;
+ __entry->old_rfer = qgroup->rfer;
+ __entry->old_excl = qgroup->excl;
__entry->cur_old_count = cur_old_count;
__entry->cur_new_count = cur_new_count;
),
- TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
- __entry->qgid,
- __entry->cur_old_count,
- __entry->cur_new_count)
+ TP_printk_btrfs("qgid=%llu old_rfer=%llu old_excl=%llu cur_old_count=%llu cur_new_count=%llu",
+ __entry->qgid, __entry->old_rfer, __entry->old_excl,
+ __entry->cur_old_count, __entry->cur_new_count)
);
TRACE_EVENT(qgroup_update_reserve,
@@ -1765,14 +1767,14 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref,
),
TP_printk_btrfs("root_id=%llu key=[%llu,%u,%llu] level=%d count=[%d+%d=%d] parent=%llu wanted_disk_byte=%llu nodes=%llu",
- (unsigned long long)__entry->root_id,
- (unsigned long long)__entry->objectid, __entry->type,
- (unsigned long long)__entry->offset, __entry->level,
+ __entry->root_id,
+ __entry->objectid, __entry->type,
+ __entry->offset, __entry->level,
__entry->old_count, __entry->mod_count,
__entry->old_count + __entry->mod_count,
- (unsigned long long)__entry->parent,
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->tree_size)
+ __entry->parent,
+ __entry->bytenr,
+ __entry->tree_size)
);
DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_merge,
@@ -1808,8 +1810,51 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d",
show_root_type(__entry->root_objectid),
- (unsigned long long)__entry->ino, __entry->mod)
+ __entry->ino, __entry->mod)
);
+
+DECLARE_EVENT_CLASS(btrfs__block_group,
+ TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+
+ TP_ARGS(bg_cache),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, bytenr )
+ __field( u64, len )
+ __field( u64, used )
+ __field( u64, flags )
+ ),
+
+ TP_fast_assign_btrfs(bg_cache->fs_info,
+ __entry->bytenr = bg_cache->key.objectid,
+ __entry->len = bg_cache->key.offset,
+ __entry->used = btrfs_block_group_used(&bg_cache->item);
+ __entry->flags = bg_cache->flags;
+ ),
+
+ TP_printk_btrfs("bg bytenr=%llu len=%llu used=%llu flags=%llu(%s)",
+ __entry->bytenr, __entry->len, __entry->used, __entry->flags,
+ __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS))
+);
+
+DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group,
+ TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
+DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
+ TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
+DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
+ TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
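The btrfs hunks above rework several tracepoint prototypes (btrfs_handle_em_exist gains an fs_info argument, the btrfs__reserve_extent class now takes the block group alone and pulls fs_info from it) and add the btrfs__block_group class. A minimal caller-side sketch, assuming nothing beyond the TP_PROTO lines shown above; the trace_* wrappers are generated by TRACE_EVENT/DEFINE_EVENT and the function name below is hypothetical:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/btrfs.h>

static void example_btrfs_trace_calls(struct btrfs_fs_info *fs_info,
                                      struct btrfs_block_group_cache *bg,
                                      const struct extent_map *existing,
                                      const struct extent_map *map,
                                      u64 start, u64 len)
{
        /* fs_info is now passed explicitly so the fsid reaches the event. */
        trace_btrfs_handle_em_exist(fs_info, existing, map, start, len);

        /* fs_info argument dropped; TP_fast_assign_btrfs() reads bg->fs_info. */
        trace_btrfs_reserve_extent(bg, start, len);

        /* New class-based events: one pointer, the rest derived internally. */
        trace_btrfs_remove_block_group(bg);
        trace_btrfs_add_unused_block_group(bg);
}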
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 81b7e985bb45..9763cddd0594 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -12,12 +12,14 @@
TRACE_EVENT(fib_table_lookup,
- TP_PROTO(u32 tb_id, const struct flowi4 *flp),
+ TP_PROTO(u32 tb_id, const struct flowi4 *flp,
+ const struct fib_nh *nh, int err),
- TP_ARGS(tb_id, flp),
+ TP_ARGS(tb_id, flp, nh, err),
TP_STRUCT__entry(
__field( u32, tb_id )
+ __field( int, err )
__field( int, oif )
__field( int, iif )
__field( __u8, tos )
@@ -25,12 +27,19 @@ TRACE_EVENT(fib_table_lookup,
__field( __u8, flags )
__array( __u8, src, 4 )
__array( __u8, dst, 4 )
+ __array( __u8, gw, 4 )
+ __array( __u8, saddr, 4 )
+ __field( u16, sport )
+ __field( u16, dport )
+ __field( u8, proto )
+ __dynamic_array(char, name, IFNAMSIZ )
),
TP_fast_assign(
__be32 *p32;
__entry->tb_id = tb_id;
+ __entry->err = err;
__entry->oif = flp->flowi4_oif;
__entry->iif = flp->flowi4_iif;
__entry->tos = flp->flowi4_tos;
@@ -42,71 +51,41 @@ TRACE_EVENT(fib_table_lookup,
p32 = (__be32 *) __entry->dst;
*p32 = flp->daddr;
- ),
-
- TP_printk("table %u oif %d iif %d src %pI4 dst %pI4 tos %d scope %d flags %x",
- __entry->tb_id, __entry->oif, __entry->iif,
- __entry->src, __entry->dst, __entry->tos, __entry->scope,
- __entry->flags)
-);
-
-TRACE_EVENT(fib_table_lookup_nh,
-
- TP_PROTO(const struct fib_nh *nh),
-
- TP_ARGS(nh),
-
- TP_STRUCT__entry(
- __string( name, nh->nh_dev->name)
- __field( int, oif )
- __array( __u8, src, 4 )
- ),
-
- TP_fast_assign(
- __be32 *p32 = (__be32 *) __entry->src;
-
- __assign_str(name, nh->nh_dev ? nh->nh_dev->name : "not set");
- __entry->oif = nh->nh_oif;
- *p32 = nh->nh_saddr;
- ),
-
- TP_printk("nexthop dev %s oif %d src %pI4",
- __get_str(name), __entry->oif, __entry->src)
-);
-
-TRACE_EVENT(fib_validate_source,
-
- TP_PROTO(const struct net_device *dev, const struct flowi4 *flp),
-
- TP_ARGS(dev, flp),
- TP_STRUCT__entry(
- __string( name, dev->name )
- __field( int, oif )
- __field( int, iif )
- __field( __u8, tos )
- __array( __u8, src, 4 )
- __array( __u8, dst, 4 )
- ),
-
- TP_fast_assign(
- __be32 *p32;
-
- __assign_str(name, dev ? dev->name : "not set");
- __entry->oif = flp->flowi4_oif;
- __entry->iif = flp->flowi4_iif;
- __entry->tos = flp->flowi4_tos;
-
- p32 = (__be32 *) __entry->src;
- *p32 = flp->saddr;
-
- p32 = (__be32 *) __entry->dst;
- *p32 = flp->daddr;
+ __entry->proto = flp->flowi4_proto;
+ if (__entry->proto == IPPROTO_TCP ||
+ __entry->proto == IPPROTO_UDP) {
+ __entry->sport = ntohs(flp->fl4_sport);
+ __entry->dport = ntohs(flp->fl4_dport);
+ } else {
+ __entry->sport = 0;
+ __entry->dport = 0;
+ }
+
+ if (nh) {
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = nh->nh_saddr;
+
+ p32 = (__be32 *) __entry->gw;
+ *p32 = nh->nh_gw;
+
+ __assign_str(name, nh->nh_dev ? nh->nh_dev->name : "-");
+ } else {
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = 0;
+
+ p32 = (__be32 *) __entry->gw;
+ *p32 = 0;
+
+ __assign_str(name, "-");
+ }
),
- TP_printk("dev %s oif %d iif %d tos %d src %pI4 dst %pI4",
- __get_str(name), __entry->oif, __entry->iif, __entry->tos,
- __entry->src, __entry->dst)
+ TP_printk("table %u oif %d iif %d proto %u %pI4/%u -> %pI4/%u tos %d scope %d flags %x ==> dev %s gw %pI4 src %pI4 err %d",
+ __entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
+ __entry->src, __entry->sport, __entry->dst, __entry->dport,
+ __entry->tos, __entry->scope, __entry->flags,
+ __get_str(name), __entry->gw, __entry->saddr, __entry->err)
);
#endif /* _TRACE_FIB_H */
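fib_table_lookup now carries the outcome of the lookup as well, replacing the separate fib_table_lookup_nh and fib_validate_source events. Call shape as declared above, with the caller context assumed:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/fib.h>

static void example_fib_lookup_trace(u32 tb_id, const struct flowi4 *flp,
                                     const struct fib_nh *nh, int err)
{
        /* With nh == NULL the event logs gw/src as 0 and the device as "-". */
        trace_fib_table_lookup(tb_id, flp, nh, err);
}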
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 7e8d48a81b91..b088b54d699c 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -12,14 +12,14 @@
TRACE_EVENT(fib6_table_lookup,
- TP_PROTO(const struct net *net, const struct rt6_info *rt,
+ TP_PROTO(const struct net *net, const struct fib6_info *f6i,
struct fib6_table *table, const struct flowi6 *flp),
- TP_ARGS(net, rt, table, flp),
+ TP_ARGS(net, f6i, table, flp),
TP_STRUCT__entry(
__field( u32, tb_id )
-
+ __field( int, err )
__field( int, oif )
__field( int, iif )
__field( __u8, tos )
@@ -27,7 +27,10 @@ TRACE_EVENT(fib6_table_lookup,
__field( __u8, flags )
__array( __u8, src, 16 )
__array( __u8, dst, 16 )
-
+ __field( u16, sport )
+ __field( u16, dport )
+ __field( u8, proto )
+ __field( u8, rt_type )
__dynamic_array( char, name, IFNAMSIZ )
__array( __u8, gw, 16 )
),
@@ -36,6 +39,7 @@ TRACE_EVENT(fib6_table_lookup,
struct in6_addr *in6;
__entry->tb_id = table->tb6_id;
+ __entry->err = ip6_rt_type_to_error(f6i->fib6_type);
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
__entry->tos = ip6_tclass(flp->flowlabel);
@@ -48,27 +52,38 @@ TRACE_EVENT(fib6_table_lookup,
in6 = (struct in6_addr *)__entry->dst;
*in6 = flp->daddr;
- if (rt->rt6i_idev) {
- __assign_str(name, rt->rt6i_idev->dev->name);
+ __entry->proto = flp->flowi6_proto;
+ if (__entry->proto == IPPROTO_TCP ||
+ __entry->proto == IPPROTO_UDP) {
+ __entry->sport = ntohs(flp->fl6_sport);
+ __entry->dport = ntohs(flp->fl6_dport);
+ } else {
+ __entry->sport = 0;
+ __entry->dport = 0;
+ }
+
+ if (f6i->fib6_nh.nh_dev) {
+ __assign_str(name, f6i->fib6_nh.nh_dev->name);
} else {
- __assign_str(name, "");
+ __assign_str(name, "-");
}
- if (rt == net->ipv6.ip6_null_entry) {
+ if (f6i == net->ipv6.fib6_null_entry) {
struct in6_addr in6_zero = {};
in6 = (struct in6_addr *)__entry->gw;
*in6 = in6_zero;
- } else if (rt) {
+ } else if (f6i) {
in6 = (struct in6_addr *)__entry->gw;
- *in6 = rt->rt6i_gateway;
+ *in6 = f6i->fib6_nh.nh_gw;
}
),
- TP_printk("table %3u oif %d iif %d src %pI6c dst %pI6c tos %d scope %d flags %x ==> dev %s gw %pI6c",
- __entry->tb_id, __entry->oif, __entry->iif,
- __entry->src, __entry->dst, __entry->tos, __entry->scope,
- __entry->flags, __get_str(name), __entry->gw)
+ TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u tos %d scope %d flags %x ==> dev %s gw %pI6c err %d",
+ __entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
+ __entry->src, __entry->sport, __entry->dst, __entry->dport,
+ __entry->tos, __entry->scope, __entry->flags,
+ __get_str(name), __entry->gw, __entry->err)
);
#endif /* _TRACE_FIB6_H */
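fib6_table_lookup mirrors the IPv4 change: it now takes a fib6_info, logs protocol and ports, and derives err from the fib6_type. Call shape as declared above, caller context assumed:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/fib6.h>

static void example_fib6_lookup_trace(const struct net *net,
                                      const struct fib6_info *f6i,
                                      struct fib6_table *table,
                                      const struct flowi6 *flp)
{
        /* f6i == net->ipv6.fib6_null_entry is logged with an all-zero gateway. */
        trace_fib6_table_lookup(net, f6i, table, flp);
}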
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index 63116362543c..a37ef73092e5 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -115,16 +115,15 @@ TRACE_EVENT(host1x_cdma_push_gather,
);
TRACE_EVENT(host1x_channel_submit,
- TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks,
- u32 syncpt_id, u32 syncpt_incrs),
+ TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 syncpt_id,
+ u32 syncpt_incrs),
- TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs),
+ TP_ARGS(name, cmdbufs, relocs, syncpt_id, syncpt_incrs),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, cmdbufs)
__field(u32, relocs)
- __field(u32, waitchks)
__field(u32, syncpt_id)
__field(u32, syncpt_incrs)
),
@@ -133,15 +132,14 @@ TRACE_EVENT(host1x_channel_submit,
__entry->name = name;
__entry->cmdbufs = cmdbufs;
__entry->relocs = relocs;
- __entry->waitchks = waitchks;
__entry->syncpt_id = syncpt_id;
__entry->syncpt_incrs = syncpt_incrs;
),
- TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d,"
- "syncpt_id=%u, syncpt_incrs=%u",
- __entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks,
- __entry->syncpt_id, __entry->syncpt_incrs)
+ TP_printk("name=%s, cmdbufs=%u, relocs=%u, syncpt_id=%u, "
+ "syncpt_incrs=%u",
+ __entry->name, __entry->cmdbufs, __entry->relocs,
+ __entry->syncpt_id, __entry->syncpt_incrs)
);
TRACE_EVENT(host1x_channel_submitted,
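host1x_channel_submit simply loses the waitchks argument, so existing call sites drop that one parameter. A sketch under that assumption, with a hypothetical caller:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/host1x.h>

static void example_submit_trace(const char *name, u32 cmdbufs, u32 relocs,
                                 u32 syncpt_id, u32 syncpt_incrs)
{
        trace_host1x_channel_submit(name, cmdbufs, relocs,
                                    syncpt_id, syncpt_incrs);
}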
diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h
index 8d6cf10d27c9..eb903c3f195f 100644
--- a/include/trace/events/initcall.h
+++ b/include/trace/events/initcall.h
@@ -31,7 +31,11 @@ TRACE_EVENT(initcall_start,
TP_ARGS(func),
TP_STRUCT__entry(
- __field(initcall_t, func)
+ /*
+ * Use field_struct to avoid is_signed_type()
+ * comparison of a function pointer
+ */
+ __field_struct(initcall_t, func)
),
TP_fast_assign(
@@ -48,8 +52,12 @@ TRACE_EVENT(initcall_finish,
TP_ARGS(func, ret),
TP_STRUCT__entry(
- __field(initcall_t, func)
- __field(int, ret)
+ /*
+ * Use field_struct to avoid is_signed_type()
+ * comparison of a function pointer
+ */
+ __field_struct(initcall_t, func)
+ __field(int, ret)
),
TP_fast_assign(
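The initcall events switch func from __field() to __field_struct() because __field() runs the member type through is_signed_type(), and an ordered comparison on a function-pointer type does not build cleanly. A minimal sketch of why, assuming the conventional kernel form of is_signed_type(), which is not shown in this patch:

/* Illustrative sketch, not part of the patch. */
typedef int (*initcall_t)(void);                       /* as in <linux/init.h> */
#define is_signed_type(type) (((type)(-1)) < (type)1)  /* assumed definition   */
/*
 * __field(initcall_t, func) would expand to code containing
 * is_signed_type(initcall_t), i.e. a cast of -1 to a function pointer
 * followed by a relational compare of two function pointers.
 * __field_struct(initcall_t, func) records the same member but skips
 * the signedness probe, so the event builds without that comparison.
 */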
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d8c33298c153..5936aac357ab 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -84,20 +84,21 @@ TRACE_EVENT(rcu_grace_period,
);
/*
- * Tracepoint for future grace-period events, including those for no-callbacks
- * CPUs. The caller should pull the data from the rcu_node structure,
- * other than rcuname, which comes from the rcu_state structure, and event,
- * which is one of the following:
+ * Tracepoint for future grace-period events. The caller should pull
+ * the data from the rcu_node structure, other than rcuname, which comes
+ * from the rcu_state structure, and event, which is one of the following:
*
- * "Startleaf": Request a nocb grace period based on leaf-node data.
+ * "Startleaf": Request a grace period based on leaf-node data.
+ * "Prestarted": Someone beat us to the request
* "Startedleaf": Leaf-node start proved sufficient.
* "Startedleafroot": Leaf-node start proved sufficient after checking root.
* "Startedroot": Requested a nocb grace period based on root-node data.
+ * "NoGPkthread": The RCU grace-period kthread has not yet started.
* "StartWait": Start waiting for the requested grace period.
* "ResumeWait": Resume waiting after signal.
* "EndWait": Complete wait.
* "Cleanup": Clean up rcu_node structure after previous GP.
- * "CleanupMore": Clean up, and another no-CB GP is needed.
+ * "CleanupMore": Clean up, and another GP is needed.
*/
TRACE_EVENT(rcu_future_grace_period,
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 50ed3f8bf534..53df203b8057 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2017 Oracle. All rights reserved.
+ * Copyright (c) 2017, 2018 Oracle. All rights reserved.
+ *
+ * Trace point definitions for the "rpcrdma" subsystem.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma
@@ -528,24 +530,54 @@ TRACE_EVENT(xprtrdma_post_send,
TRACE_EVENT(xprtrdma_post_recv,
TP_PROTO(
- const struct rpcrdma_rep *rep,
+ const struct ib_cqe *cqe
+ ),
+
+ TP_ARGS(cqe),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = cqe;
+ ),
+
+ TP_printk("cqe=%p",
+ __entry->cqe
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_recvs,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int count,
int status
),
- TP_ARGS(rep, status),
+ TP_ARGS(r_xprt, count, status),
TP_STRUCT__entry(
- __field(const void *, rep)
+ __field(const void *, r_xprt)
+ __field(unsigned int, count)
__field(int, status)
+ __field(int, posted)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->rep = rep;
+ __entry->r_xprt = r_xprt;
+ __entry->count = count;
__entry->status = status;
+ __entry->posted = r_xprt->rx_buf.rb_posted_receives;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("rep=%p status=%d",
- __entry->rep, __entry->status
+ TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->count, __entry->posted, __entry->status
)
);
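xprtrdma_post_recv now identifies a receive by its ib_cqe, and the new xprtrdma_post_recvs summarizes a whole batch against the transport, reading the active count from rb_posted_receives inside the event. Call shapes as declared above; the surrounding function is hypothetical:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/rpcrdma.h>

static void example_post_recv_traces(struct rpcrdma_xprt *r_xprt,
                                     struct ib_cqe *cqe,
                                     unsigned int count, int rc)
{
        trace_xprtrdma_post_recv(cqe);                /* per-WR event      */
        trace_xprtrdma_post_recvs(r_xprt, count, rc); /* per-batch summary */
}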
@@ -584,28 +616,32 @@ TRACE_EVENT(xprtrdma_wc_send,
TRACE_EVENT(xprtrdma_wc_receive,
TP_PROTO(
- const struct rpcrdma_rep *rep,
const struct ib_wc *wc
),
- TP_ARGS(rep, wc),
+ TP_ARGS(wc),
TP_STRUCT__entry(
- __field(const void *, rep)
- __field(unsigned int, byte_len)
+ __field(const void *, cqe)
+ __field(u32, byte_len)
__field(unsigned int, status)
- __field(unsigned int, vendor_err)
+ __field(u32, vendor_err)
),
TP_fast_assign(
- __entry->rep = rep;
- __entry->byte_len = wc->byte_len;
+ __entry->cqe = wc->wr_cqe;
__entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ if (wc->status) {
+ __entry->byte_len = 0;
+ __entry->vendor_err = wc->vendor_err;
+ } else {
+ __entry->byte_len = wc->byte_len;
+ __entry->vendor_err = 0;
+ }
),
- TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
- __entry->rep, __entry->byte_len,
+ TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
+ __entry->cqe, __entry->byte_len,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
@@ -616,6 +652,7 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_map);
DEFINE_MR_EVENT(xprtrdma_dma_unmap);
DEFINE_MR_EVENT(xprtrdma_remoteinv);
DEFINE_MR_EVENT(xprtrdma_recover_mr);
@@ -799,7 +836,6 @@ TRACE_EVENT(xprtrdma_allocate,
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(const void *, req)
- __field(const void *, rep)
__field(size_t, callsize)
__field(size_t, rcvsize)
),
@@ -808,15 +844,13 @@ TRACE_EVENT(xprtrdma_allocate,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->req = req;
- __entry->rep = req ? req->rl_reply : NULL;
__entry->callsize = task->tk_rqstp->rq_callsize;
__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
),
- TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
+ TP_printk("task:%u@%u req=%p (%zu, %zu)",
__entry->task_id, __entry->client_id,
- __entry->req, __entry->rep,
- __entry->callsize, __entry->rcvsize
+ __entry->req, __entry->callsize, __entry->rcvsize
)
);
@@ -848,8 +882,6 @@ TRACE_EVENT(xprtrdma_rpc_done,
)
);
-DEFINE_RXPRT_EVENT(xprtrdma_noreps);
-
/**
** Callback events
**/
@@ -885,6 +917,586 @@ TRACE_EVENT(xprtrdma_cb_setup,
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
+/**
+ ** Server-side RPC/RDMA events
+ **/
+
+DECLARE_EVENT_CLASS(svcrdma_xprt_event,
+ TP_PROTO(
+ const struct svc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(const void *, xprt)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s",
+ __entry->xprt, __get_str(addr)
+ )
+);
+
+#define DEFINE_XPRT_EVENT(name) \
+ DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
+
+DEFINE_XPRT_EVENT(accept);
+DEFINE_XPRT_EVENT(fail);
+DEFINE_XPRT_EVENT(free);
+
+TRACE_DEFINE_ENUM(RDMA_MSG);
+TRACE_DEFINE_ENUM(RDMA_NOMSG);
+TRACE_DEFINE_ENUM(RDMA_MSGP);
+TRACE_DEFINE_ENUM(RDMA_DONE);
+TRACE_DEFINE_ENUM(RDMA_ERROR);
+
+#define show_rpcrdma_proc(x) \
+ __print_symbolic(x, \
+ { RDMA_MSG, "RDMA_MSG" }, \
+ { RDMA_NOMSG, "RDMA_NOMSG" }, \
+ { RDMA_MSGP, "RDMA_MSGP" }, \
+ { RDMA_DONE, "RDMA_DONE" }, \
+ { RDMA_ERROR, "RDMA_ERROR" })
+
+TRACE_EVENT(svcrdma_decode_rqst,
+ TP_PROTO(
+ __be32 *p,
+ unsigned int hdrlen
+ ),
+
+ TP_ARGS(p, hdrlen),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, vers)
+ __field(u32, proc)
+ __field(u32, credits)
+ __field(unsigned int, hdrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpup(p++);
+ __entry->vers = be32_to_cpup(p++);
+ __entry->credits = be32_to_cpup(p++);
+ __entry->proc = be32_to_cpup(p);
+ __entry->hdrlen = hdrlen;
+ ),
+
+ TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+ __entry->xid, __entry->vers, __entry->credits,
+ show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
+);
+
+TRACE_EVENT(svcrdma_decode_short,
+ TP_PROTO(
+ unsigned int hdrlen
+ ),
+
+ TP_ARGS(hdrlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, hdrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->hdrlen = hdrlen;
+ ),
+
+ TP_printk("hdrlen=%u", __entry->hdrlen)
+);
+
+DECLARE_EVENT_CLASS(svcrdma_badreq_event,
+ TP_PROTO(
+ __be32 *p
+ ),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, vers)
+ __field(u32, proc)
+ __field(u32, credits)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpup(p++);
+ __entry->vers = be32_to_cpup(p++);
+ __entry->credits = be32_to_cpup(p++);
+ __entry->proc = be32_to_cpup(p);
+ ),
+
+ TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
+ __entry->xid, __entry->vers, __entry->credits, __entry->proc)
+);
+
+#define DEFINE_BADREQ_EVENT(name) \
+ DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
+ TP_PROTO( \
+ __be32 *p \
+ ), \
+ TP_ARGS(p))
+
+DEFINE_BADREQ_EVENT(badvers);
+DEFINE_BADREQ_EVENT(drop);
+DEFINE_BADREQ_EVENT(badproc);
+DEFINE_BADREQ_EVENT(parse);
+
+DECLARE_EVENT_CLASS(svcrdma_segment_event,
+ TP_PROTO(
+ u32 handle,
+ u32 length,
+ u64 offset
+ ),
+
+ TP_ARGS(handle, length, offset),
+
+ TP_STRUCT__entry(
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->handle = handle;
+ __entry->length = length;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%u@0x%016llx:0x%08x",
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+#define DEFINE_SEGMENT_EVENT(name) \
+ DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
+ TP_PROTO( \
+ u32 handle, \
+ u32 length, \
+ u64 offset \
+ ), \
+ TP_ARGS(handle, length, offset))
+
+DEFINE_SEGMENT_EVENT(rseg);
+DEFINE_SEGMENT_EVENT(wseg);
+
+DECLARE_EVENT_CLASS(svcrdma_chunk_event,
+ TP_PROTO(
+ u32 length
+ ),
+
+ TP_ARGS(length),
+
+ TP_STRUCT__entry(
+ __field(u32, length)
+ ),
+
+ TP_fast_assign(
+ __entry->length = length;
+ ),
+
+ TP_printk("length=%u",
+ __entry->length
+ )
+);
+
+#define DEFINE_CHUNK_EVENT(name) \
+ DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
+ TP_PROTO( \
+ u32 length \
+ ), \
+ TP_ARGS(length))
+
+DEFINE_CHUNK_EVENT(pzr);
+DEFINE_CHUNK_EVENT(write);
+DEFINE_CHUNK_EVENT(reply);
+
+TRACE_EVENT(svcrdma_encode_read,
+ TP_PROTO(
+ u32 length,
+ u32 position
+ ),
+
+ TP_ARGS(length, position),
+
+ TP_STRUCT__entry(
+ __field(u32, length)
+ __field(u32, position)
+ ),
+
+ TP_fast_assign(
+ __entry->length = length;
+ __entry->position = position;
+ ),
+
+ TP_printk("length=%u position=%u",
+ __entry->length, __entry->position
+ )
+);
+
+DECLARE_EVENT_CLASS(svcrdma_error_event,
+ TP_PROTO(
+ __be32 xid
+ ),
+
+ TP_ARGS(xid),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ ),
+
+ TP_printk("xid=0x%08x",
+ __entry->xid
+ )
+);
+
+#define DEFINE_ERROR_EVENT(name) \
+ DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
+ TP_PROTO( \
+ __be32 xid \
+ ), \
+ TP_ARGS(xid))
+
+DEFINE_ERROR_EVENT(vers);
+DEFINE_ERROR_EVENT(chunk);
+
+/**
+ ** Server-side RDMA API events
+ **/
+
+TRACE_EVENT(svcrdma_dma_map_page,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ const void *page
+ ),
+
+ TP_ARGS(rdma, page),
+
+ TP_STRUCT__entry(
+ __field(const void *, page);
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s page=%p",
+ __get_str(addr), __get_str(device), __entry->page
+ )
+);
+
+TRACE_EVENT(svcrdma_dma_map_rwctx,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ int status
+ ),
+
+ TP_ARGS(rdma, status),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s status=%d",
+ __get_str(addr), __get_str(device), __entry->status
+ )
+);
+
+TRACE_EVENT(svcrdma_send_failed,
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ int status
+ ),
+
+ TP_ARGS(rqst, status),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(u32, xid)
+ __field(const void *, xprt)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->xid = __be32_to_cpu(rqst->rq_xid);
+ __entry->xprt = rqst->rq_xprt;
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
+ __entry->xprt, __get_str(addr),
+ __entry->xid, __entry->status
+ )
+);
+
+DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
+ TP_PROTO(
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = wc->wr_cqe;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
+ ),
+
+ TP_printk("cqe=%p status=%s (%u/0x%x)",
+ __entry->cqe, rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_SENDCOMP_EVENT(name) \
+ DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
+ TP_PROTO( \
+ const struct ib_wc *wc \
+ ), \
+ TP_ARGS(wc))
+
+TRACE_EVENT(svcrdma_post_send,
+ TP_PROTO(
+ const struct ib_send_wr *wr,
+ int status
+ ),
+
+ TP_ARGS(wr, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(unsigned int, num_sge)
+ __field(u32, inv_rkey)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = wr->wr_cqe;
+ __entry->num_sge = wr->num_sge;
+ __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
+ wr->ex.invalidate_rkey : 0;
+ __entry->status = status;
+ ),
+
+ TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
+ __entry->cqe, __entry->num_sge,
+ __entry->inv_rkey, __entry->status
+ )
+);
+
+DEFINE_SENDCOMP_EVENT(send);
+
+TRACE_EVENT(svcrdma_post_recv,
+ TP_PROTO(
+ const struct ib_recv_wr *wr,
+ int status
+ ),
+
+ TP_ARGS(wr, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = wr->wr_cqe;
+ __entry->status = status;
+ ),
+
+ TP_printk("cqe=%p status=%d",
+ __entry->cqe, __entry->status
+ )
+);
+
+TRACE_EVENT(svcrdma_wc_receive,
+ TP_PROTO(
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(u32, byte_len)
+ __field(unsigned int, status)
+ __field(u32, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = wc->wr_cqe;
+ __entry->status = wc->status;
+ if (wc->status) {
+ __entry->byte_len = 0;
+ __entry->vendor_err = wc->vendor_err;
+ } else {
+ __entry->byte_len = wc->byte_len;
+ __entry->vendor_err = 0;
+ }
+ ),
+
+ TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
+ __entry->cqe, __entry->byte_len,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+TRACE_EVENT(svcrdma_post_rw,
+ TP_PROTO(
+ const void *cqe,
+ int sqecount,
+ int status
+ ),
+
+ TP_ARGS(cqe, sqecount, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(int, sqecount)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = cqe;
+ __entry->sqecount = sqecount;
+ __entry->status = status;
+ ),
+
+ TP_printk("cqe=%p sqecount=%d status=%d",
+ __entry->cqe, __entry->sqecount, __entry->status
+ )
+);
+
+DEFINE_SENDCOMP_EVENT(read);
+DEFINE_SENDCOMP_EVENT(write);
+
+TRACE_EVENT(svcrdma_cm_event,
+ TP_PROTO(
+ const struct rdma_cm_event *event,
+ const struct sockaddr *sap
+ ),
+
+ TP_ARGS(event, sap),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, event)
+ __field(int, status)
+ __array(__u8, addr, INET6_ADDRSTRLEN + 10)
+ ),
+
+ TP_fast_assign(
+ __entry->event = event->event;
+ __entry->status = event->status;
+ snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+ "%pISpc", sap);
+ ),
+
+ TP_printk("addr=%s event=%s (%u/%d)",
+ __entry->addr,
+ rdma_show_cm_event(__entry->event),
+ __entry->event, __entry->status
+ )
+);
+
+TRACE_EVENT(svcrdma_qp_error,
+ TP_PROTO(
+ const struct ib_event *event,
+ const struct sockaddr *sap
+ ),
+
+ TP_ARGS(event, sap),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, event)
+ __string(device, event->device->name)
+ __array(__u8, addr, INET6_ADDRSTRLEN + 10)
+ ),
+
+ TP_fast_assign(
+ __entry->event = event->event;
+ __assign_str(device, event->device->name);
+ snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+ "%pISpc", sap);
+ ),
+
+ TP_printk("addr=%s dev=%s event=%s (%u)",
+ __entry->addr, __get_str(device),
+ rdma_show_ib_event(__entry->event), __entry->event
+ )
+);
+
+DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma
+ ),
+
+ TP_ARGS(rdma),
+
+ TP_STRUCT__entry(
+ __field(int, avail)
+ __field(int, depth)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->avail = atomic_read(&rdma->sc_sq_avail);
+ __entry->depth = rdma->sc_sq_depth;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s sc_sq_avail=%d/%d",
+ __get_str(addr), __entry->avail, __entry->depth
+ )
+);
+
+#define DEFINE_SQ_EVENT(name) \
+ DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma \
+ ), \
+ TP_ARGS(rdma))
+
+DEFINE_SQ_EVENT(full);
+DEFINE_SQ_EVENT(retry);
+
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
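Most of the server-side events above are produced through the DEFINE_*_EVENT wrapper macros, so each DEFINE line yields a trace_svcrdma_<suffix>() call with its class prototype. A sketch of a few resulting call sites, with the surrounding function assumed:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/rpcrdma.h>

static void example_svcrdma_traces(const struct svc_xprt *xprt,
                                   u32 handle, u32 length, u64 offset)
{
        trace_svcrdma_xprt_accept(xprt);                   /* svcrdma_xprt_event    */
        trace_svcrdma_encode_rseg(handle, length, offset); /* svcrdma_segment_event */
        trace_svcrdma_encode_write(length);                /* svcrdma_chunk_event   */
}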
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
new file mode 100644
index 000000000000..a04a64bc1a00
--- /dev/null
+++ b/include/trace/events/rseq.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rseq
+
+#if !defined(_TRACE_RSEQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RSEQ_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+TRACE_EVENT(rseq_update,
+
+ TP_PROTO(struct task_struct *t),
+
+ TP_ARGS(t),
+
+ TP_STRUCT__entry(
+ __field(s32, cpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = raw_smp_processor_id();
+ ),
+
+ TP_printk("cpu_id=%d", __entry->cpu_id)
+);
+
+TRACE_EVENT(rseq_ip_fixup,
+
+ TP_PROTO(unsigned long regs_ip, unsigned long start_ip,
+ unsigned long post_commit_offset, unsigned long abort_ip),
+
+ TP_ARGS(regs_ip, start_ip, post_commit_offset, abort_ip),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, regs_ip)
+ __field(unsigned long, start_ip)
+ __field(unsigned long, post_commit_offset)
+ __field(unsigned long, abort_ip)
+ ),
+
+ TP_fast_assign(
+ __entry->regs_ip = regs_ip;
+ __entry->start_ip = start_ip;
+ __entry->post_commit_offset = post_commit_offset;
+ __entry->abort_ip = abort_ip;
+ ),
+
+ TP_printk("regs_ip=0x%lx start_ip=0x%lx post_commit_offset=%lu abort_ip=0x%lx",
+ __entry->regs_ip, __entry->start_ip,
+ __entry->post_commit_offset, __entry->abort_ip)
+);
+
+#endif /* _TRACE_RSEQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
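The two new rseq events take plain scalar arguments, so the call shapes follow directly from the TP_PROTO lines above; the wrapper function here is hypothetical:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/rseq.h>

static void example_rseq_traces(struct task_struct *t,
                                unsigned long regs_ip, unsigned long start_ip,
                                unsigned long post_commit_offset,
                                unsigned long abort_ip)
{
        trace_rseq_update(t);   /* records only raw_smp_processor_id() */
        trace_rseq_ip_fixup(regs_ip, start_ip, post_commit_offset, abort_ip);
}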
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 9e96c2fe2793..4fff00e9da8a 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -15,6 +15,7 @@
#define _TRACE_RXRPC_H
#include <linux/tracepoint.h>
+#include <linux/errqueue.h>
/*
* Define enums for tracing information.
@@ -210,6 +211,20 @@ enum rxrpc_congest_change {
rxrpc_cong_saw_nack,
};
+enum rxrpc_tx_fail_trace {
+ rxrpc_tx_fail_call_abort,
+ rxrpc_tx_fail_call_ack,
+ rxrpc_tx_fail_call_data_frag,
+ rxrpc_tx_fail_call_data_nofrag,
+ rxrpc_tx_fail_call_final_resend,
+ rxrpc_tx_fail_conn_abort,
+ rxrpc_tx_fail_conn_challenge,
+ rxrpc_tx_fail_conn_response,
+ rxrpc_tx_fail_reject,
+ rxrpc_tx_fail_version_keepalive,
+ rxrpc_tx_fail_version_reply,
+};
+
#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -437,6 +452,19 @@ enum rxrpc_congest_change {
EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
+#define rxrpc_tx_fail_traces \
+ EM(rxrpc_tx_fail_call_abort, "CallAbort") \
+ EM(rxrpc_tx_fail_call_ack, "CallAck") \
+ EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \
+ EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \
+ EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \
+ EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \
+ EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \
+ EM(rxrpc_tx_fail_conn_response, "ConnResp") \
+ EM(rxrpc_tx_fail_reject, "Reject") \
+ EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \
+ E_(rxrpc_tx_fail_version_reply, "VerReply")
+
/*
* Export enum symbols via userspace.
*/
@@ -460,6 +488,7 @@ rxrpc_propose_ack_traces;
rxrpc_propose_ack_outcomes;
rxrpc_congest_modes;
rxrpc_congest_changes;
+rxrpc_tx_fail_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -1374,6 +1403,94 @@ TRACE_EVENT(rxrpc_resend,
__entry->anno)
);
+TRACE_EVENT(rxrpc_rx_icmp,
+ TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee,
+ struct sockaddr_rxrpc *srx),
+
+ TP_ARGS(peer, ee, srx),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer )
+ __field_struct(struct sock_extended_err, ee )
+ __field_struct(struct sockaddr_rxrpc, srx )
+ ),
+
+ TP_fast_assign(
+ __entry->peer = peer->debug_id;
+ memcpy(&__entry->ee, ee, sizeof(__entry->ee));
+ memcpy(&__entry->srx, srx, sizeof(__entry->srx));
+ ),
+
+ TP_printk("P=%08x o=%u t=%u c=%u i=%u d=%u e=%d %pISp",
+ __entry->peer,
+ __entry->ee.ee_origin,
+ __entry->ee.ee_type,
+ __entry->ee.ee_code,
+ __entry->ee.ee_info,
+ __entry->ee.ee_data,
+ __entry->ee.ee_errno,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(rxrpc_tx_fail,
+ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
+ enum rxrpc_tx_fail_trace what),
+
+ TP_ARGS(debug_id, serial, ret, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(rxrpc_serial_t, serial )
+ __field(int, ret )
+ __field(enum rxrpc_tx_fail_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->serial = serial;
+ __entry->ret = ret;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x r=%x ret=%d %s",
+ __entry->debug_id,
+ __entry->serial,
+ __entry->ret,
+ __print_symbolic(__entry->what, rxrpc_tx_fail_traces))
+ );
+
+TRACE_EVENT(rxrpc_call_reset,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(u32, cid )
+ __field(u32, call_id )
+ __field(rxrpc_serial_t, call_serial )
+ __field(rxrpc_serial_t, conn_serial )
+ __field(rxrpc_seq_t, tx_seq )
+ __field(rxrpc_seq_t, rx_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = call->debug_id;
+ __entry->cid = call->cid;
+ __entry->call_id = call->call_id;
+ __entry->call_serial = call->rx_serial;
+ __entry->conn_serial = call->conn->hi_serial;
+ __entry->tx_seq = call->tx_hard_ack;
+ __entry->rx_seq = call->ackr_seen;
+ ),
+
+ TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
+ __entry->debug_id,
+ __entry->cid, __entry->call_id,
+ __entry->call_serial, __entry->conn_serial,
+ __entry->tx_seq, __entry->rx_seq)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
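rxrpc_tx_fail takes one of the rxrpc_tx_fail_trace values added above so __print_symbolic() can render it, and rxrpc_call_reset needs only the call pointer. A sketch of the call shapes, caller context assumed:

/* Illustrative sketch, not part of the patch. */
#include <trace/events/rxrpc.h>

static void example_rxrpc_traces(struct rxrpc_call *call,
                                 rxrpc_serial_t serial, int ret)
{
        trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                            rxrpc_tx_fail_call_ack);
        trace_rxrpc_call_reset(call);
}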
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index bc01e06bc716..0be866c91f62 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio,
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
- __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
+ __entry->newprio = pi_task ?
+ min(tsk->normal_prio, pi_task->prio) :
+ tsk->normal_prio;
/* XXX SCHED_DEADLINE bits missing */
),
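The sched_pi_setprio change reports the priority the task will actually end up with: clamped by the donor when one exists, otherwise normal_prio rather than a possibly still-boosted prio. A worked example with conventional kernel priority numbers, which are an assumption and not taken from this patch:

/* Illustrative numbers, not part of the patch.
 *   tsk->prio        = 98   (still carrying an old boost)
 *   tsk->normal_prio = 120  (nice-0 CFS priority)
 *   pi_task->prio    = 98   (highest-priority waiter)
 *
 *   boost   (pi_task set):  newprio = min(120, 98) = 98
 *   deboost (pi_task NULL): newprio = 120, where the old code would
 *                           have reported the stale 98 from tsk->prio.
 */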
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 335d87242439..bbb08a3ef5cc 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -224,6 +224,8 @@ TRACE_EVENT(rpc_stats_latency,
TP_ARGS(task, backlog, rtt, execute),
TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(u32, xid)
__field(int, version)
__string(progname, task->tk_client->cl_program->name)
@@ -231,13 +233,11 @@ TRACE_EVENT(rpc_stats_latency,
__field(unsigned long, backlog)
__field(unsigned long, rtt)
__field(unsigned long, execute)
- __string(addr,
- task->tk_xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(port,
- task->tk_xprt->address_strings[RPC_DISPLAY_PORT])
),
TP_fast_assign(
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->task_id = task->tk_pid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->version = task->tk_client->cl_vers;
__assign_str(progname, task->tk_client->cl_program->name)
@@ -245,14 +245,10 @@ TRACE_EVENT(rpc_stats_latency,
__entry->backlog = ktime_to_us(backlog);
__entry->rtt = ktime_to_us(rtt);
__entry->execute = ktime_to_us(execute);
- __assign_str(addr,
- task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port,
- task->tk_xprt->address_strings[RPC_DISPLAY_PORT]);
),
- TP_printk("peer=[%s]:%s xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
- __get_str(addr), __get_str(port), __entry->xid,
+ TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+ __entry->task_id, __entry->client_id, __entry->xid,
__get_str(progname), __entry->version, __get_str(procname),
__entry->backlog, __entry->rtt, __entry->execute)
);
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 878b2be7ce77..ac55b328d61b 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <linux/sock_diag.h>
#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
do { \
@@ -113,7 +114,7 @@ DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset,
*/
DECLARE_EVENT_CLASS(tcp_event_sk,
- TP_PROTO(const struct sock *sk),
+ TP_PROTO(struct sock *sk),
TP_ARGS(sk),
@@ -125,6 +126,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
__array(__u8, daddr_v6, 16)
+ __field(__u64, sock_cookie)
),
TP_fast_assign(
@@ -144,73 +146,36 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->sock_cookie = sock_gen_cookie(sk);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx",
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
- __entry->saddr_v6, __entry->daddr_v6)
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->sock_cookie)
);
DEFINE_EVENT(tcp_event_sk, tcp_receive_reset,
- TP_PROTO(const struct sock *sk),
+ TP_PROTO(struct sock *sk),
TP_ARGS(sk)
);
DEFINE_EVENT(tcp_event_sk, tcp_destroy_sock,
- TP_PROTO(const struct sock *sk),
+ TP_PROTO(struct sock *sk),
TP_ARGS(sk)
);
-TRACE_EVENT(tcp_set_state,
-
- TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
-
- TP_ARGS(sk, oldstate, newstate),
-
- TP_STRUCT__entry(
- __field(const void *, skaddr)
- __field(int, oldstate)
- __field(int, newstate)
- __field(__u16, sport)
- __field(__u16, dport)
- __array(__u8, saddr, 4)
- __array(__u8, daddr, 4)
- __array(__u8, saddr_v6, 16)
- __array(__u8, daddr_v6, 16)
- ),
-
- TP_fast_assign(
- struct inet_sock *inet = inet_sk(sk);
- __be32 *p32;
-
- __entry->skaddr = sk;
- __entry->oldstate = oldstate;
- __entry->newstate = newstate;
-
- __entry->sport = ntohs(inet->inet_sport);
- __entry->dport = ntohs(inet->inet_dport);
-
- p32 = (__be32 *) __entry->saddr;
- *p32 = inet->inet_saddr;
-
- p32 = (__be32 *) __entry->daddr;
- *p32 = inet->inet_daddr;
+DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust,
- TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
- sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
- ),
+ TP_PROTO(struct sock *sk),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
- __entry->sport, __entry->dport,
- __entry->saddr, __entry->daddr,
- __entry->saddr_v6, __entry->daddr_v6,
- show_tcp_state_name(__entry->oldstate),
- show_tcp_state_name(__entry->newstate))
+ TP_ARGS(sk)
);
TRACE_EVENT(tcp_retransmit_synack,
@@ -271,7 +236,7 @@ TRACE_EVENT(tcp_probe,
__field(__u16, sport)
__field(__u16, dport)
__field(__u32, mark)
- __field(__u16, length)
+ __field(__u16, data_len)
__field(__u32, snd_nxt)
__field(__u32, snd_una)
__field(__u32, snd_cwnd)
@@ -279,11 +244,13 @@ TRACE_EVENT(tcp_probe,
__field(__u32, snd_wnd)
__field(__u32, srtt)
__field(__u32, rcv_wnd)
+ __field(__u64, sock_cookie)
),
TP_fast_assign(
- const struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
const struct inet_sock *inet = inet_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
@@ -295,7 +262,7 @@ TRACE_EVENT(tcp_probe,
__entry->dport = ntohs(inet->inet_dport);
__entry->mark = skb->mark;
- __entry->length = skb->len;
+ __entry->data_len = skb->len - __tcp_hdrlen(th);
__entry->snd_nxt = tp->snd_nxt;
__entry->snd_una = tp->snd_una;
__entry->snd_cwnd = tp->snd_cwnd;
@@ -303,15 +270,14 @@ TRACE_EVENT(tcp_probe,
__entry->rcv_wnd = tp->rcv_wnd;
__entry->ssthresh = tcp_current_ssthresh(sk);
__entry->srtt = tp->srtt_us >> 3;
+ __entry->sock_cookie = sock_gen_cookie(sk);
),
- TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
- "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
- "rcv_wnd=%u",
+ TP_printk("src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
__entry->saddr, __entry->daddr, __entry->mark,
- __entry->length, __entry->snd_nxt, __entry->snd_una,
+ __entry->data_len, __entry->snd_nxt, __entry->snd_una,
__entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
- __entry->srtt, __entry->rcv_wnd)
+ __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
);
#endif /* _TRACE_TCP_H */
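tcp_probe now reports payload bytes instead of the raw skb length, plus a per-socket cookie for correlating events. Worked arithmetic, assuming __tcp_hdrlen(th) evaluates to th->doff * 4:

/* Illustrative numbers, not part of the patch.
 *   skb->len = 1032, th->doff = 8 (20-byte header + 12 bytes of options)
 *     old: length   = skb->len                    = 1032
 *     new: data_len = skb->len - __tcp_hdrlen(th) = 1032 - 32 = 1000
 *   sock_cookie = sock_gen_cookie(sk), a stable per-socket identifier,
 *   so tcp_event_sk and tcp_probe records for one connection share it.
 */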
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index bf6f82673492..f8260e5c79ad 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -257,6 +257,33 @@ TRACE_EVENT(ufshcd_command,
)
);
+TRACE_EVENT(ufshcd_upiu,
+ TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
+
+ TP_ARGS(dev_name, str, hdr, tsf),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __string(str, str)
+ __array(unsigned char, hdr, 12)
+ __array(unsigned char, tsf, 16)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name);
+ __assign_str(str, str);
+ memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
+ memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
+ ),
+
+ TP_printk(
+ "%s: %s: HDR:%s, CDB:%s",
+ __get_str(str), __get_str(dev_name),
+ __print_hex(__entry->hdr, sizeof(__entry->hdr)),
+ __print_hex(__entry->tsf, sizeof(__entry->tsf))
+ )
+);
+
#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
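ufshcd_upiu copies a fixed 12-byte header and a 16-byte transaction-specific field and hex-dumps both at print time. A sketch of the call shape; the buffer arguments here are placeholders, not field names from this patch:

/* Illustrative sketch, not part of the patch. */
#include <linux/device.h>
#include <trace/events/ufs.h>

static void example_upiu_trace(struct device *dev,
                               void *upiu_hdr /* 12 bytes */,
                               void *upiu_tsf /* 16 bytes */)
{
        /* Only sizeof(__entry->hdr) and sizeof(__entry->tsf) bytes are copied. */
        trace_ufshcd_upiu(dev_name(dev), "send", upiu_hdr, upiu_tsf);
}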
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 2f057a494d93..9a761bc6a251 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -25,6 +25,8 @@ DECLARE_EVENT_CLASS(workqueue_work,
TP_printk("work struct %p", __entry->work)
);
+struct pool_workqueue;
+
/**
* workqueue_queue_work - called when a work gets queued
* @req_cpu: the requested cpu
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 8989a92c571a..1ecf4c67fcf7 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -138,11 +138,18 @@ DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
__entry->map_id, __entry->map_index)
);
+#ifndef __DEVMAP_OBJ_TYPE
+#define __DEVMAP_OBJ_TYPE
+struct _bpf_dtab_netdev {
+ struct net_device *dev;
+};
+#endif /* __DEVMAP_OBJ_TYPE */
+
#define devmap_ifindex(fwd, map) \
(!fwd ? 0 : \
(!map ? 0 : \
((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \
- ((struct net_device *)fwd)->ifindex : 0)))
+ ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)))
#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
@@ -222,6 +229,47 @@ TRACE_EVENT(xdp_cpumap_enqueue,
__entry->to_cpu)
);
+TRACE_EVENT(xdp_devmap_xmit,
+
+ TP_PROTO(const struct bpf_map *map, u32 map_index,
+ int sent, int drops,
+ const struct net_device *from_dev,
+ const struct net_device *to_dev, int err),
+
+ TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
+
+ TP_STRUCT__entry(
+ __field(int, map_id)
+ __field(u32, act)
+ __field(u32, map_index)
+ __field(int, drops)
+ __field(int, sent)
+ __field(int, from_ifindex)
+ __field(int, to_ifindex)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->map_id = map->id;
+ __entry->act = XDP_REDIRECT;
+ __entry->map_index = map_index;
+ __entry->drops = drops;
+ __entry->sent = sent;
+ __entry->from_ifindex = from_dev->ifindex;
+ __entry->to_ifindex = to_dev->ifindex;
+ __entry->err = err;
+ ),
+
+ TP_printk("ndo_xdp_xmit"
+ " map_id=%d map_index=%d action=%s"
+ " sent=%d drops=%d"
+ " from_ifindex=%d to_ifindex=%d err=%d",
+ __entry->map_id, __entry->map_index,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->sent, __entry->drops,
+ __entry->from_ifindex, __entry->to_ifindex, __entry->err)
+);
+
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>
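
A sketch of how the new xdp_devmap_xmit event would be emitted from a devmap bulk-transmit path; the surrounding function and its arguments are hypothetical, only the tracepoint call reflects this header:

static void example_devmap_flush_one(struct bpf_map *map, u32 index,
                                     struct net_device *from_dev,
                                     struct net_device *to_dev,
                                     int sent, int drops, int err)
{
        /* Reports how many frames ndo_xdp_xmit() accepted vs. dropped for
         * one destination slot of the devmap.                              */
        trace_xdp_devmap_xmit(map, index, sent, drops, from_dev, to_dev, err);
}
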
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 7dd8f34c37df..fdcf88bcf0ea 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
-TRACE_EVENT(xen_mmu_flush_tlb_all,
- TP_PROTO(int x),
- TP_ARGS(x),
- TP_STRUCT__entry(__array(char, x, 0)),
- TP_fast_assign((void)x),
- TP_printk("%s", "")
- );
-
-TRACE_EVENT(xen_mmu_flush_tlb,
- TP_PROTO(int x),
- TP_ARGS(x),
- TP_STRUCT__entry(__array(char, x, 0)),
- TP_fast_assign((void)x),
- TP_printk("%s", "")
- );
-
TRACE_EVENT(xen_mmu_flush_tlb_one_user,
TP_PROTO(unsigned long addr),
TP_ARGS(addr),
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index bfda803b0a09..4ecdfe2e3580 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -422,6 +422,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
do { \
char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
+ BUILD_BUG_ON(len <= 0); \
ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
diff --git a/include/uapi/asm-generic/msgbuf.h b/include/uapi/asm-generic/msgbuf.h
index fb306ebdb36f..9fe4881557cb 100644
--- a/include/uapi/asm-generic/msgbuf.h
+++ b/include/uapi/asm-generic/msgbuf.h
@@ -18,31 +18,30 @@
* On big-endian systems, the padding is in the wrong place.
*
* Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
+#if __BITS_PER_LONG == 64
__kernel_time_t msg_stime; /* last msgsnd time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused1;
-#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused2;
-#endif
__kernel_time_t msg_ctime; /* last change time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused3;
+#else
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_stime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_ctime; /* last change time */
+ unsigned long msg_ctime_high;
#endif
- __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
- __kernel_ulong_t msg_qnum; /* number of messages in queue */
- __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
- __kernel_ulong_t __unused4;
- __kernel_ulong_t __unused5;
+ unsigned long __unused4;
+ unsigned long __unused5;
};
#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/include/uapi/asm-generic/posix_types.h b/include/uapi/asm-generic/posix_types.h
index 5e6ea22bd525..f0733a26ebfc 100644
--- a/include/uapi/asm-generic/posix_types.h
+++ b/include/uapi/asm-generic/posix_types.h
@@ -87,6 +87,7 @@ typedef struct {
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
typedef __kernel_long_t __kernel_time_t;
+typedef long long __kernel_time64_t;
typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
diff --git a/include/uapi/asm-generic/sembuf.h b/include/uapi/asm-generic/sembuf.h
index cbf9cfe977d6..0bae010f1b64 100644
--- a/include/uapi/asm-generic/sembuf.h
+++ b/include/uapi/asm-generic/sembuf.h
@@ -13,23 +13,29 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
+ * 64 bit architectures use a 64-bit __kernel_time_t here, while
+ * 32 bit architectures have a pair of unsigned long values, and
* so they do not need the first two padding words.
- * On big-endian systems, the padding is in the wrong place.
*
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
+ * On big-endian systems, the padding is in the wrong place for
+ * historic reasons, so user space has to reconstruct a time_t
+ * value using
+ *
+ * user_semid_ds.sem_otime = kernel_semid64_ds.sem_otime +
+ * ((long long)kernel_semid64_ds.sem_otime_high << 32)
+ *
+ * Pad space is left for 2 miscellaneous 32-bit values
*/
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#if __BITS_PER_LONG == 64
__kernel_time_t sem_otime; /* last semop time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused1;
-#endif
__kernel_time_t sem_ctime; /* last change time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused2;
+#else
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_otime_high;
+ unsigned long sem_ctime; /* last change time */
+ unsigned long sem_ctime_high;
#endif
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused3;
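
The reconstruction rule quoted in the comment applies symmetrically to msqid64_ds above and shmid64_ds below. A minimal user-space sketch for the semaphore case, assuming a 32-bit build where the *_high fields exist:

/* Sketch only: rebuild a y2038-safe timestamp from the split low/high words
 * that 32-bit user space receives.                                          */
static long long example_sem_otime64(const struct semid64_ds *ds)
{
        return (long long)ds->sem_otime +
               ((long long)ds->sem_otime_high << 32);
}
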
diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h
index 2b6c3bb97f97..e504422fc501 100644
--- a/include/uapi/asm-generic/shmbuf.h
+++ b/include/uapi/asm-generic/shmbuf.h
@@ -19,42 +19,41 @@
*
*
* Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
+#if __BITS_PER_LONG == 64
__kernel_time_t shm_atime; /* last attach time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused1;
-#endif
__kernel_time_t shm_dtime; /* last detach time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused2;
-#endif
__kernel_time_t shm_ctime; /* last change time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused3;
+#else
+ unsigned long shm_atime; /* last attach time */
+ unsigned long shm_atime_high;
+ unsigned long shm_dtime; /* last detach time */
+ unsigned long shm_dtime_high;
+ unsigned long shm_ctime; /* last change time */
+ unsigned long shm_ctime_high;
#endif
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
- __kernel_ulong_t shm_nattch; /* no. of current attaches */
- __kernel_ulong_t __unused4;
- __kernel_ulong_t __unused5;
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
};
struct shminfo64 {
- __kernel_ulong_t shmmax;
- __kernel_ulong_t shmmin;
- __kernel_ulong_t shmmni;
- __kernel_ulong_t shmseg;
- __kernel_ulong_t shmall;
- __kernel_ulong_t __unused1;
- __kernel_ulong_t __unused2;
- __kernel_ulong_t __unused3;
- __kernel_ulong_t __unused4;
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
};
#endif /* __ASM_GENERIC_SHMBUF_H */
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 558b902f18d4..80e2a7227205 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -249,7 +249,8 @@ typedef struct siginfo {
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
-#define NSIGTRAP 4
+#define TRAP_UNK 5 /* undiagnosed trap */
+#define NSIGTRAP 5
/*
* There is an additional set of SIGTRAP si_codes used by ptrace
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 8bcb186c6f67..42990676a55e 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -732,9 +732,11 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+#define __NR_io_pgetevents 292
+__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#undef __NR_syscalls
-#define __NR_syscalls 292
+#define __NR_syscalls 293
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c363b67f2d0a..78b4dd89fcb4 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -78,6 +78,12 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
+ AMDGPU_GEM_DOMAIN_GTT | \
+ AMDGPU_GEM_DOMAIN_VRAM | \
+ AMDGPU_GEM_DOMAIN_GDS | \
+ AMDGPU_GEM_DOMAIN_GWS | \
+ AMDGPU_GEM_DOMAIN_OA)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -95,6 +101,10 @@ extern "C" {
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
+/* Flag that indicates allocating MQD gart on GFX9, where the mtype
+ * for the second page onward should be set to NC.
+ */
+#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -520,6 +530,10 @@ union drm_amdgpu_cs {
/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
+/* The IB fence should do the L2 writeback but not invalidate any shader
+ * caches (L2/vL1/sL1/I$). */
+#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -620,6 +634,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_ASD 0x0d
/* Subquery id: Query VCN firmware version */
#define AMDGPU_INFO_FW_VCN 0x0e
+ /* Subquery id: Query GFX RLC SRLC firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
+ /* Subquery id: Query GFX RLC SRLG firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+ /* Subquery id: Query GFX RLC SRLS firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
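
The new AMDGPU_GEM_DOMAIN_MASK presumably lets the GEM create path reject requests carrying unknown domain bits; a hypothetical sketch of that kind of check (the function name is illustrative, the real validation lives in the amdgpu driver, not in this header):

static int example_validate_gem_domains(__u64 requested)
{
        /* Any bit outside the known CPU/GTT/VRAM/GDS/GWS/OA set is invalid. */
        if (requested & ~(__u64)AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;
        return 0;
}
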
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 6fdff5945c8a..9c660e1688ab 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -680,6 +680,13 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ATOMIC 3
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
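
A user-space sketch of opting in to the new capability before enumerating modes; libdrm's drmSetClientCap() wraps the same ioctl, so this is only a minimal illustration (error handling omitted):

#include <sys/ioctl.h>

static int example_enable_aspect_ratio(int drm_fd)
{
        /* After this, mode listings may carry DRM_MODE_FLAG_PIC_AR_* flags. */
        struct drm_set_client_cap cap = {
                .capability = DRM_CLIENT_CAP_ASPECT_RATIO,
                .value = 1,
        };

        return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}
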
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 50bcf4214ff9..4b3a1bb58e68 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -93,6 +93,8 @@ extern "C" {
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
+#define DRM_MODE_PICTURE_ASPECT_64_27 3
+#define DRM_MODE_PICTURE_ASPECT_256_135 4
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
@@ -102,6 +104,10 @@ extern "C" {
(DRM_MODE_PICTURE_ASPECT_4_3<<19)
#define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_PIC_AR_64_27 \
+ (DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define DRM_MODE_FLAG_PIC_AR_256_135 \
+ (DRM_MODE_PICTURE_ASPECT_256_135<<19)
#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
DRM_MODE_FLAG_NHSYNC | \
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 4a54305120e0..3e59b8382dd8 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -135,6 +135,219 @@ struct drm_exynos_g2d_exec {
__u64 async;
};
+/* Exynos DRM IPP v2 API */
+
+/**
+ * Enumerate available IPP hardware modules.
+ *
+ * @count_ipps: size of ipp_id array / number of ipp modules (set by driver)
+ * @reserved: padding
+ * @ipp_id_ptr: pointer to ipp_id array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_res {
+ __u32 count_ipps;
+ __u32 reserved;
+ __u64 ipp_id_ptr;
+};
+
+enum drm_exynos_ipp_format_type {
+ DRM_EXYNOS_IPP_FORMAT_SOURCE = 0x01,
+ DRM_EXYNOS_IPP_FORMAT_DESTINATION = 0x02,
+};
+
+struct drm_exynos_ipp_format {
+ __u32 fourcc;
+ __u32 type;
+ __u64 modifier;
+};
+
+enum drm_exynos_ipp_capability {
+ DRM_EXYNOS_IPP_CAP_CROP = 0x01,
+ DRM_EXYNOS_IPP_CAP_ROTATE = 0x02,
+ DRM_EXYNOS_IPP_CAP_SCALE = 0x04,
+ DRM_EXYNOS_IPP_CAP_CONVERT = 0x08,
+};
+
+/**
+ * Get IPP hardware capabilities and supported image formats.
+ *
+ * @ipp_id: id of IPP module to query
+ * @capabilities: bitmask of drm_exynos_ipp_capability (set by driver)
+ * @reserved: padding
+ * @formats_count: size of formats array (in entries) / number of filled
+ * formats (set by driver)
+ * @formats_ptr: pointer to formats array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_caps {
+ __u32 ipp_id;
+ __u32 capabilities;
+ __u32 reserved;
+ __u32 formats_count;
+ __u64 formats_ptr;
+};
+
+enum drm_exynos_ipp_limit_type {
+ /* size (horizontal/vertical) limits, in pixels (min, max, alignment) */
+ DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE = 0x0001,
+ /* scale ratio (horizontal/vertical), 16.16 fixed point (min, max) */
+ DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE = 0x0002,
+
+ /* image buffer area */
+ DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER = 0x0001 << 16,
+ /* src/dst rectangle area */
+ DRM_EXYNOS_IPP_LIMIT_SIZE_AREA = 0x0002 << 16,
+ /* src/dst rectangle area when rotation enabled */
+ DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED = 0x0003 << 16,
+
+ DRM_EXYNOS_IPP_LIMIT_TYPE_MASK = 0x000f,
+ DRM_EXYNOS_IPP_LIMIT_SIZE_MASK = 0x000f << 16,
+};
+
+struct drm_exynos_ipp_limit_val {
+ __u32 min;
+ __u32 max;
+ __u32 align;
+ __u32 reserved;
+};
+
+/**
+ * IPP module limitation.
+ *
+ * @type: limit type (see drm_exynos_ipp_limit_type enum)
+ * @reserved: padding
+ * @h: horizontal limits
+ * @v: vertical limits
+ */
+struct drm_exynos_ipp_limit {
+ __u32 type;
+ __u32 reserved;
+ struct drm_exynos_ipp_limit_val h;
+ struct drm_exynos_ipp_limit_val v;
+};
+
+/**
+ * Get IPP limits for given image format.
+ *
+ * @ipp_id: id of IPP module to query
+ * @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h)
+ * @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h)
+ * @type: source/destination identifier (drm_exynos_ipp_format_type enum)
+ * @limits_count: size of limits array (in entries) / number of filled entries
+ * (set by driver)
+ * @limits_ptr: pointer to limits array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_limits {
+ __u32 ipp_id;
+ __u32 fourcc;
+ __u64 modifier;
+ __u32 type;
+ __u32 limits_count;
+ __u64 limits_ptr;
+};
+
+enum drm_exynos_ipp_task_id {
+ /* buffer described by struct drm_exynos_ipp_task_buffer */
+ DRM_EXYNOS_IPP_TASK_BUFFER = 0x0001,
+ /* rectangle described by struct drm_exynos_ipp_task_rect */
+ DRM_EXYNOS_IPP_TASK_RECTANGLE = 0x0002,
+ /* transformation described by struct drm_exynos_ipp_task_transform */
+ DRM_EXYNOS_IPP_TASK_TRANSFORM = 0x0003,
+ /* alpha configuration described by struct drm_exynos_ipp_task_alpha */
+ DRM_EXYNOS_IPP_TASK_ALPHA = 0x0004,
+
+ /* source image data (for buffer and rectangle chunks) */
+ DRM_EXYNOS_IPP_TASK_TYPE_SOURCE = 0x0001 << 16,
+ /* destination image data (for buffer and rectangle chunks) */
+ DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION = 0x0002 << 16,
+};
+
+/**
+ * Memory buffer with image data.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_BUFFER
+ * other parameters are same as for AddFB2 generic DRM ioctl
+ */
+struct drm_exynos_ipp_task_buffer {
+ __u32 id;
+ __u32 fourcc;
+ __u32 width, height;
+ __u32 gem_id[4];
+ __u32 offset[4];
+ __u32 pitch[4];
+ __u64 modifier;
+};
+
+/**
+ * Rectangle for processing.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE
+ * @reserved: padding
+ * @x,@y: top-left corner in pixels
+ * @w,@h: width/height in pixels
+ */
+struct drm_exynos_ipp_task_rect {
+ __u32 id;
+ __u32 reserved;
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+};
+
+/**
+ * Image transformation description.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM
+ * @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values
+ */
+struct drm_exynos_ipp_task_transform {
+ __u32 id;
+ __u32 rotation;
+};
+
+/**
+ * Image global alpha configuration for formats without alpha values.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_ALPHA
+ * @value: global alpha value (0-255)
+ */
+struct drm_exynos_ipp_task_alpha {
+ __u32 id;
+ __u32 value;
+};
+
+enum drm_exynos_ipp_flag {
+ /* generate DRM event after processing */
+ DRM_EXYNOS_IPP_FLAG_EVENT = 0x01,
+ /* dry run, only check task parameters */
+ DRM_EXYNOS_IPP_FLAG_TEST_ONLY = 0x02,
+ /* non-blocking processing */
+ DRM_EXYNOS_IPP_FLAG_NONBLOCK = 0x04,
+};
+
+#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\
+ DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK)
+
+/**
+ * Perform image processing described by an array of drm_exynos_ipp_task_*
+ * structures (parameters array).
+ *
+ * @ipp_id: id of IPP module to run the task
+ * @flags: bitmask of drm_exynos_ipp_flag values
+ * @reserved: padding
+ * @params_size: size of parameters array (in bytes)
+ * @params_ptr: pointer to parameters array or NULL
+ * @user_data: (optional) data for drm event
+ */
+struct drm_exynos_ioctl_ipp_commit {
+ __u32 ipp_id;
+ __u32 flags;
+ __u32 reserved;
+ __u32 params_size;
+ __u64 params_ptr;
+ __u64 user_data;
+};
+
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP 0x01
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@@ -147,6 +360,11 @@ struct drm_exynos_g2d_exec {
#define DRM_EXYNOS_G2D_EXEC 0x22
/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_RESOURCES 0x40
+#define DRM_EXYNOS_IPP_GET_CAPS 0x41
+#define DRM_EXYNOS_IPP_GET_LIMITS 0x42
+#define DRM_EXYNOS_IPP_COMMIT 0x43
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -165,8 +383,20 @@ struct drm_exynos_g2d_exec {
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
+#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_GET_RESOURCES, \
+ struct drm_exynos_ioctl_ipp_get_res)
+#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps)
+#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_GET_LIMITS, \
+ struct drm_exynos_ioctl_ipp_get_limits)
+#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
+
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
+#define DRM_EXYNOS_IPP_EVENT 0x80000002
struct drm_exynos_g2d_event {
struct drm_event base;
@@ -177,6 +407,16 @@ struct drm_exynos_g2d_event {
__u32 reserved;
};
+struct drm_exynos_ipp_event {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 ipp_id;
+ __u32 sequence;
+ __u64 reserved;
+};
+
#if defined(__cplusplus)
}
#endif
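
The count/pointer pairs in the new IPP structures follow the usual two-call pattern: call once with a NULL pointer to learn the element count, then again with a buffer. A hedged user-space sketch for GET_RESOURCES (error handling omitted; 'fd' is an opened exynos DRM node):

#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>

static __u32 *example_get_ipp_ids(int fd, __u32 *count)
{
        struct drm_exynos_ioctl_ipp_get_res res = { 0 };
        __u32 *ids;

        ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);   /* count only */
        ids = calloc(res.count_ipps, sizeof(*ids));
        res.ipp_id_ptr = (__u64)(uintptr_t)ids;
        ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);   /* fill ids[] */

        *count = res.count_ipps;
        return ids;
}
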
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index d954f8c33321..c4df3c3668b3 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -32,143 +32,615 @@ extern "C" {
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+/**
+ * struct drm_tegra_gem_create - parameters for the GEM object creation IOCTL
+ */
struct drm_tegra_gem_create {
+ /**
+ * @size:
+ *
+ * The size, in bytes, of the buffer object to be created.
+ */
__u64 size;
+
+ /**
+ * @flags:
+ *
+ * A bitmask of flags that influence the creation of GEM objects:
+ *
+ * DRM_TEGRA_GEM_CREATE_TILED
+ * Use the 16x16 tiling format for this buffer.
+ *
+ * DRM_TEGRA_GEM_CREATE_BOTTOM_UP
+ * The buffer has a bottom-up layout.
+ */
__u32 flags;
+
+ /**
+ * @handle:
+ *
+ * The handle of the created GEM object. Set by the kernel upon
+ * successful completion of the IOCTL.
+ */
__u32 handle;
};
+/**
+ * struct drm_tegra_gem_mmap - parameters for the GEM mmap IOCTL
+ */
struct drm_tegra_gem_mmap {
+ /**
+ * @handle:
+ *
+ * Handle of the GEM object to obtain an mmap offset for.
+ */
__u32 handle;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
+
+ /**
+ * @offset:
+ *
+ * The mmap offset for the given GEM object. Set by the kernel upon
+ * successful completion of the IOCTL.
+ */
__u64 offset;
};
+/**
+ * struct drm_tegra_syncpt_read - parameters for the read syncpoint IOCTL
+ */
struct drm_tegra_syncpt_read {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to read the current value from.
+ */
__u32 id;
+
+ /**
+ * @value:
+ *
+ * The current syncpoint value. Set by the kernel upon successful
+ * completion of the IOCTL.
+ */
__u32 value;
};
+/**
+ * struct drm_tegra_syncpt_incr - parameters for the increment syncpoint IOCTL
+ */
struct drm_tegra_syncpt_incr {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to increment.
+ */
__u32 id;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
};
+/**
+ * struct drm_tegra_syncpt_wait - parameters for the wait syncpoint IOCTL
+ */
struct drm_tegra_syncpt_wait {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to wait on.
+ */
__u32 id;
+
+ /**
+ * @thresh:
+ *
+ * Threshold value for which to wait.
+ */
__u32 thresh;
+
+ /**
+ * @timeout:
+ *
+ * Timeout, in milliseconds, to wait.
+ */
__u32 timeout;
+
+ /**
+ * @value:
+ *
+ * The new syncpoint value after the wait. Set by the kernel upon
+ * successful completion of the IOCTL.
+ */
__u32 value;
};
#define DRM_TEGRA_NO_TIMEOUT (0xffffffff)
+/**
+ * struct drm_tegra_open_channel - parameters for the open channel IOCTL
+ */
struct drm_tegra_open_channel {
+ /**
+ * @client:
+ *
+ * The client ID for this channel.
+ */
__u32 client;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
+
+ /**
+ * @context:
+ *
+ * The application context of this channel. Set by the kernel upon
+ * successful completion of the IOCTL. This context needs to be passed
+ * to the DRM_TEGRA_CHANNEL_CLOSE or the DRM_TEGRA_SUBMIT IOCTLs.
+ */
__u64 context;
};
+/**
+ * struct drm_tegra_close_channel - parameters for the close channel IOCTL
+ */
struct drm_tegra_close_channel {
+ /**
+ * @context:
+ *
+ * The application context of this channel. This is obtained from the
+ * DRM_TEGRA_OPEN_CHANNEL IOCTL.
+ */
__u64 context;
};
+/**
+ * struct drm_tegra_get_syncpt - parameters for the get syncpoint IOCTL
+ */
struct drm_tegra_get_syncpt {
+ /**
+ * @context:
+ *
+ * The application context identifying the channel for which to obtain
+ * the syncpoint ID.
+ */
__u64 context;
+
+ /**
+ * @index:
+ *
+ * Index of the client syncpoint for which to obtain the ID.
+ */
__u32 index;
+
+ /**
+ * @id:
+ *
+ * The ID of the given syncpoint. Set by the kernel upon successful
+ * completion of the IOCTL.
+ */
__u32 id;
};
+/**
+ * struct drm_tegra_get_syncpt_base - parameters for the get wait base IOCTL
+ */
struct drm_tegra_get_syncpt_base {
+ /**
+ * @context:
+ *
+ * The application context identifying for which channel to obtain the
+ * wait base.
+ */
__u64 context;
+
+ /**
+ * @syncpt:
+ *
+ * ID of the syncpoint for which to obtain the wait base.
+ */
__u32 syncpt;
+
+ /**
+ * @id:
+ *
+ * The ID of the wait base corresponding to the client syncpoint. Set
+ * by the kernel upon successful completion of the IOCTL.
+ */
__u32 id;
};
+/**
+ * struct drm_tegra_syncpt - syncpoint increment operation
+ */
struct drm_tegra_syncpt {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to operate on.
+ */
__u32 id;
+
+ /**
+ * @incrs:
+ *
+ * Number of increments to perform for the syncpoint.
+ */
__u32 incrs;
};
+/**
+ * struct drm_tegra_cmdbuf - structure describing a command buffer
+ */
struct drm_tegra_cmdbuf {
+ /**
+ * @handle:
+ *
+ * Handle to a GEM object containing the command buffer.
+ */
__u32 handle;
+
+ /**
+ * @offset:
+ *
+ * Offset, in bytes, into the GEM object identified by @handle at
+ * which the command buffer starts.
+ */
__u32 offset;
+
+ /**
+ * @words:
+ *
+ * Number of 32-bit words in this command buffer.
+ */
__u32 words;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
};
+/**
+ * struct drm_tegra_reloc - GEM object relocation structure
+ */
struct drm_tegra_reloc {
struct {
+ /**
+ * @cmdbuf.handle:
+ *
+ * Handle to the GEM object containing the command buffer for
+ * which to perform this GEM object relocation.
+ */
__u32 handle;
+
+ /**
+ * @cmdbuf.offset:
+ *
+ * Offset, in bytes, into the command buffer at which to
+ * insert the relocated address.
+ */
__u32 offset;
} cmdbuf;
struct {
+ /**
+ * @target.handle:
+ *
+ * Handle to the GEM object to be relocated.
+ */
__u32 handle;
+
+ /**
+ * @target.offset:
+ *
+ * Offset, in bytes, into the target GEM object at which the
+ * relocated data starts.
+ */
__u32 offset;
} target;
+
+ /**
+ * @shift:
+ *
+ * The number of bits by which to shift relocated addresses.
+ */
__u32 shift;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
};
+/**
+ * struct drm_tegra_waitchk - wait check structure
+ */
struct drm_tegra_waitchk {
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object containing a command stream on which to
+ * perform the wait check.
+ */
__u32 handle;
+
+ /**
+ * @offset:
+ *
+ * Offset, in bytes, of the location in the command stream to perform
+ * the wait check on.
+ */
__u32 offset;
+
+ /**
+ * @syncpt:
+ *
+ * ID of the syncpoint to wait check.
+ */
__u32 syncpt;
+
+ /**
+ * @thresh:
+ *
+ * Threshold value for which to check.
+ */
__u32 thresh;
};
+/**
+ * struct drm_tegra_submit - job submission structure
+ */
struct drm_tegra_submit {
+ /**
+ * @context:
+ *
+ * The application context identifying the channel to use for the
+ * execution of this job.
+ */
__u64 context;
+
+ /**
+ * @num_syncpts:
+ *
+ * The number of syncpoints operated on by this job. This defines the
+ * length of the array pointed to by @syncpts.
+ */
__u32 num_syncpts;
+
+ /**
+ * @num_cmdbufs:
+ *
+ * The number of command buffers to execute as part of this job. This
+ * defines the length of the array pointed to by @cmdbufs.
+ */
__u32 num_cmdbufs;
+
+ /**
+ * @num_relocs:
+ *
+ * The number of relocations to perform before executing this job.
+ * This defines the length of the array pointed to by @relocs.
+ */
__u32 num_relocs;
+
+ /**
+ * @num_waitchks:
+ *
+ * The number of wait checks to perform as part of this job. This
+ * defines the length of the array pointed to by @waitchks.
+ */
__u32 num_waitchks;
+
+ /**
+ * @waitchk_mask:
+ *
+ * Bitmask of valid wait checks.
+ */
__u32 waitchk_mask;
+
+ /**
+ * @timeout:
+ *
+ * Timeout, in milliseconds, before this job is cancelled.
+ */
__u32 timeout;
+
+ /**
+ * @syncpts:
+ *
+ * A pointer to an array of &struct drm_tegra_syncpt structures that
+ * specify the syncpoint operations performed as part of this job.
+ * The number of elements in the array must be equal to the value
+ * given by @num_syncpts.
+ */
__u64 syncpts;
+
+ /**
+ * @cmdbufs:
+ *
+ * A pointer to an array of &struct drm_tegra_cmdbuf structures that
+ * define the command buffers to execute as part of this job. The
+ * number of elements in the array must be equal to the value given
+ * by @num_cmdbufs.
+ */
__u64 cmdbufs;
+
+ /**
+ * @relocs:
+ *
+ * A pointer to an array of &struct drm_tegra_reloc structures that
+ * specify the relocations that need to be performed before executing
+ * this job. The number of elements in the array must be equal to the
+ * value given by @num_relocs.
+ */
__u64 relocs;
+
+ /**
+ * @waitchks:
+ *
+ * A pointer to an array of &struct drm_tegra_waitchk structures that
+ * specify the wait checks to be performed while executing this job.
+ * The number of elements in the array must be equal to the value
+ * given by @num_waitchks.
+ */
__u64 waitchks;
- __u32 fence; /* Return value */
- __u32 reserved[5]; /* future expansion */
+ /**
+ * @fence:
+ *
+ * The threshold of the syncpoint associated with this job after it
+ * has been completed. Set by the kernel upon successful completion of
+ * the IOCTL. This can be used with the DRM_TEGRA_SYNCPT_WAIT IOCTL to
+ * wait for this job to be finished.
+ */
+ __u32 fence;
+
+ /**
+ * @reserved:
+ *
+ * This field is reserved for future use. Must be 0.
+ */
+ __u32 reserved[5];
};
#define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
#define DRM_TEGRA_GEM_TILING_MODE_TILED 1
#define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
+/**
+ * struct drm_tegra_gem_set_tiling - parameters for the set tiling IOCTL
+ */
struct drm_tegra_gem_set_tiling {
- /* input */
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to set the tiling parameters.
+ */
__u32 handle;
+
+ /**
+ * @mode:
+ *
+ * The tiling mode to set. Must be one of:
+ *
+ * DRM_TEGRA_GEM_TILING_MODE_PITCH
+ * pitch linear format
+ *
+ * DRM_TEGRA_GEM_TILING_MODE_TILED
+ * 16x16 tiling format
+ *
+ * DRM_TEGRA_GEM_TILING_MODE_BLOCK
+ * 16Bx2 tiling format
+ */
__u32 mode;
+
+ /**
+ * @value:
+ *
+ * The value to set for the tiling mode parameter.
+ */
__u32 value;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
};
+/**
+ * struct drm_tegra_gem_get_tiling - parameters for the get tiling IOCTL
+ */
struct drm_tegra_gem_get_tiling {
- /* input */
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to query the tiling parameters.
+ */
__u32 handle;
- /* output */
+
+ /**
+ * @mode:
+ *
+ * The tiling mode currently associated with the GEM object. Set by
+ * the kernel upon successful completion of the IOCTL.
+ */
__u32 mode;
+
+ /**
+ * @value:
+ *
+ * The tiling mode parameter currently associated with the GEM object.
+ * Set by the kernel upon successful completion of the IOCTL.
+ */
__u32 value;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
__u32 pad;
};
#define DRM_TEGRA_GEM_BOTTOM_UP (1 << 0)
#define DRM_TEGRA_GEM_FLAGS (DRM_TEGRA_GEM_BOTTOM_UP)
+/**
+ * struct drm_tegra_gem_set_flags - parameters for the set flags IOCTL
+ */
struct drm_tegra_gem_set_flags {
- /* input */
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to set the flags.
+ */
__u32 handle;
- /* output */
+
+ /**
+ * @flags:
+ *
+ * The flags to set for the GEM object.
+ */
__u32 flags;
};
+/**
+ * struct drm_tegra_gem_get_flags - parameters for the get flags IOCTL
+ */
struct drm_tegra_gem_get_flags {
- /* input */
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to query the flags.
+ */
__u32 handle;
- /* output */
+
+ /**
+ * @flags:
+ *
+ * The flags currently associated with the GEM object. Set by the
+ * kernel upon successful completion of the IOCTL.
+ */
__u32 flags;
};
@@ -193,7 +665,7 @@ struct drm_tegra_gem_get_flags {
#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
-#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_close_channel)
#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
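
As the new @fence documentation suggests, the value returned by DRM_IOCTL_TEGRA_SUBMIT can be fed into the syncpoint-wait ioctl. A user-space sketch (the syncpoint id would have been obtained earlier via DRM_IOCTL_TEGRA_GET_SYNCPT; error handling omitted):

static int example_wait_for_job(int fd, __u32 syncpt_id, __u32 fence)
{
        struct drm_tegra_syncpt_wait wait = {
                .id = syncpt_id,
                .thresh = fence,        /* returned in drm_tegra_submit.fence */
                .timeout = 1000,        /* milliseconds */
        };

        return ioctl(fd, DRM_IOCTL_TEGRA_SYNCPT_WAIT, &wait);
}
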
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
new file mode 100644
index 000000000000..7b6627783608
--- /dev/null
+++ b/include/uapi/drm/v3d_drm.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright © 2014-2018 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _V3D_DRM_H_
+#define _V3D_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_V3D_SUBMIT_CL 0x00
+#define DRM_V3D_WAIT_BO 0x01
+#define DRM_V3D_CREATE_BO 0x02
+#define DRM_V3D_MMAP_BO 0x03
+#define DRM_V3D_GET_PARAM 0x04
+#define DRM_V3D_GET_BO_OFFSET 0x05
+
+#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
+#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
+#define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
+#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
+#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
+#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+
+/**
+ * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * This asks the kernel to have the GPU execute an optional binner
+ * command list, and a render command list.
+ */
+struct drm_v3d_submit_cl {
+ /* Pointer to the binner command list.
+ *
+ * This is the first set of commands executed, which runs the
+ * coordinate shader to determine where primitives land on the screen,
+ * then writes out the state updates and draw calls necessary per tile
+ * to the tile allocation BO.
+ */
+ __u32 bcl_start;
+
+ /** End address of the BCL (first byte after the BCL) */
+ __u32 bcl_end;
+
+ /* Offset of the render command list.
+ *
+ * This is the second set of commands executed, which will either
+ * execute the tiles that have been set up by the BCL, or a fixed set
+ * of tiles (in the case of RCL-only blits).
+ */
+ __u32 rcl_start;
+
+ /** End address of the RCL (first byte after the RCL) */
+ __u32 rcl_end;
+
+ /** An optional sync object to wait on before starting the BCL. */
+ __u32 in_sync_bcl;
+ /** An optional sync object to wait on before starting the RCL. */
+ __u32 in_sync_rcl;
+ /** An optional sync object to place the completion fence in. */
+ __u32 out_sync;
+
+ /* Offset of the tile alloc memory
+ *
+ * This is optional on V3D 3.3 (where the CL can set the value) but
+ * required on V3D 4.1.
+ */
+ __u32 qma;
+
+ /** Size of the tile alloc memory. */
+ __u32 qms;
+
+ /** Offset of the tile state data array. */
+ __u32 qts;
+
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /* Pad, must be zero-filled. */
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_V3D_SUBMIT_CL on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_v3d_wait_bo {
+ __u32 handle;
+ __u32 pad;
+ __u64 timeout_ns;
+};
+
+/**
+ * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_v3d_create_bo {
+ __u32 size;
+ __u32 flags;
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ /**
+ * Returned offset for the BO in the V3D address space. This offset
+ * is private to the DRM fd and is valid for the lifetime of the GEM
+ * handle.
+ *
+ * This offset value will always be nonzero, since various HW
+ * units treat 0 specially.
+ */
+ __u32 offset;
+};
+
+/**
+ * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
+ *
+ * This doesn't actually perform an mmap. Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node. This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_v3d_mmap_bo {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 flags;
+ /** offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
+
+enum drm_v3d_param {
+ DRM_V3D_PARAM_V3D_UIFCFG,
+ DRM_V3D_PARAM_V3D_HUB_IDENT1,
+ DRM_V3D_PARAM_V3D_HUB_IDENT2,
+ DRM_V3D_PARAM_V3D_HUB_IDENT3,
+ DRM_V3D_PARAM_V3D_CORE0_IDENT0,
+ DRM_V3D_PARAM_V3D_CORE0_IDENT1,
+ DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+};
+
+struct drm_v3d_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
+/**
+ * Returns the offset for the BO in the V3D address space for this DRM fd.
+ * This is the same value returned by drm_v3d_create_bo, if that was called
+ * from this DRM fd.
+ */
+struct drm_v3d_get_bo_offset {
+ __u32 handle;
+ __u32 offset;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _V3D_DRM_H_ */
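
A user-space sketch of the new BO ioctls: create a buffer, ask for its fake mmap offset, and map it through the DRM node, as described in the drm_v3d_mmap_bo comment (error handling omitted):

#include <sys/ioctl.h>
#include <sys/mman.h>

static void *example_v3d_alloc_and_map(int fd, __u32 size)
{
        struct drm_v3d_create_bo create = { .size = size };
        struct drm_v3d_mmap_bo map;

        ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);

        map = (struct drm_v3d_mmap_bo){ .handle = create.handle };
        ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);

        /* map.offset is not a real file offset, just a token for mmap(). */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, map.offset);
}
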
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index b95a0e11cb07..2cac6277a1d7 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -183,10 +183,17 @@ struct drm_vc4_submit_cl {
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmonid;
- /* Unused field to align this struct on 64 bits. Must be set to 0.
- * If one ever needs to add an u32 field to this struct, this field
- * can be used.
+ /* Syncobj handle to wait on. If set, processing of this render job
+ * will not start until the syncobj is signaled. 0 means ignore.
*/
+ __u32 in_sync;
+
+ /* Syncobj handle to export fence to. If set, the fence in the syncobj
+ * will be replaced with a fence that signals upon completion of this
+ * render job. 0 means ignore.
+ */
+ __u32 out_sync;
+
__u32 pad2;
};
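
A small sketch of how the two new syncobj handles would be used from user space; wait_handle and signal_handle are hypothetical syncobjs created beforehand (for example with DRM_IOCTL_SYNCOBJ_CREATE), and 0 keeps the previous behaviour:

struct drm_vc4_submit_cl submit = { 0 };
/* ... fill in bin/render CL pointers as before ... */
submit.in_sync  = wait_handle;     /* job will not start until this signals  */
submit.out_sync = signal_handle;   /* replaced with this job's completion    */
ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
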
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index a04adbc70ddf..d00221345c19 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/fs.h>
+#include <linux/signal.h>
#include <asm/byteorder.h>
typedef __kernel_ulong_t aio_context_t;
@@ -38,10 +39,8 @@ enum {
IOCB_CMD_PWRITE = 1,
IOCB_CMD_FSYNC = 2,
IOCB_CMD_FDSYNC = 3,
- /* These two are experimental.
- * IOCB_CMD_PREADX = 4,
- * IOCB_CMD_POLL = 5,
- */
+ /* 4 was the experimental IOCB_CMD_PREADX */
+ IOCB_CMD_POLL = 5,
IOCB_CMD_NOOP = 6,
IOCB_CMD_PREADV = 7,
IOCB_CMD_PWRITEV = 8,
@@ -54,6 +53,7 @@ enum {
* is valid.
*/
#define IOCB_FLAG_RESFD (1 << 0)
+#define IOCB_FLAG_IOPRIO (1 << 1)
/* read() from /dev/aio returns these structures. */
struct io_event {
@@ -108,5 +108,10 @@ struct iocb {
#undef IFBIG
#undef IFLITTLE
+struct __aio_sigset {
+ const sigset_t __user *sigmask;
+ size_t sigsetsize;
+};
+
#endif /* __LINUX__AIO_ABI_H */
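
The new IOCB_CMD_POLL opcode and the __aio_sigset argument back the io_pgetevents() syscall added in this series. A hedged user-space sketch of calling it raw (assumes __NR_io_pgetevents is wired up for the architecture; a real wrapper would also pass a timeout):

#include <signal.h>
#include <syscall.h>
#include <unistd.h>

static int example_io_pgetevents(aio_context_t ctx, struct io_event *evs,
                                 long nr, const sigset_t *mask)
{
        struct __aio_sigset ksig = {
                .sigmask = mask,
                .sigsetsize = 8,  /* size of the kernel's sigset, not glibc's */
        };

        return syscall(__NR_io_pgetevents, ctx, 1, nr, evs, NULL, &ksig);
}
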
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 4e61a9e05132..c35aee9ad4a6 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -147,6 +147,7 @@
#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
+#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
@@ -465,6 +466,7 @@ struct audit_tty_status {
};
#define AUDIT_UID_UNSET (unsigned int)-1
+#define AUDIT_SID_UNSET ((unsigned int)-1)
/* audit_rule_data supports filter rules with both integer and string
* fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 2a4432c7a4b4..e13eec3dfb2f 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
+ * Copyright 2005-2006,2013,2017-2018 Ian Kent <raven@themaw.net>
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
@@ -8,7 +10,6 @@
*
* ----------------------------------------------------------------------- */
-
#ifndef _UAPI_LINUX_AUTO_FS_H
#define _UAPI_LINUX_AUTO_FS_H
@@ -18,13 +19,11 @@
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
+#define AUTOFS_PROTO_VERSION 5
+#define AUTOFS_MIN_PROTO_VERSION 3
+#define AUTOFS_MAX_PROTO_VERSION 5
-/* This file describes autofs v3 */
-#define AUTOFS_PROTO_VERSION 3
-
-/* Range of protocol versions defined */
-#define AUTOFS_MAX_PROTO_VERSION AUTOFS_PROTO_VERSION
-#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
+#define AUTOFS_PROTO_SUBVERSION 2
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
@@ -76,9 +75,155 @@ enum {
#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD)
#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD)
#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD)
-#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int)
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t)
-#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long)
-#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire)
+#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_PROTOVER_CMD, int)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_SETTIMEOUT_CMD, \
+ compat_ulong_t)
+#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_SETTIMEOUT_CMD, \
+ unsigned long)
+#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_EXPIRE_CMD, \
+ struct autofs_packet_expire)
+
+/* autofs version 4 and later definitions */
+
+/* Mask for expire behaviour */
+#define AUTOFS_EXP_IMMEDIATE 1
+#define AUTOFS_EXP_LEAVES 2
+
+#define AUTOFS_TYPE_ANY 0U
+#define AUTOFS_TYPE_INDIRECT 1U
+#define AUTOFS_TYPE_DIRECT 2U
+#define AUTOFS_TYPE_OFFSET 4U
+
+static inline void set_autofs_type_indirect(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_INDIRECT;
+}
+
+static inline unsigned int autofs_type_indirect(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_INDIRECT);
+}
+
+static inline void set_autofs_type_direct(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_DIRECT;
+}
+
+static inline unsigned int autofs_type_direct(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_DIRECT);
+}
+
+static inline void set_autofs_type_offset(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_OFFSET;
+}
+
+static inline unsigned int autofs_type_offset(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_OFFSET);
+}
+
+static inline unsigned int autofs_type_trigger(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET);
+}
+
+/*
+ * This isn't really a type as we use it to say "no type set" to
+ * indicate we want to search for "any" mount in the
+ * autofs_dev_ioctl_ismountpoint() device ioctl function.
+ */
+static inline void set_autofs_type_any(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_ANY;
+}
+
+static inline unsigned int autofs_type_any(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_ANY);
+}
+
+/* Daemon notification packet types */
+enum autofs_notify {
+ NFY_NONE,
+ NFY_MOUNT,
+ NFY_EXPIRE
+};
+
+/* Kernel protocol version 4 packet types */
+
+/* Expire entry (umount request) */
+#define autofs_ptype_expire_multi 2
+
+/* Kernel protocol version 5 packet types */
+
+/* Indirect mount missing and expire requests. */
+#define autofs_ptype_missing_indirect 3
+#define autofs_ptype_expire_indirect 4
+
+/* Direct mount missing and expire requests */
+#define autofs_ptype_missing_direct 5
+#define autofs_ptype_expire_direct 6
+
+/* v4 multi expire (via pipe) */
+struct autofs_packet_expire_multi {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[NAME_MAX+1];
+};
+
+union autofs_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_packet_missing missing;
+ struct autofs_packet_expire expire;
+ struct autofs_packet_expire_multi expire_multi;
+};
+
+/* autofs v5 common packet struct */
+struct autofs_v5_packet {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ __u32 dev;
+ __u64 ino;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 len;
+ char name[NAME_MAX+1];
+};
+
+typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
+typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
+
+union autofs_v5_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_v5_packet v5_packet;
+ autofs_packet_missing_indirect_t missing_indirect;
+ autofs_packet_expire_indirect_t expire_indirect;
+ autofs_packet_missing_direct_t missing_direct;
+ autofs_packet_expire_direct_t expire_direct;
+};
+
+enum {
+ AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */
+ AUTOFS_IOC_PROTOSUBVER_CMD,
+ AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */
+};
+
+#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, \
+ AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
+#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_PROTOSUBVER_CMD, int)
+#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_ASKUMOUNT_CMD, int)
#endif /* _UAPI_LINUX_AUTO_FS_H */
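
With the v4/v5 definitions now living in auto_fs.h, an automount daemon consumes the same packets as before. A sketch of the daemon-side read loop body (pipe_fd is the kernel pipe; mount/umount handling is elided):

union autofs_v5_packet_union pkt;

if (read(pipe_fd, &pkt, sizeof(pkt)) > 0) {
        switch (pkt.hdr.type) {
        case autofs_ptype_missing_indirect:
                /* mount pkt.missing_indirect.name under the indirect root */
                break;
        case autofs_ptype_expire_indirect:
                /* umount pkt.expire_indirect.name, then acknowledge */
                break;
        }
}
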
diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h
index 1f608e27a06f..d01ef0a0189c 100644
--- a/include/uapi/linux/auto_fs4.h
+++ b/include/uapi/linux/auto_fs4.h
@@ -7,156 +7,9 @@
* option, any later version, incorporated herein by reference.
*/
-#ifndef _LINUX_AUTO_FS4_H
-#define _LINUX_AUTO_FS4_H
+#ifndef _UAPI_LINUX_AUTO_FS4_H
+#define _UAPI_LINUX_AUTO_FS4_H
-/* Include common v3 definitions */
-#include <linux/types.h>
#include <linux/auto_fs.h>
-/* autofs v4 definitions */
-#undef AUTOFS_PROTO_VERSION
-#undef AUTOFS_MIN_PROTO_VERSION
-#undef AUTOFS_MAX_PROTO_VERSION
-
-#define AUTOFS_PROTO_VERSION 5
-#define AUTOFS_MIN_PROTO_VERSION 3
-#define AUTOFS_MAX_PROTO_VERSION 5
-
-#define AUTOFS_PROTO_SUBVERSION 2
-
-/* Mask for expire behaviour */
-#define AUTOFS_EXP_IMMEDIATE 1
-#define AUTOFS_EXP_LEAVES 2
-
-#define AUTOFS_TYPE_ANY 0U
-#define AUTOFS_TYPE_INDIRECT 1U
-#define AUTOFS_TYPE_DIRECT 2U
-#define AUTOFS_TYPE_OFFSET 4U
-
-static inline void set_autofs_type_indirect(unsigned int *type)
-{
- *type = AUTOFS_TYPE_INDIRECT;
-}
-
-static inline unsigned int autofs_type_indirect(unsigned int type)
-{
- return (type == AUTOFS_TYPE_INDIRECT);
-}
-
-static inline void set_autofs_type_direct(unsigned int *type)
-{
- *type = AUTOFS_TYPE_DIRECT;
-}
-
-static inline unsigned int autofs_type_direct(unsigned int type)
-{
- return (type == AUTOFS_TYPE_DIRECT);
-}
-
-static inline void set_autofs_type_offset(unsigned int *type)
-{
- *type = AUTOFS_TYPE_OFFSET;
-}
-
-static inline unsigned int autofs_type_offset(unsigned int type)
-{
- return (type == AUTOFS_TYPE_OFFSET);
-}
-
-static inline unsigned int autofs_type_trigger(unsigned int type)
-{
- return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET);
-}
-
-/*
- * This isn't really a type as we use it to say "no type set" to
- * indicate we want to search for "any" mount in the
- * autofs_dev_ioctl_ismountpoint() device ioctl function.
- */
-static inline void set_autofs_type_any(unsigned int *type)
-{
- *type = AUTOFS_TYPE_ANY;
-}
-
-static inline unsigned int autofs_type_any(unsigned int type)
-{
- return (type == AUTOFS_TYPE_ANY);
-}
-
-/* Daemon notification packet types */
-enum autofs_notify {
- NFY_NONE,
- NFY_MOUNT,
- NFY_EXPIRE
-};
-
-/* Kernel protocol version 4 packet types */
-
-/* Expire entry (umount request) */
-#define autofs_ptype_expire_multi 2
-
-/* Kernel protocol version 5 packet types */
-
-/* Indirect mount missing and expire requests. */
-#define autofs_ptype_missing_indirect 3
-#define autofs_ptype_expire_indirect 4
-
-/* Direct mount missing and expire requests */
-#define autofs_ptype_missing_direct 5
-#define autofs_ptype_expire_direct 6
-
-/* v4 multi expire (via pipe) */
-struct autofs_packet_expire_multi {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- int len;
- char name[NAME_MAX+1];
-};
-
-union autofs_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_packet_missing missing;
- struct autofs_packet_expire expire;
- struct autofs_packet_expire_multi expire_multi;
-};
-
-/* autofs v5 common packet struct */
-struct autofs_v5_packet {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- __u32 dev;
- __u64 ino;
- __u32 uid;
- __u32 gid;
- __u32 pid;
- __u32 tgid;
- __u32 len;
- char name[NAME_MAX+1];
-};
-
-typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
-typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
-typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
-typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
-
-union autofs_v5_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_v5_packet v5_packet;
- autofs_packet_missing_indirect_t missing_indirect;
- autofs_packet_expire_indirect_t expire_indirect;
- autofs_packet_missing_direct_t missing_direct;
- autofs_packet_expire_direct_t expire_direct;
-};
-
-enum {
- AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */
- AUTOFS_IOC_PROTOSUBVER_CMD,
- AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */
-};
-
-#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
-#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int)
-#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int)
-
-#endif /* _LINUX_AUTO_FS4_H */
+#endif /* _UAPI_LINUX_AUTO_FS4_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c5ec89732a8d..59b19b6a40d7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -95,6 +95,9 @@ enum bpf_cmd {
BPF_OBJ_GET_INFO_BY_FD,
BPF_PROG_QUERY,
BPF_RAW_TRACEPOINT_OPEN,
+ BPF_BTF_LOAD,
+ BPF_BTF_GET_FD_BY_ID,
+ BPF_TASK_FD_QUERY,
};
enum bpf_map_type {
@@ -115,6 +118,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_DEVMAP,
BPF_MAP_TYPE_SOCKMAP,
BPF_MAP_TYPE_CPUMAP,
+ BPF_MAP_TYPE_XSKMAP,
+ BPF_MAP_TYPE_SOCKHASH,
};
enum bpf_prog_type {
@@ -137,6 +142,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_MSG,
BPF_PROG_TYPE_RAW_TRACEPOINT,
BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_PROG_TYPE_LWT_SEG6LOCAL,
+ BPF_PROG_TYPE_LIRC_MODE2,
};
enum bpf_attach_type {
@@ -154,6 +161,9 @@ enum bpf_attach_type {
BPF_CGROUP_INET6_CONNECT,
BPF_CGROUP_INET4_POST_BIND,
BPF_CGROUP_INET6_POST_BIND,
+ BPF_CGROUP_UDP4_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_LIRC_MODE2,
__MAX_BPF_ATTACH_TYPE
};
@@ -279,6 +289,9 @@ union bpf_attr {
*/
char map_name[BPF_OBJ_NAME_LEN];
__u32 map_ifindex; /* ifindex of netdev to create on */
+ __u32 btf_fd; /* fd pointing to a BTF type data */
+ __u32 btf_key_type_id; /* BTF type_id of the key */
+ __u32 btf_value_type_id; /* BTF type_id of the value */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -339,6 +352,7 @@ union bpf_attr {
__u32 start_id;
__u32 prog_id;
__u32 map_id;
+ __u32 btf_id;
};
__u32 next_id;
__u32 open_flags;
@@ -363,398 +377,1704 @@ union bpf_attr {
__u64 name;
__u32 prog_fd;
} raw_tracepoint;
+
+ struct { /* anonymous struct for BPF_BTF_LOAD */
+ __aligned_u64 btf;
+ __aligned_u64 btf_log_buf;
+ __u32 btf_size;
+ __u32 btf_log_size;
+ __u32 btf_log_level;
+ };
+
+ struct {
+ __u32 pid; /* input: pid */
+ __u32 fd; /* input: fd */
+ __u32 flags; /* input: flags */
+ __u32 buf_len; /* input/output: buf len */
+ __aligned_u64 buf; /* input/output:
+ * tp_name for tracepoint
+ * symbol for kprobe
+ * filename for uprobe
+ */
+ __u32 prog_id; /* output: prog_id */
+ __u32 fd_type; /* output: BPF_FD_TYPE_* */
+ __u64 probe_offset; /* output: probe_offset */
+ __u64 probe_addr; /* output: probe_addr */
+ } task_fd_query;
} __attribute__((aligned(8)));
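
A hedged user-space sketch of the new BPF_BTF_LOAD command via a raw bpf(2) call; btf_data would be a caller-prepared BTF blob and the log buffer is optional:

#include <stdint.h>
#include <string.h>
#include <syscall.h>
#include <unistd.h>

static int example_btf_load(const void *btf_data, __u32 btf_size,
                            char *log_buf, __u32 log_size)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.btf           = (__u64)(uintptr_t)btf_data;
        attr.btf_size      = btf_size;
        attr.btf_log_buf   = (__u64)(uintptr_t)log_buf;
        attr.btf_log_size  = log_size;
        attr.btf_log_level = log_buf ? 1 : 0;

        /* On success the kernel returns a new fd referring to the loaded BTF. */
        return syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
}
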
-/* BPF helper function descriptions:
- *
- * void *bpf_map_lookup_elem(&map, &key)
- * Return: Map value or NULL
- *
- * int bpf_map_update_elem(&map, &key, &value, flags)
- * Return: 0 on success or negative error
- *
- * int bpf_map_delete_elem(&map, &key)
- * Return: 0 on success or negative error
- *
- * int bpf_probe_read(void *dst, int size, void *src)
- * Return: 0 on success or negative error
+/* The description below is an attempt at providing documentation to eBPF
+ * developers about the multiple available eBPF helper functions. It can be
+ * parsed and used to produce a manual page. The workflow is the following,
+ * and requires the rst2man utility:
+ *
+ * $ ./scripts/bpf_helpers_doc.py \
+ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
+ * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
+ * $ man /tmp/bpf-helpers.7
+ *
+ * Note that in order to produce this external documentation, some RST
+ * formatting is used in the descriptions to get "bold" and "italics" in
+ * manual pages. Also note that the few trailing white spaces are
+ * intentional, removing them would break paragraphs for rst2man.
+ *
+ * Start of BPF helper function descriptions:
+ *
+ * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
+ * Description
+ * Perform a lookup in *map* for an entry associated to *key*.
+ * Return
+ * Map value associated to *key*, or **NULL** if no entry was
+ * found.
+ *
+ * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+ * Description
+ * Add or update the value of the entry associated to *key* in
+ * *map* with *value*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * Flag value **BPF_NOEXIST** cannot be used for maps of types
+ * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
+ * elements always exist), the helper would return an error.
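+ *
+ * A minimal usage sketch (assuming a hypothetical
+ * **BPF_MAP_TYPE_HASH** map *my_map*, created and shared with
+ * user space), maintaining a per-key counter:
+ *
+ * ::
+ *
+ * __u32 key = 0;
+ * long init_val = 1, *value;
+ *
+ * /* my_map is an assumed, pre-created map. */
+ * value = bpf_map_lookup_elem(&my_map, &key);
+ * if (value)
+ *         __sync_fetch_and_add(value, 1);
+ * else
+ *         bpf_map_update_elem(&my_map, &key, &init_val,
+ *                             BPF_NOEXIST);
+ *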
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
+ * Description
+ * Delete entry with *key* from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * Description
+ * For tracing programs, safely attempt to read *size* bytes from
+ * address *src* and store the data in *dst*.
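+ *
+ * A short tracing sketch, assuming the layout of **struct
+ * task_struct** is known to the program (for example via kernel
+ * headers):
+ *
+ * ::
+ *
+ * struct task_struct *task;
+ * int prio;
+ *
+ * task = (struct task_struct *)bpf_get_current_task();
+ * /* Kernel memory cannot be dereferenced directly here,
+ *  * so copy the field through the helper instead.
+ *  */
+ * if (bpf_probe_read(&prio, sizeof(prio), &task->prio))
+ *         return 0;
+ *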
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
* u64 bpf_ktime_get_ns(void)
- * Return: current ktime
- *
- * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
- * Return: length of buffer written or negative error
- *
- * u32 bpf_prandom_u32(void)
- * Return: random value
- *
- * u32 bpf_raw_smp_processor_id(void)
- * Return: SMP processor ID
- *
- * int bpf_skb_store_bytes(skb, offset, from, len, flags)
- * store bytes into packet
- * @skb: pointer to skb
- * @offset: offset within packet from skb->mac_header
- * @from: pointer where to copy bytes from
- * @len: number of bytes to store into packet
- * @flags: bit 0 - if true, recompute skb->csum
- * other bits - reserved
- * Return: 0 on success or negative error
- *
- * int bpf_l3_csum_replace(skb, offset, from, to, flags)
- * recompute IP checksum
- * @skb: pointer to skb
- * @offset: offset within packet where IP checksum is located
- * @from: old value of header field
- * @to: new value of header field
- * @flags: bits 0-3 - size of header field
- * other bits - reserved
- * Return: 0 on success or negative error
- *
- * int bpf_l4_csum_replace(skb, offset, from, to, flags)
- * recompute TCP/UDP checksum
- * @skb: pointer to skb
- * @offset: offset within packet where TCP/UDP checksum is located
- * @from: old value of header field
- * @to: new value of header field
- * @flags: bits 0-3 - size of header field
- * bit 4 - is pseudo header
- * other bits - reserved
- * Return: 0 on success or negative error
- *
- * int bpf_tail_call(ctx, prog_array_map, index)
- * jump into another BPF program
- * @ctx: context pointer passed to next program
- * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- * @index: 32-bit index inside array that selects specific program to run
- * Return: 0 on success or negative error
- *
- * int bpf_clone_redirect(skb, ifindex, flags)
- * redirect to another netdev
- * @skb: pointer to skb
- * @ifindex: ifindex of the net device
- * @flags: bit 0 - if set, redirect to ingress instead of egress
- * other bits - reserved
- * Return: 0 on success or negative error
+ * Description
+ * Return the time elapsed since system boot, in nanoseconds.
+ * Return
+ * Current *ktime*.
+ *
+ * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
+ * Description
+ * This helper is a "printk()-like" facility for debugging. It
+ * prints a message defined by format *fmt* (of size *fmt_size*)
+ * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
+ * available. It can take up to three additional **u64**
+ * arguments (as for eBPF helpers in general, the total number of
+ * arguments is limited to five).
+ *
+ * Each time the helper is called, it appends a line to the trace.
+ * The format of the trace is customizable, and the exact output
+ * one will get depends on the options set in
+ * *\/sys/kernel/debug/tracing/trace_options* (see also the
+ * *README* file under the same directory). However, it usually
+ * defaults to something like:
+ *
+ * ::
+ *
+ * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg>
+ *
+ * In the above:
+ *
+ * * ``telnet`` is the name of the current task.
+ * * ``470`` is the PID of the current task.
+ * * ``001`` is the CPU number on which the task is
+ * running.
+ * * In ``.N..``, each character refers to a set of
+ * options (whether irqs are enabled, scheduling
+ * options, whether hard/softirqs are running, level of
+ * preempt_disabled respectively). **N** means that
+ * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
+ * are set.
+ * * ``419421.045894`` is a timestamp.
+ * * ``0x00000001`` is a fake value used by BPF for the
+ * instruction pointer register.
+ * * ``<formatted msg>`` is the message formatted with
+ * *fmt*.
+ *
+ * The conversion specifiers supported by *fmt* are similar to, but
+ * more limited than, those of printk(). They are **%d**, **%i**,
+ * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
+ * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
+ * of field, padding with zeroes, etc.) is available, and the
+ * helper will return **-EINVAL** (but print nothing) if it
+ * encounters an unknown specifier.
+ *
+ * Also, note that **bpf_trace_printk**\ () is slow, and should
+ * only be used for debugging purposes. For this reason, the first
+ * time the helper is used (or more precisely, when
+ * **trace_printk**\ () buffers are allocated), a notice block
+ * (spanning several lines) is printed to the kernel logs, stating
+ * that the helper is not meant "for production use". For passing
+ * values to user space, perf events should be preferred.
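+ *
+ * A debugging-only sketch for a program with a socket buffer
+ * context *skb*; the format string is typically kept on the eBPF
+ * stack:
+ *
+ * ::
+ *
+ * char fmt[] = "packet length: %u\n";
+ *
+ * bpf_trace_printk(fmt, sizeof(fmt), skb->len);
+ *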
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ *
+ * u32 bpf_get_prandom_u32(void)
+ * Description
+ * Get a pseudo-random number.
+ *
+ * From a security point of view, this helper uses its own
+ * pseudo-random internal state, and cannot be used to infer the
+ * seed of other random functions in the kernel. However, it is
+ * essential to note that the generator used by the helper is not
+ * cryptographically secure.
+ * Return
+ * A random 32-bit unsigned value.
+ *
+ * u32 bpf_get_smp_processor_id(void)
+ * Description
+ * Get the SMP (symmetric multiprocessing) processor id. Note that
+ * all programs run with preemption disabled, which means that the
+ * SMP processor id is stable during all the execution of the
+ * program.
+ * Return
+ * The SMP id of the processor running the program.
+ *
+ * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
+ * Description
+ * Store *len* bytes from address *from* into the packet
+ * associated to *skb*, at *offset*. *flags* are a combination of
+ * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
+ * checksum for the packet after storing the bytes) and
+ * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
+ * **->sw_hash** and *skb*\ **->l4_hash** to 0).
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
+ * Description
+ * Recompute the layer 3 (e.g. IP) checksum for the packet
+ * associated to *skb*. Computation is incremental, so the helper
+ * must know the former value of the header field that was
+ * modified (*from*), the new value of this field (*to*), and the
+ * number of bytes (2 or 4) for this field, stored in *size*.
+ * Alternatively, it is possible to store the difference between
+ * the previous and the new values of the header field in *to*, by
+ * setting *from* and *size* to 0. For both methods, *offset*
+ * indicates the location of the IP checksum within the packet.
+ *
+ * This helper works in combination with **bpf_csum_diff**\ (),
+ * which does not update the checksum in-place, but offers more
+ * flexibility and can handle sizes larger than 2 or 4 for the
+ * checksum to update.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
+ * Description
+ * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
+ * packet associated to *skb*. Computation is incremental, so the
+ * helper must know the former value of the header field that was
+ * modified (*from*), the new value of this field (*to*), and the
+ * number of bytes (2 or 4) for this field, stored on the lowest
+ * four bits of *flags*. Alternatively, it is possible to store
+ * the difference between the previous and the new values of the
+ * header field in *to*, by setting *from* and the four lowest
+ * bits of *flags* to 0. For both methods, *offset* indicates the
+ * location of the IP checksum within the packet. In addition to
+ * the size of the field, actual flags can be combined into *flags*
+ * with a bitwise OR. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
+ * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
+ * for updates resulting in a null checksum the value is set to
+ * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
+ * the checksum is to be computed against a pseudo-header.
+ *
+ * This helper works in combination with **bpf_csum_diff**\ (),
+ * which does not update the checksum in-place, but offers more
+ * flexibility and can handle sizes larger than 2 or 4 for the
+ * checksum to update.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
+ * Description
+ * This special helper is used to trigger a "tail call", or in
+ * other words, to jump into another eBPF program. The same stack
+ * frame is used (but values on stack and in registers for the
+ * caller are not accessible to the callee). This mechanism allows
+ * for program chaining, either for raising the maximum number of
+ * available eBPF instructions, or to execute given programs in
+ * conditional blocks. For security reasons, there is an upper
+ * limit to the number of successive tail calls that can be
+ * performed.
+ *
+ * Upon call of this helper, the program attempts to jump into a
+ * program referenced at index *index* in *prog_array_map*, a
+ * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
+ * *ctx*, a pointer to the context.
+ *
+ * If the call succeeds, the kernel immediately runs the first
+ * instruction of the new program. This is not a function call,
+ * and it never returns to the previous program. If the call
+ * fails, then the helper has no effect, and the caller continues
+ * to run its subsequent instructions. A call can fail if the
+ * destination program for the jump does not exist (i.e. *index*
+ * is greater than or equal to the number of entries in
+ * *prog_array_map*), or
+ * if the maximum number of tail calls has been reached for this
+ * chain of programs. This limit is defined in the kernel by the
+ * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
+ * which is currently set to 32.
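+ *
+ * A sketch of the usual pattern, assuming a
+ * **BPF_MAP_TYPE_PROG_ARRAY** map *jump_table* (hypothetical
+ * name) populated from user space; the index 1 is arbitrary:
+ *
+ * ::
+ *
+ * bpf_tail_call(ctx, &jump_table, 1);
+ *
+ * /* This point is reached only if the tail call failed,
+ *  * e.g. because no program is loaded at that index.
+ *  */
+ * return 0;
+ *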
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
+ * Description
+ * Clone and redirect the packet associated to *skb* to another
+ * net device of index *ifindex*. Both ingress and egress
+ * interfaces can be used for redirection. The **BPF_F_INGRESS**
+ * value in *flags* is used to make the distinction (ingress path
+ * is selected if the flag is present, egress path otherwise).
+ * This is the only flag supported for now.
+ *
+ * In comparison with **bpf_redirect**\ () helper,
+ * **bpf_clone_redirect**\ () has the associated cost of
+ * duplicating the packet buffer, but this can be executed out of
+ * the eBPF program. Conversely, **bpf_redirect**\ () is more
+ * efficient, but it is handled through an action code where the
+ * redirection happens only after the eBPF program has returned.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
* u64 bpf_get_current_pid_tgid(void)
- * Return: current->tgid << 32 | current->pid
+ * Return
+ * A 64-bit integer containing the current tgid and pid, and
+ * created as such:
+ * *current_task*\ **->tgid << 32 \|**
+ * *current_task*\ **->pid**.
*
* u64 bpf_get_current_uid_gid(void)
- * Return: current_gid << 32 | current_uid
- *
- * int bpf_get_current_comm(char *buf, int size_of_buf)
- * stores current->comm into buf
- * Return: 0 on success or negative error
- *
- * u32 bpf_get_cgroup_classid(skb)
- * retrieve a proc's classid
- * @skb: pointer to skb
- * Return: classid if != 0
- *
- * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
- * Return: 0 on success or negative error
- *
- * int bpf_skb_vlan_pop(skb)
- * Return: 0 on success or negative error
- *
- * int bpf_skb_get_tunnel_key(skb, key, size, flags)
- * int bpf_skb_set_tunnel_key(skb, key, size, flags)
- * retrieve or populate tunnel metadata
- * @skb: pointer to skb
- * @key: pointer to 'struct bpf_tunnel_key'
- * @size: size of 'struct bpf_tunnel_key'
- * @flags: room for future extensions
- * Return: 0 on success or negative error
- *
- * u64 bpf_perf_event_read(map, flags)
- * read perf event counter value
- * @map: pointer to perf_event_array map
- * @flags: index of event in the map or bitmask flags
- * Return: value of perf event counter read or error code
- *
- * int bpf_redirect(ifindex, flags)
- * redirect to another netdev
- * @ifindex: ifindex of the net device
- * @flags:
- * cls_bpf:
- * bit 0 - if set, redirect to ingress instead of egress
- * other bits - reserved
- * xdp_bpf:
- * all bits - reserved
- * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
- * xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error
- * int bpf_redirect_map(map, key, flags)
- * redirect to endpoint in map
- * @map: pointer to dev map
- * @key: index in map to lookup
- * @flags: --
- * Return: XDP_REDIRECT on success or XDP_ABORT on error
- *
- * u32 bpf_get_route_realm(skb)
- * retrieve a dst's tclassid
- * @skb: pointer to skb
- * Return: realm if != 0
- *
- * int bpf_perf_event_output(ctx, map, flags, data, size)
- * output perf raw sample
- * @ctx: struct pt_regs*
- * @map: pointer to perf_event_array map
- * @flags: index of event in the map or bitmask flags
- * @data: data on stack to be output as raw data
- * @size: size of data
- * Return: 0 on success or negative error
- *
- * int bpf_get_stackid(ctx, map, flags)
- * walk user or kernel stack and return id
- * @ctx: struct pt_regs*
- * @map: pointer to stack_trace map
- * @flags: bits 0-7 - numer of stack frames to skip
- * bit 8 - collect user stack instead of kernel
- * bit 9 - compare stacks by hash only
- * bit 10 - if two different stacks hash into the same stackid
- * discard old
- * other bits - reserved
- * Return: >= 0 stackid on success or negative error
- *
- * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
- * calculate csum diff
- * @from: raw from buffer
- * @from_size: length of from buffer
- * @to: raw to buffer
- * @to_size: length of to buffer
- * @seed: optional seed
- * Return: csum result or negative error code
- *
- * int bpf_skb_get_tunnel_opt(skb, opt, size)
- * retrieve tunnel options metadata
- * @skb: pointer to skb
- * @opt: pointer to raw tunnel option data
- * @size: size of @opt
- * Return: option size
- *
- * int bpf_skb_set_tunnel_opt(skb, opt, size)
- * populate tunnel options metadata
- * @skb: pointer to skb
- * @opt: pointer to raw tunnel option data
- * @size: size of @opt
- * Return: 0 on success or negative error
- *
- * int bpf_skb_change_proto(skb, proto, flags)
- * Change protocol of the skb. Currently supported is v4 -> v6,
- * v6 -> v4 transitions. The helper will also resize the skb. eBPF
- * program is expected to fill the new headers via skb_store_bytes
- * and lX_csum_replace.
- * @skb: pointer to skb
- * @proto: new skb->protocol type
- * @flags: reserved
- * Return: 0 on success or negative error
- *
- * int bpf_skb_change_type(skb, type)
- * Change packet type of skb.
- * @skb: pointer to skb
- * @type: new skb->pkt_type type
- * Return: 0 on success or negative error
- *
- * int bpf_skb_under_cgroup(skb, map, index)
- * Check cgroup2 membership of skb
- * @skb: pointer to skb
- * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
- * @index: index of the cgroup in the bpf_map
- * Return:
- * == 0 skb failed the cgroup2 descendant test
- * == 1 skb succeeded the cgroup2 descendant test
- * < 0 error
- *
- * u32 bpf_get_hash_recalc(skb)
- * Retrieve and possibly recalculate skb->hash.
- * @skb: pointer to skb
- * Return: hash
+ * Return
+ * A 64-bit integer containing the current GID and UID, and
+ * created as such: *current_gid* **<< 32 \|** *current_uid*.
+ *
+ * int bpf_get_current_comm(char *buf, u32 size_of_buf)
+ * Description
+ * Copy the **comm** attribute of the current task into *buf* of
+ * *size_of_buf*. The **comm** attribute contains the name of
+ * the executable (excluding the path) for the current task. The
+ * *size_of_buf* must be strictly positive. On success, the
+ * helper makes sure that the *buf* is NUL-terminated. On failure,
+ * it is filled with zeroes.
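+ *
+ * A sketch combining this helper with
+ * **bpf_get_current_pid_tgid**\ ():
+ *
+ * ::
+ *
+ * char comm[16];  /* TASK_COMM_LEN */
+ * __u64 id = bpf_get_current_pid_tgid();
+ * __u32 tgid = id >> 32;  /* process ID seen by user space */
+ *
+ * if (bpf_get_current_comm(comm, sizeof(comm)))
+ *         return 0;       /* name could not be retrieved */
+ * /* comm and tgid can now be used, e.g. as map keys. */
+ *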
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
+ * Description
+ * Retrieve the classid for the current task, i.e. for the net_cls
+ * cgroup to which *skb* belongs.
+ *
+ * This helper can be used on TC egress path, but not on ingress.
+ *
+ * The net_cls cgroup provides an interface to tag network packets
+ * based on a user-provided identifier for all traffic coming from
+ * the tasks belonging to the related cgroup. See also the related
+ * kernel documentation, available from the Linux sources in file
+ * *Documentation/cgroup-v1/net_cls.txt*.
+ *
+ * The Linux kernel has two versions for cgroups: there are
+ * cgroups v1 and cgroups v2. Both are available to users, who can
+ * use a mixture of them, but note that the net_cls cgroup is for
+ * cgroup v1 only. This makes it incompatible with BPF programs
+ * run on cgroups, which is a cgroup-v2-only feature (a socket can
+ * only hold data for one version of cgroups at a time).
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
+ * "**y**" or to "**m**".
+ * Return
+ * The classid, or 0 for the default unconfigured classid.
+ *
+ * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ * Description
+ * Push a *vlan_tci* (VLAN tag control information) of protocol
+ * *vlan_proto* to the packet associated to *skb*, then update
+ * the checksum. Note that if *vlan_proto* is different from
+ * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
+ * be **ETH_P_8021Q**.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_vlan_pop(struct sk_buff *skb)
+ * Description
+ * Pop a VLAN header from the packet associated to *skb*.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * Description
+ * Get tunnel metadata. This helper takes a pointer *key* to an
+ * empty **struct bpf_tunnel_key** of **size**, that will be
+ * filled with tunnel metadata for the packet associated to *skb*.
+ * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
+ * indicates that the tunnel is based on IPv6 protocol instead of
+ * IPv4.
+ *
+ * The **struct bpf_tunnel_key** is an object that generalizes the
+ * principal parameters used by various tunneling protocols into a
+ * single struct. This way, it can be used to easily make a
+ * decision based on the contents of the encapsulation header,
+ * "summarized" in this struct. In particular, it holds the IP
+ * address of the remote end (IPv4 or IPv6, depending on the case)
+ * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
+ * this struct exposes the *key*\ **->tunnel_id**, which is
+ * generally mapped to a VNI (Virtual Network Identifier), making
+ * it programmable together with the **bpf_skb_set_tunnel_key**\
+ * () helper.
+ *
+ * Let's imagine that the following code is part of a program
+ * attached to the TC ingress interface, on one end of a GRE
+ * tunnel, and is supposed to filter out all messages coming from
+ * remote ends with IPv4 address other than 10.0.0.1:
+ *
+ * ::
+ *
+ * int ret;
+ * struct bpf_tunnel_key key = {};
+ *
+ * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ * if (ret < 0)
+ * return TC_ACT_SHOT; // drop packet
+ *
+ * if (key.remote_ipv4 != 0x0a000001)
+ * return TC_ACT_SHOT; // drop packet
+ *
+ * return TC_ACT_OK; // accept packet
+ *
+ * This interface can also be used with all encapsulation devices
+ * that can operate in "collect metadata" mode: instead of having
+ * one network device per specific configuration, the "collect
+ * metadata" mode only requires a single device where the
+ * configuration can be extracted from this helper.
+ *
+ * This can be used together with various tunnels such as VXLan,
+ * Geneve, GRE or IP in IP (IPIP).
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * Description
+ * Populate tunnel metadata for the packet associated to *skb*. The
+ * tunnel metadata is set to the contents of *key*, of *size*. The
+ * *flags* can be set to a combination of the following values:
+ *
+ * **BPF_F_TUNINFO_IPV6**
+ * Indicate that the tunnel is based on IPv6 protocol
+ * instead of IPv4.
+ * **BPF_F_ZERO_CSUM_TX**
+ * For IPv4 packets, add a flag to tunnel metadata
+ * indicating that checksum computation should be skipped
+ * and checksum set to zeroes.
+ * **BPF_F_DONT_FRAGMENT**
+ * Add a flag to tunnel metadata indicating that the
+ * packet should not be fragmented.
+ * **BPF_F_SEQ_NUMBER**
+ * Add a flag to tunnel metadata indicating that a
+ * sequence number should be added to tunnel header before
+ * sending the packet. This flag was added for GRE
+ * encapsulation, but might be used with other protocols
+ * as well in the future.
+ *
+ * Here is a typical usage on the transmit path:
+ *
+ * ::
+ *
+ * struct bpf_tunnel_key key;
+ * populate key ...
+ * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+ * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
+ *
+ * See also the description of the **bpf_skb_get_tunnel_key**\ ()
+ * helper for additional information.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
+ * Description
+ * Read the value of a perf event counter. This helper relies on a
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
+ * the perf event counter is selected when *map* is updated with
+ * perf event file descriptors. The *map* is an array whose size
+ * is the number of available CPUs, and each cell contains a value
+ * relative to one CPU. The value to retrieve is indicated by
+ * *flags*, that contains the index of the CPU to look up, masked
+ * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
+ * **BPF_F_CURRENT_CPU** to indicate that the value for the
+ * current CPU should be retrieved.
+ *
+ * Note that before Linux 4.13, only hardware perf events could be
+ * retrieved.
+ *
+ * Also, be aware that the newer helper
+ * **bpf_perf_event_read_value**\ () is recommended over
+ * **bpf_perf_event_read**\ () in general. The latter has some ABI
+ * quirks where error and counter value are used as a return code
+ * (which is wrong to do since ranges may overlap). This issue is
+ * fixed with **bpf_perf_event_read_value**\ (), which at the same
+ * time provides more features over the **bpf_perf_event_read**\
+ * () interface. Please refer to the description of
+ * **bpf_perf_event_read_value**\ () for details.
+ * Return
+ * The value of the perf event counter read from the map, or a
+ * negative error code in case of failure.
+ *
+ * int bpf_redirect(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_clone_redirect**\
+ * (), except that the packet is not cloned, which provides
+ * increased performance.
+ *
+ * Except for XDP, both ingress and egress interfaces can be used
+ * for redirection. The **BPF_F_INGRESS** value in *flags* is used
+ * to make the distinction (ingress path is selected if the flag
+ * is present, egress path otherwise). Currently, XDP only
+ * supports redirection to the egress interface, and accepts no
+ * flag at all.
+ *
+ * The same effect can be attained with the more generic
+ * **bpf_redirect_map**\ (), which requires specific maps to be
+ * used but offers better performance.
+ * Return
+ * For XDP, the helper returns **XDP_REDIRECT** on success or
+ * **XDP_ABORTED** on error. For other program types, the values
+ * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
+ * error.
+ *
+ * u32 bpf_get_route_realm(struct sk_buff *skb)
+ * Description
+ * Retrieve the realm of the route, that is to say the
+ * **tclassid** field of the destination for the *skb*. The
+ * identifier retrieved is a user-provided tag, similar to the
+ * one used with the net_cls cgroup (see description for
+ * **bpf_get_cgroup_classid**\ () helper), but here this tag is
+ * held by a route (a destination entry), not by a task.
+ *
+ * Retrieving this identifier works with the clsact TC egress hook
+ * (see also **tc-bpf(8)**), or alternatively on conventional
+ * classful egress qdiscs, but not on TC ingress path. In case of
+ * clsact TC egress hook, this has the advantage that, internally,
+ * the destination entry has not been dropped yet in the transmit
+ * path. Therefore, the destination entry does not need to be
+ * artificially held via **netif_keep_dst**\ () for a classful
+ * qdisc until the *skb* is freed.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_IP_ROUTE_CLASSID** configuration option.
+ * Return
+ * The realm of the route for the packet associated to *skb*, or 0
+ * if none was found.
+ *
+ * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * The context of the program *ctx* needs also be passed to the
+ * helper.
+ *
+ * On user space, a program willing to read the values needs to
+ * call **perf_event_open**\ () on the perf event (either for
+ * one or for all CPUs) and to store the file descriptor into the
+ * *map*. This must be done before the eBPF program can send data
+ * into it. An example is available in file
+ * *samples/bpf/trace_output_user.c* in the Linux kernel source
+ * tree (the eBPF program counterpart is in
+ * *samples/bpf/trace_output_kern.c*).
+ *
+ * **bpf_perf_event_output**\ () achieves better performance
+ * than **bpf_trace_printk**\ () for sharing data with user
+ * space, and is much better suited to streaming data from eBPF
+ * programs.
+ *
+ * Note that this helper is not restricted to tracing use cases
+ * and can be used with programs attached to TC or XDP as well,
+ * where it allows for passing data to user space listeners. Data
+ * can be:
+ *
+ * * Only custom structs,
+ * * Only the packet payload, or
+ * * A combination of both.
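+ *
+ * A tracing sketch, assuming a
+ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map *events* (hypothetical
+ * name) whose file descriptors were installed from user space:
+ *
+ * ::
+ *
+ * struct {
+ *         __u32 tgid;
+ *         char comm[16];
+ * } data = {};
+ *
+ * data.tgid = bpf_get_current_pid_tgid() >> 32;
+ * bpf_get_current_comm(data.comm, sizeof(data.comm));
+ * bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
+ *                       &data, sizeof(data));
+ *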
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
+ * Description
+ * This helper was provided as an easy way to load data from a
+ * packet. It can be used to load *len* bytes from *offset* from
+ * the packet associated to *skb*, into the buffer pointed by
+ * *to*.
+ *
+ * Since Linux 4.7, usage of this helper has mostly been replaced
+ * by "direct packet access", enabling packet data to be
+ * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
+ * pointing respectively to the first byte of packet data and to
+ * the byte after the last byte of packet data. However, it
+ * remains useful if one wishes to read large quantities of data
+ * at once from a packet into the eBPF stack.
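+ *
+ * A sketch for a TC classifier, copying the Ethernet header onto
+ * the eBPF stack even if it lies in a non-linear part of the
+ * *skb*:
+ *
+ * ::
+ *
+ * struct ethhdr eth;
+ *
+ * if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
+ *         return TC_ACT_SHOT;
+ * /* eth.h_proto, eth.h_source, etc. can now be inspected. */
+ *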
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
+ * Description
+ * Walk a user or a kernel stack and return its id. To achieve
+ * this, the helper needs *ctx*, which is a pointer to the context
+ * on which the tracing program is executed, and a pointer to a
+ * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * a combination of the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_FAST_STACK_CMP**
+ * Compare stacks by hash only.
+ * **BPF_F_REUSE_STACKID**
+ * If two different stacks hash into the same *stackid*,
+ * discard the old one.
+ *
+ * The stack id retrieved is a 32-bit integer handle which
+ * can be further combined with other data (including other stack
+ * ids) and used as a key into maps. This can be useful for
+ * generating a variety of graphs (such as flame graphs or off-cpu
+ * graphs).
+ *
+ * For walking a stack, this helper is an improvement over
+ * **bpf_probe_read**\ (), which can be used with unrolled loops
+ * but is not efficient and consumes a lot of eBPF instructions.
+ * Instead, **bpf_get_stackid**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
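+ *
+ * A sketch, assuming a **BPF_MAP_TYPE_STACK_TRACE** map
+ * *stack_traces* (hypothetical name):
+ *
+ * ::
+ *
+ * int stackid;
+ *
+ * stackid = bpf_get_stackid(ctx, &stack_traces,
+ *                           BPF_F_USER_STACK);
+ * if (stackid < 0)
+ *         return 0;
+ * /* stackid can now be used as (part of) a key into another
+ *  * map, for example to count samples per unique user stack.
+ *  */
+ *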
+ * Return
+ * The positive or null stack id on success, or a negative error
+ * in case of failure.
+ *
+ * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
+ * Description
+ * Compute a checksum difference, from the raw buffer pointed by
+ * *from*, of length *from_size* (that must be a multiple of 4),
+ * towards the raw buffer pointed by *to*, of size *to_size*
+ * (same remark). An optional *seed* can be added to the value
+ * (this can be cascaded, the seed may come from a previous call
+ * to the helper).
+ *
+ * This is flexible enough to be used in several ways:
+ *
+ * * With *from_size* == 0, *to_size* > 0 and *seed* set to
+ * checksum, it can be used when pushing new data.
+ * * With *from_size* > 0, *to_size* == 0 and *seed* set to
+ * checksum, it can be used when removing data from a packet.
+ * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
+ * can be used to compute a diff. Note that *from_size* and
+ * *to_size* do not need to be equal.
+ *
+ * This helper can be used in combination with
+ * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
+ * which one can feed in the difference computed with
+ * **bpf_csum_diff**\ ().
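+ *
+ * A sketch of the "diff" use case when rewriting the IPv4 source
+ * address of a TCP packet; *IP_SRC_OFF*, *IP_CSUM_OFF* and
+ * *TCP_CSUM_OFF* are assumed offsets computed beforehand, and
+ * *new_ip* is assumed to be a **__be32** already holding the
+ * replacement address:
+ *
+ * ::
+ *
+ * __be32 old_ip;
+ * __wsum diff;
+ *
+ * bpf_skb_load_bytes(skb, IP_SRC_OFF, &old_ip, sizeof(old_ip));
+ * diff = bpf_csum_diff(&old_ip, sizeof(old_ip),
+ *                      &new_ip, sizeof(new_ip), 0);
+ * bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip,
+ *                     sizeof(new_ip), 0);
+ * bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip,
+ *                     sizeof(new_ip));
+ * bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, diff,
+ *                     BPF_F_PSEUDO_HDR);
+ *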
+ * Return
+ * The checksum result, or a negative error code in case of
+ * failure.
+ *
+ * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * Description
+ * Retrieve tunnel options metadata for the packet associated to
+ * *skb*, and store the raw tunnel option data to the buffer *opt*
+ * of *size*.
+ *
+ * This helper can be used with encapsulation devices that can
+ * operate in "collect metadata" mode (please refer to the related
+ * note in the description of **bpf_skb_get_tunnel_key**\ () for
+ * more details). A particular example where this can be used is
+ * in combination with the Geneve encapsulation protocol, where it
+ * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ ()
+ * helper) and retrieving arbitrary TLVs (Type-Length-Value headers) from
+ * the eBPF program. This allows for full customization of these
+ * headers.
+ * Return
+ * The size of the option data retrieved.
+ *
+ * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * Description
+ * Set tunnel options metadata for the packet associated to *skb*
+ * to the option data contained in the raw buffer *opt* of *size*.
+ *
+ * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
+ * helper for additional information.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
+ * Description
+ * Change the protocol of the *skb* to *proto*. Currently
+ * supported are transition from IPv4 to IPv6, and from IPv6 to
+ * IPv4. The helper takes care of the groundwork for the
+ * transition, including resizing the socket buffer. The eBPF
+ * program is expected to fill the new headers, if any, via
+ * **skb_store_bytes**\ () and to recompute the checksums with
+ * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
+ * (). The main case for this helper is to perform NAT64
+ * operations out of an eBPF program.
+ *
+ * Internally, the GSO type is marked as dodgy so that headers are
+ * checked and segments are recalculated by the GSO/GRO engine.
+ * The size for GSO target is adapted as well.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
+ * Description
+ * Change the packet type for the packet associated to *skb*. This
+ * comes down to setting *skb*\ **->pkt_type** to *type*, except
+ * the eBPF program does not have a write access to *skb*\
+ * **->pkt_type** beside this helper. Using a helper here allows
+ * for graceful handling of errors.
+ *
+ * The major use case is to change incoming *skb*s to
+ * **PACKET_HOST** in a programmatic way instead of having to
+ * recirculate via **bpf_redirect**\ (..., **BPF_F_INGRESS**), for
+ * example.
+ *
+ * Note that *type* only allows certain values. At this time, they
+ * are:
+ *
+ * **PACKET_HOST**
+ * Packet is for us.
+ * **PACKET_BROADCAST**
+ * Send packet to all.
+ * **PACKET_MULTICAST**
+ * Send packet to group.
+ * **PACKET_OTHERHOST**
+ * Send packet to someone else.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
+ * Description
+ * Check whether *skb* is a descendant of the cgroup2 held by
+ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
+ * Return
+ * The return value depends on the result of the test, and can be:
+ *
+ * * 0, if the *skb* failed the cgroup2 descendant test.
+ * * 1, if the *skb* succeeded the cgroup2 descendant test.
+ * * A negative error code, if an error occurred.
+ *
+ * u32 bpf_get_hash_recalc(struct sk_buff *skb)
+ * Description
+ * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
+ * not set, in particular if the hash was cleared due to mangling,
+ * recompute this hash. Later accesses to the hash can be done
+ * directly with *skb*\ **->hash**.
+ *
+ * Calling **bpf_set_hash_invalid**\ (), changing a packet
+ * prototype with **bpf_skb_change_proto**\ (), or calling
+ * **bpf_skb_store_bytes**\ () with the
+ * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
+ * the hash and to trigger a new computation for the next call to
+ * **bpf_get_hash_recalc**\ ().
+ * Return
+ * The 32-bit hash.
*
* u64 bpf_get_current_task(void)
- * Returns current task_struct
- * Return: current
- *
- * int bpf_probe_write_user(void *dst, void *src, int len)
- * safely attempt to write to a location
- * @dst: destination address in userspace
- * @src: source address on stack
- * @len: number of bytes to copy
- * Return: 0 on success or negative error
- *
- * int bpf_current_task_under_cgroup(map, index)
- * Check cgroup2 membership of current task
- * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
- * @index: index of the cgroup in the bpf_map
- * Return:
- * == 0 current failed the cgroup2 descendant test
- * == 1 current succeeded the cgroup2 descendant test
- * < 0 error
- *
- * int bpf_skb_change_tail(skb, len, flags)
- * The helper will resize the skb to the given new size, to be used f.e.
- * with control messages.
- * @skb: pointer to skb
- * @len: new skb length
- * @flags: reserved
- * Return: 0 on success or negative error
- *
- * int bpf_skb_pull_data(skb, len)
- * The helper will pull in non-linear data in case the skb is non-linear
- * and not all of len are part of the linear section. Only needed for
- * read/write with direct packet access.
- * @skb: pointer to skb
- * @len: len to make read/writeable
- * Return: 0 on success or negative error
- *
- * s64 bpf_csum_update(skb, csum)
- * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
- * @skb: pointer to skb
- * @csum: csum to add
- * Return: csum on success or negative error
- *
- * void bpf_set_hash_invalid(skb)
- * Invalidate current skb->hash.
- * @skb: pointer to skb
- *
- * int bpf_get_numa_node_id()
- * Return: Id of current NUMA node.
- *
- * int bpf_skb_change_head()
- * Grows headroom of skb and adjusts MAC header offset accordingly.
- * Will extends/reallocae as required automatically.
- * May change skb data pointer and will thus invalidate any check
- * performed for direct packet access.
- * @skb: pointer to skb
- * @len: length of header to be pushed in front
- * @flags: Flags (unused for now)
- * Return: 0 on success or negative error
- *
- * int bpf_xdp_adjust_head(xdp_md, delta)
- * Adjust the xdp_md.data by delta
- * @xdp_md: pointer to xdp_md
- * @delta: An positive/negative integer to be added to xdp_md.data
- * Return: 0 on success or negative on error
+ * Return
+ * A pointer to the current task struct.
+ *
+ * int bpf_probe_write_user(void *dst, const void *src, u32 len)
+ * Description
+ * Attempt in a safe way to write *len* bytes from the buffer
+ * *src* to *dst* in memory. It only works for threads that are in
+ * user context, and *dst* must be a valid user space address.
+ *
+ * This helper should not be used to implement any kind of
+ * security mechanism because of TOC-TOU attacks, but rather to
+ * debug, divert, and manipulate execution of semi-cooperative
+ * processes.
+ *
+ * Keep in mind that this feature is meant for experiments, and it
+ * has a risk of crashing the system and running programs.
+ * Therefore, when an eBPF program using this helper is attached,
+ * a warning including PID and process name is printed to kernel
+ * logs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
+ * Description
+ * Check whether the probe is being run in the context of a given
+ * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
+ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
+ * Return
+ * The return value depends on the result of the test, and can be:
+ *
+ * * 0, if current task does not belong to the cgroup2.
+ * * 1, if current task belongs to the cgroup2.
+ * * A negative error code, if an error occurred.
+ *
+ * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+ * Description
+ * Resize (trim or grow) the packet associated to *skb* to the
+ * new *len*. The *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * The basic idea is that the helper performs the needed work to
+ * change the size of the packet, then the eBPF program rewrites
+ * the rest via helpers like **bpf_skb_store_bytes**\ (),
+ * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
+ * and others. This helper is a slow path utility intended for
+ * replies with control messages. Because it is targeted at the
+ * slow path, the helper itself can afford to be slow: it
+ * implicitly linearizes, unclones and drops offloads from the
+ * *skb*.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
+ * Description
+ * Pull in non-linear data in case the *skb* is non-linear and not
+ * all of *len* are part of the linear section. Make *len* bytes
+ * from *skb* readable and writable. If a zero value is passed for
+ * *len*, then the whole length of the *skb* is pulled.
+ *
+ * This helper is only needed for reading and writing with direct
+ * packet access.
+ *
+ * For direct packet access, testing that offsets to access
+ * are within packet boundaries (test on *skb*\ **->data_end**) is
+ * susceptible to fail if offsets are invalid, or if the requested
+ * data is in non-linear parts of the *skb*. On failure the
+ * program can just bail out, or in the case of a non-linear
+ * buffer, use a helper to make the data available. The
+ * **bpf_skb_load_bytes**\ () helper is a first solution to access
+ * the data. Another one consists in using **bpf_skb_pull_data**\ ()
+ * to pull in the non-linear parts once, then retesting and
+ * eventually accessing the data.
+ *
+ * At the same time, this also makes sure the *skb* is uncloned,
+ * which is a necessary condition for direct write. As this needs
+ * to be an invariant for the write part only, the verifier
+ * detects writes and adds a prologue that is calling
+ * **bpf_skb_pull_data()** to effectively unclone the *skb* from
+ * the very beginning in case it is indeed cloned.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
+ * Description
+ * Add the checksum *csum* into *skb*\ **->csum** in case the
+ * driver has supplied a checksum for the entire packet into that
+ * field. Return an error otherwise. This helper is intended to be
+ * used in combination with **bpf_csum_diff**\ (), in particular
+ * when the checksum needs to be updated after data has been
+ * written into the packet through direct packet access.
+ * Return
+ * The checksum on success, or a negative error code in case of
+ * failure.
+ *
+ * void bpf_set_hash_invalid(struct sk_buff *skb)
+ * Description
+ * Invalidate the current *skb*\ **->hash**. It can be used after
+ * mangling on headers through direct packet access, in order to
+ * indicate that the hash is outdated and to trigger a
+ * recalculation the next time the kernel tries to access this
+ * hash or when the **bpf_get_hash_recalc**\ () helper is called.
+ *
+ * int bpf_get_numa_node_id(void)
+ * Description
+ * Return the id of the current NUMA node. The primary use case
+ * for this helper is the selection of sockets for the local NUMA
+ * node, when the program is attached to sockets using the
+ * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
+ * but the helper is also available to other eBPF program types,
+ * similarly to **bpf_get_smp_processor_id**\ ().
+ * Return
+ * The id of current NUMA node.
+ *
+ * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
+ * Description
+ * Grow the headroom of the packet associated to *skb* and adjust the
+ * offset of the MAC header accordingly, adding *len* bytes of
+ * space. It automatically extends and reallocates memory as
+ * required.
+ *
+ * This helper can be used on a layer 3 *skb* to push a MAC header
+ * for redirection into a layer 2 device.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
+ * Description
+ * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
+ * it is possible to use a negative value for *delta*. This helper
+ * can be used to prepare the packet for pushing or popping
+ * headers.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
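+ *
+ * A sketch making room for an extra header in front of the frame;
+ * the data pointers must be reloaded and bounds-checked after the
+ * call:
+ *
+ * ::
+ *
+ * void *data, *data_end;
+ *
+ * if (bpf_xdp_adjust_head(xdp_md, -(int)sizeof(struct ethhdr)))
+ *         return XDP_ABORTED;
+ *
+ * data = (void *)(long)xdp_md->data;
+ * data_end = (void *)(long)xdp_md->data_end;
+ * if (data + sizeof(struct ethhdr) > data_end)
+ *         return XDP_DROP;
+ * /* The new headroom can now be filled in. */
+ *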
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
* int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
- * Copy a NUL terminated string from unsafe address. In case the string
- * length is smaller than size, the target is not padded with further NUL
- * bytes. In case the string length is larger than size, just count-1
- * bytes are copied and the last byte is set to NUL.
- * @dst: destination address
- * @size: maximum number of bytes to copy, including the trailing NUL
- * @unsafe_ptr: unsafe address
- * Return:
- * > 0 length of the string including the trailing NUL on success
- * < 0 error
- *
- * u64 bpf_get_socket_cookie(skb)
- * Get the cookie for the socket stored inside sk_buff.
- * @skb: pointer to skb
- * Return: 8 Bytes non-decreasing number on success or 0 if the socket
- * field is missing inside sk_buff
- *
- * u32 bpf_get_socket_uid(skb)
- * Get the owner uid of the socket stored inside sk_buff.
- * @skb: pointer to skb
- * Return: uid of the socket owner on success or overflowuid if failed.
- *
- * u32 bpf_set_hash(skb, hash)
- * Set full skb->hash.
- * @skb: pointer to skb
- * @hash: hash to set
- *
- * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
- * Calls setsockopt. Not all opts are available, only those with
- * integer optvals plus TCP_CONGESTION.
- * Supported levels: SOL_SOCKET and IPPROTO_TCP
- * @bpf_socket: pointer to bpf_socket
- * @level: SOL_SOCKET or IPPROTO_TCP
- * @optname: option name
- * @optval: pointer to option value
- * @optlen: length of optval in bytes
- * Return: 0 or negative error
- *
- * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
- * Calls getsockopt. Not all opts are available.
- * Supported levels: IPPROTO_TCP
- * @bpf_socket: pointer to bpf_socket
- * @level: IPPROTO_TCP
- * @optname: option name
- * @optval: pointer to option value
- * @optlen: length of optval in bytes
- * Return: 0 or negative error
- *
- * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
- * Set callback flags for sock_ops
- * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
- * @flags: flags value
- * Return: 0 for no error
- * -EINVAL if there is no full tcp socket
- * bits in flags that are not supported by current kernel
- *
- * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
- * Grow or shrink room in sk_buff.
- * @skb: pointer to skb
- * @len_diff: (signed) amount of room to grow/shrink
- * @mode: operation mode (enum bpf_adj_room_mode)
- * @flags: reserved for future use
- * Return: 0 on success or negative error code
- *
- * int bpf_sk_redirect_map(map, key, flags)
- * Redirect skb to a sock in map using key as a lookup key for the
- * sock in map.
- * @map: pointer to sockmap
- * @key: key to lookup sock in map
- * @flags: reserved for future use
- * Return: SK_PASS
- *
- * int bpf_sock_map_update(skops, map, key, flags)
- * @skops: pointer to bpf_sock_ops
- * @map: pointer to sockmap to update
- * @key: key to insert/update sock in map
- * @flags: same flags as map update elem
- *
- * int bpf_xdp_adjust_meta(xdp_md, delta)
- * Adjust the xdp_md.data_meta by delta
- * @xdp_md: pointer to xdp_md
- * @delta: An positive/negative integer to be added to xdp_md.data_meta
- * Return: 0 on success or negative on error
- *
- * int bpf_perf_event_read_value(map, flags, buf, buf_size)
- * read perf event counter value and perf event enabled/running time
- * @map: pointer to perf_event_array map
- * @flags: index of event in the map or bitmask flags
- * @buf: buf to fill
- * @buf_size: size of the buf
- * Return: 0 on success or negative error code
- *
- * int bpf_perf_prog_read_value(ctx, buf, buf_size)
- * read perf prog attached perf event counter and enabled/running time
- * @ctx: pointer to ctx
- * @buf: buf to fill
- * @buf_size: size of the buf
- * Return : 0 on success or negative error code
- *
- * int bpf_override_return(pt_regs, rc)
- * @pt_regs: pointer to struct pt_regs
- * @rc: the return value to set
- *
- * int bpf_msg_redirect_map(map, key, flags)
- * Redirect msg to a sock in map using key as a lookup key for the
- * sock in map.
- * @map: pointer to sockmap
- * @key: key to lookup sock in map
- * @flags: reserved for future use
- * Return: SK_PASS
- *
- * int bpf_bind(ctx, addr, addr_len)
- * Bind socket to address. Only binding to IP is supported, no port can be
- * set in addr.
- * @ctx: pointer to context of type bpf_sock_addr
- * @addr: pointer to struct sockaddr to bind socket to
- * @addr_len: length of sockaddr structure
- * Return: 0 on success or negative error code
+ * Description
+ * Copy a NUL terminated string from an unsafe address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using the **bpf_probe_read**\ () helper here
+ * instead to read the string would require estimating the length at
+ * compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another useful use case is when parsing individual process
+ * arguments or individual environment variables navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * u64 bpf_get_socket_cookie(struct sk_buff *skb)
+ * Description
+ * If the **struct sk_buff** pointed by *skb* has a known socket,
+ * retrieve the cookie (generated by the kernel) of this socket.
+ * If no cookie has been set yet, generate a new cookie. Once
+ * generated, the socket cookie remains stable for the life of the
+ * socket. This helper can be useful for monitoring per socket
+ * networking traffic statistics as it provides a unique socket
+ * identifier per namespace.
+ * Return
+ * An 8-byte long non-decreasing number on success, or 0 if the
+ * socket field is missing inside *skb*.
+ *
+ * u32 bpf_get_socket_uid(struct sk_buff *skb)
+ * Return
+ * The owner UID of the socket associated to *skb*. If the socket
+ * is **NULL**, or if it is not a full socket (i.e. if it is a
+ * time-wait or a request socket instead), **overflowuid** value
+ * is returned (note that **overflowuid** might also be the actual
+ * UID value for the socket).
+ *
+ * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
+ * Description
+ * Set the full hash for *skb* (set the field *skb*\ **->hash**)
+ * to value *hash*.
+ * Return
+ * 0
+ *
+ * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * Description
+ * Emulate a call to **setsockopt()** on the socket associated to
+ * *bpf_socket*, which must be a full socket. The *level* at
+ * which the option resides and the name *optname* of the option
+ * must be specified, see **setsockopt(2)** for more information.
+ * The option value of length *optlen* is pointed by *optval*.
+ *
+ * This helper actually implements a subset of **setsockopt()**.
+ * It supports the following *level*\ s:
+ *
+ * * **SOL_SOCKET**, which supports the following *optname*\ s:
+ * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
+ * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
+ * * **IPPROTO_TCP**, which supports the following *optname*\ s:
+ * **TCP_CONGESTION**, **TCP_BPF_IW**,
+ * **TCP_BPF_SNDCWND_CLAMP**.
+ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
+ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
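+ *
+ * A sketch for a **BPF_PROG_TYPE_SOCK_OPS** program with context
+ * *skops*, switching the congestion control algorithm (the name
+ * "cubic" is only an example):
+ *
+ * ::
+ *
+ * char cc[] = "cubic";
+ *
+ * bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
+ *                cc, sizeof(cc));
+ *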
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
+ * Description
+ * Grow or shrink the room for data in the packet associated to
+ * *skb* by *len_diff*, and according to the selected *mode*.
+ *
+ * There is a single supported mode at this time:
+ *
+ * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
+ * (room space is added or removed below the layer 3 header).
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * Description
+ * Redirect the packet to the endpoint referenced by *map* at
+ * index *key*. Depending on its type, this *map* can contain
+ * references to net devices (for forwarding packets through other
+ * ports), or to CPUs (for redirecting XDP frames to another CPU;
+ * but this is only implemented for native XDP (with driver
+ * support) as of this writing).
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * When used to redirect packets to net devices, this helper
+ * provides a significant performance increase over **bpf_redirect**\ ().
+ * This is due to various implementation details of the underlying
+ * mechanisms, one of which is the fact that **bpf_redirect_map**\
+ * () tries to send packets in bulk to the device.
+ * Return
+ * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
+ *
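A minimal XDP sketch of the net-device case: user space is expected to populate index 0 of the devmap with the ifindex of the egress device. The bpf_map_def/SEC("maps") convention of the kernel samples and selftests is assumed; names are illustrative.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") tx_port = {
    .type = BPF_MAP_TYPE_DEVMAP,
    .key_size = sizeof(__u32),
    .value_size = sizeof(__u32),    /* ifindex, filled in by user space */
    .max_entries = 1,
};

SEC("xdp")
int xdp_redirect_all(struct xdp_md *ctx)
{
    /* Returns XDP_REDIRECT on success, XDP_ABORTED otherwise. */
    return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";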
+ * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * Description
+ * Redirect the packet to the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ * Return
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ *
+ * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * Add an entry to, or update a *map* referencing sockets. The
+ * *skops* is used as a new value for the entry associated to
+ * *key*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * If the *map* has eBPF programs (parser and verdict), those will
+ * be inherited by the socket being added. If the socket is
+ * already attached to eBPF programs, this results in an error.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
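As an illustration, a sock_ops program might add every passively established TCP socket to a sockmap so that a parser/verdict program can later operate on it. A sketch under the same bpf_helpers.h assumptions as above, with an arbitrary fixed key:

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") sock_map = {
    .type = BPF_MAP_TYPE_SOCKMAP,
    .key_size = sizeof(int),
    .value_size = sizeof(int),
    .max_entries = 2,
};

SEC("sockops")
int add_to_sockmap(struct bpf_sock_ops *skops)
{
    int key = 0;

    /* Insert the socket once established; fail if the slot is taken. */
    if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
        bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
    return 1;
}

char _license[] SEC("license") = "GPL";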
+ * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
+ * Description
+ * Adjust the address pointed by *xdp_md*\ **->data_meta** by
+ * *delta* (which can be positive or negative). Note that this
+ * operation modifies the address stored in *xdp_md*\ **->data**,
+ * so the latter must be loaded only after the helper has been
+ * called.
+ *
+ * The use of *xdp_md*\ **->data_meta** is optional and programs
+ * are not required to use it. The rationale is that when the
+ * packet is processed with XDP (e.g. as DoS filter), it is
+ * possible to push further meta data along with it before passing
+ * to the stack, and to give the guarantee that an ingress eBPF
+ * program attached as a TC classifier on the same device can pick
+ * this up for further post-processing. Since TC works with socket
+ * buffers, it remains possible to set from XDP the **mark** or
+ * **priority** pointers, or other pointers for the socket buffer.
+ * Having this scratch space generic and programmable allows for
+ * more flexibility as the user is free to store whatever meta
+ * data they need.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
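A sketch of the metadata scratch space in practice: reserve four bytes in front of the packet data, re-derive the pointers, and store a value that a TC classifier on the same device could read from the metadata area later. Helper declarations are assumed from bpf_helpers.h; the stored value is arbitrary.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
    __u32 *meta;
    void *data;

    /* Grow the metadata area by 4 bytes (a negative delta moves data_meta down). */
    if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
        return XDP_PASS;

    /* Pointers must be re-loaded after the helper call. */
    meta = (void *)(long)ctx->data_meta;
    data = (void *)(long)ctx->data;
    if ((void *)(meta + 1) > data)      /* bounds check for the verifier */
        return XDP_PASS;

    *meta = 0x42;                       /* scratch value for a later TC program */
    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";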
+ * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
+ * Description
+ * Read the value of a perf event counter, and store it into *buf*
+ * of size *buf_size*. This helper relies on a *map* of type
+ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
+ * counter is selected when *map* is updated with perf event file
+ * descriptors. The *map* is an array whose size is the number of
+ * available CPUs, and each cell contains a value relative to one
+ * CPU. The value to retrieve is indicated by *flags*, that
+ * contains the index of the CPU to look up, masked with
+ * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
+ * **BPF_F_CURRENT_CPU** to indicate that the value for the
+ * current CPU should be retrieved.
+ *
+ * This helper behaves in a way close to
+ * **bpf_perf_event_read**\ () helper, save that instead of
+ * just returning the value observed, it fills the *buf*
+ * structure. This allows for additional data to be retrieved: in
+ * particular, the enabled and running times (in *buf*\
+ * **->enabled** and *buf*\ **->running**, respectively) are
+ * copied. In general, **bpf_perf_event_read_value**\ () is
+ * recommended over **bpf_perf_event_read**\ (), which has some
+ * ABI issues and provides less functionality.
+ *
+ * These values are interesting, because hardware PMU (Performance
+ * Monitoring Unit) counters are limited resources. When there are
+ * more PMU based perf events opened than available counters,
+ * kernel will multiplex these events so each event gets certain
+ * percentage (but not all) of the PMU time. In case that
+ * multiplexing happens, the number of samples or counter value
+ * will not reflect the case compared to when no multiplexing
+ * occurs. This makes comparison between different runs difficult.
+ * Typically, the counter value should be normalized before
+ * comparing to other experiments. The usual normalization is done
+ * as follows.
+ *
+ * ::
+ *
+ * normalized_counter = counter * t_enabled / t_running
+ *
+ * Where t_enabled is the time enabled for event and t_running is
+ * the time running for event since last normalization. The
+ * enabled and running times are accumulated since the perf event
+ * open. To obtain a scaling factor between two invocations of an
+ * eBPF program, users can use the CPU id as the key (which is
+ * typical for perf array usage model) to remember the previous
+ * value and do the calculation inside the eBPF program.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
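The normalization above, applied inside a tracing program; the kprobe target, map size and output via bpf_trace_printk() are illustrative, and the helper/map declarations are again assumed from bpf_helpers.h. User space must first populate the array with perf event file descriptors.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") counters = {
    .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    .key_size = sizeof(int),
    .value_size = sizeof(__u32),
    .max_entries = 64,          /* >= number of possible CPUs */
};

SEC("kprobe/sys_write")
int read_counter(struct pt_regs *ctx)
{
    struct bpf_perf_event_value v = {};
    char fmt[] = "normalized counter: %llu\n";
    __u64 normalized = 0;

    /* Read the counter that was opened for the current CPU. */
    if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU, &v, sizeof(v)))
        return 0;

    if (v.running)
        normalized = v.counter * v.enabled / v.running;

    bpf_trace_printk(fmt, sizeof(fmt), normalized);
    return 0;
}

char _license[] SEC("license") = "GPL";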
+ * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+ * Description
+ * For an eBPF program attached to a perf event, retrieve the
+ * value of the event counter associated to *ctx* and store it in
+ * the structure pointed by *buf* and of size *buf_size*. Enabled
+ * and running times are also stored in the structure (see
+ * description of helper **bpf_perf_event_read_value**\ () for
+ * more details).
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * Description
+ * Emulate a call to **getsockopt()** on the socket associated to
+ * *bpf_socket*, which must be a full socket. The *level* at
+ * which the option resides and the name *optname* of the option
+ * must be specified, see **getsockopt(2)** for more information.
+ * The retrieved value is stored in the structure pointed by
+ * *optval* and of length *optlen*.
+ *
+ * This helper actually implements a subset of **getsockopt()**.
+ * It supports the following *level*\ s:
+ *
+ * * **IPPROTO_TCP**, which supports *optname*
+ * **TCP_CONGESTION**.
+ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
+ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * Description
+ * Used for error injection, this helper uses kprobes to override
+ * the return value of the probed function, and to set it to *rc*.
+ * The first argument is the context *regs* on which the kprobe
+ * works.
+ *
+ * This helper works by setting the PC (program counter)
+ * to an override function which is run in place of the original
+ * probed function. This means the probed function is not run at
+ * all. The replacement function just returns with the required
+ * value.
+ *
+ * This helper has security implications, and thus is subject to
+ * restrictions. It is only available if the kernel was compiled
+ * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
+ * option, and in this case it only works on functions tagged with
+ * **ALLOW_ERROR_INJECTION** in the kernel code.
+ *
+ * Also, the helper is only available for architectures that have
+ * the **CONFIG_FUNCTION_ERROR_INJECTION** option. As of this
+ * writing, x86 is the only architecture to support this feature.
+ * Return
+ * 0
+ *
+ * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
+ * Description
+ * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
+ * for the full TCP socket associated to *bpf_sock* to
+ * *argval*.
+ *
+ * The primary use of this field is to determine if there should
+ * be calls to eBPF programs of type
+ * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
+ * code. A program of the same type can change its value, per
+ * connection and as necessary, when the connection is
+ * established. This field is directly accessible for reading, but
+ * this helper must be used for updates in order to return an
+ * error if an eBPF program tries to set a callback that is not
+ * supported in the current kernel.
+ *
+ * The supported callback values that *argval* can combine are:
+ *
+ * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
+ * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
+ * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
+ *
+ * Here are some examples of where one could call such eBPF
+ * program:
+ *
+ * * When RTO fires.
+ * * When a packet is retransmitted.
+ * * When the connection terminates.
+ * * When a packet is sent.
+ * * When a packet is received.
+ * Return
+ * Code **-EINVAL** if the socket is not a full TCP socket;
+ * otherwise, a positive number containing the bits that could not
+ * be set is returned (which comes down to 0 if all bits were set
+ * as required).
+ *
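A short sockops sketch that requests RTO and retransmission callbacks once a connection is established; any bits the running kernel does not support come back in the helper's return value (ignored here). Same bpf_helpers.h assumptions as in the earlier sketches.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sockops")
int enable_cb(struct bpf_sock_ops *skops)
{
    if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
        bpf_sock_ops_cb_flags_set(skops,
                                  BPF_SOCK_OPS_RTO_CB_FLAG |
                                  BPF_SOCK_OPS_RETRANS_CB_FLAG);
    return 1;
}

char _license[] SEC("license") = "GPL";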
+ * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+ * Description
+ * This helper is used in programs implementing policies at the
+ * socket level. If the message *msg* is allowed to pass (i.e. if
+ * the verdict eBPF program returns **SK_PASS**), redirect it to
+ * the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ * Return
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ *
+ * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * Description
+ * For socket policies, apply the verdict of the eBPF program to
+ * the next *bytes* (number of bytes) of message *msg*.
+ *
+ * For example, this helper can be used in the following cases:
+ *
+ * * A single **sendmsg**\ () or **sendfile**\ () system call
+ * contains multiple logical messages that the eBPF program is
+ * supposed to read and for which it should apply a verdict.
+ * * An eBPF program only needs to read the first *bytes* of a
+ * *msg*. If the message has a large payload, then setting up
+ * and calling the eBPF program repeatedly for all bytes, even
+ * though the verdict is already known, would create unnecessary
+ * overhead.
+ *
+ * When called from within an eBPF program, the helper sets a
+ * counter internal to the BPF infrastructure, that is used to
+ * apply the last verdict to the next *bytes*. If *bytes* is
+ * smaller than the current data being processed from a
+ * **sendmsg**\ () or **sendfile**\ () system call, the first
+ * *bytes* will be sent and the eBPF program will be re-run with
+ * the pointer for start of data pointing to byte number *bytes*
+ * **+ 1**. If *bytes* is larger than the current data being
+ * processed, then the eBPF verdict will be applied to multiple
+ * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
+ * consumed.
+ *
+ * Note that if a socket closes with the internal counter holding
+ * a non-zero value, this is not a problem because data is not
+ * being buffered for *bytes* and is sent as it is received.
+ * Return
+ * 0
+ *
+ * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * Description
+ * For socket policies, prevent the execution of the verdict eBPF
+ * program for message *msg* until *bytes* (byte number) have been
+ * accumulated.
+ *
+ * This can be used when one needs a specific number of bytes
+ * before a verdict can be assigned, even if the data spans
+ * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
+ * case would be a user calling **sendmsg**\ () repeatedly with
+ * 1-byte long message segments. Obviously, this is bad for
+ * performance, but it is still valid. If the eBPF program needs
+ * *bytes* bytes to validate a header, this helper can be used to
+ * prevent the eBPF program from being called again until *bytes* have
+ * been accumulated.
+ * Return
+ * 0
+ *
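The two byte-accounting helpers are typically used together from a verdict program: wait until a small application header has arrived, then let the verdict cover a larger chunk so the program is not re-run for every segment. A sketch, with the byte counts picked arbitrarily and declarations assumed from bpf_helpers.h:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sk_msg")
int msg_policy(struct sk_msg_md *msg)
{
    /* Do not run the verdict until at least 8 bytes have accumulated... */
    bpf_msg_cork_bytes(msg, 8);
    /* ...and then apply the verdict below to the next 4096 bytes. */
    bpf_msg_apply_bytes(msg, 4096);
    return SK_PASS;
}

char _license[] SEC("license") = "GPL";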
+ * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+ * Description
+ * For socket policies, pull in non-linear data from user space
+ * for *msg* and set pointers *msg*\ **->data** and *msg*\
+ * **->data_end** to *start* and *end* bytes offsets into *msg*,
+ * respectively.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it can only parse data that the (**data**, **data_end**)
+ * pointers have already consumed. For **sendmsg**\ () hooks this
+ * is likely the first scatterlist element. But for calls relying
+ * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
+ * be the range (**0**, **0**) because the data is shared with
+ * user space and by default the objective is to avoid allowing
+ * user space to modify data while (or after) eBPF verdict is
+ * being decided. This helper can be used to pull in data and to
+ * set the start and end pointer to given values. Data will be
+ * copied if necessary (i.e. if data was not linear and if start
+ * and end pointers do not point to the same chunk).
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
+ * Description
+ * Bind the socket associated to *ctx* to the address pointed by
+ * *addr*, of length *addr_len*. This allows for making outgoing
+ * connection from the desired IP address, which can be useful for
+ * example when all processes inside a cgroup should use a single
+ * IP address on a host that has multiple IP addresses configured.
+ *
+ * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
+ * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
+ * **AF_INET6**). Looking for a free port to bind to can be
+ * expensive, therefore binding to a port is not permitted by the
+ * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
+ * must be set to zero.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
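A sketch of the cgroup use case: a program attached at connect(2) time binds outgoing IPv4 sockets to a fixed source address before the connection proceeds. bpf_helpers.h and bpf_endian.h are assumed from the kernel selftests, the section name follows their convention, and 127.0.0.1 stands in for the real per-cgroup address.

#include <sys/socket.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("cgroup/connect4")
int bind_to_src(struct bpf_sock_addr *ctx)
{
    struct sockaddr_in sa = {};

    sa.sin_family = AF_INET;
    sa.sin_addr.s_addr = bpf_htonl(0x7f000001);   /* 127.0.0.1, illustrative */
    /* sin_port stays 0: the helper refuses to pick a port. */

    if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
        return 0;   /* reject the connect() if the bind failed */
    return 1;
}

char _license[] SEC("license") = "GPL";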
+ * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
+ * Description
+ * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
+ * only possible to shrink the packet as of this writing,
+ * therefore *delta* must be a negative integer.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
+ * Description
+ * Retrieve the XFRM state (IP transform framework, see also
+ * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
+ *
+ * The retrieved value is stored in the **struct bpf_xfrm_state**
+ * pointed by *xfrm_state* and of length *size*.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_XFRM** configuration option.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in the buffer provided by the
+ * eBPF program. To achieve this, the helper needs *regs*, which is
+ * a pointer to the context on which the tracing program is
+ * executed. To store the stacktrace, the eBPF program provides
+ * *buf* with a non-negative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_stack**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** kernel and user frames, provided the
+ * buffer size is sufficiently large. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ * Return
+ * A non-negative value equal to or less than *size* on success,
+ * or a negative error in case of failure.
+ *
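A tracing sketch that collects a user-space stack into an on-stack buffer; the kprobe target is illustrative and the declarations are assumed from bpf_helpers.h. What to do with the collected frames (for example, pushing them to a perf event array) is left out.

#include <linux/bpf.h>
#include "bpf_helpers.h"

#define MAX_FRAMES 32   /* 32 * 8 = 256 bytes, well within the BPF stack */

SEC("kprobe/sys_nanosleep")
int get_user_stack(struct pt_regs *ctx)
{
    __u64 frames[MAX_FRAMES];
    int len;

    /* Returns the number of bytes written, or a negative error. */
    len = bpf_get_stack(ctx, frames, sizeof(frames), BPF_F_USER_STACK);
    if (len < 0)
        return 0;

    /* ... hand frames[0 .. len/8 - 1] to user space ... */
    return 0;
}

char _license[] SEC("license") = "GPL";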
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * Description
+ * This helper is similar to **bpf_skb_load_bytes**\ () in that
+ * it provides an easy way to load *len* bytes from *offset*
+ * from the packet associated to *skb*, into the buffer pointed
+ * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
+ * a fifth argument *start_header* exists in order to select a
+ * base offset to start from. *start_header* can be one of:
+ *
+ * **BPF_HDR_START_MAC**
+ * Base offset to load data from is *skb*'s mac header.
+ * **BPF_HDR_START_NET**
+ * Base offset to load data from is *skb*'s network header.
+ *
+ * In general, "direct packet access" is the preferred method to
+ * access packet data; however, this helper is particularly useful
+ * in socket filters where *skb*\ **->data** does not always point
+ * to the start of the mac header and where "direct packet access"
+ * is not available.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
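A socket filter sketch of the network-header base: load the IPv4 header relative to the network header, which works even when *skb*->data is not at the MAC header, and keep only TCP packets. The socket-filter return convention (number of bytes to keep, 0 to drop) applies; declarations are assumed from bpf_helpers.h.

#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/ip.h>
#include "bpf_helpers.h"

SEC("socket")
int keep_only_tcp(struct __sk_buff *skb)
{
    struct iphdr iph;

    if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
                                    BPF_HDR_START_NET))
        return 0;

    return iph.protocol == IPPROTO_TCP ? skb->len : 0;
}

char _license[] SEC("license") = "GPL";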
+ * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
+ * Description
+ * Do FIB lookup in kernel tables using parameters in *params*.
+ * If the lookup is successful and the result shows the packet is
+ * to be forwarded, the neighbor tables are searched for the
+ * nexthop. If successful (i.e., the FIB lookup shows forwarding
+ * and the nexthop is resolved), the nexthop address is returned in
+ * ipv4_dst or ipv6_dst based on family, smac is set to the MAC
+ * address of the egress device, dmac is set to the nexthop MAC
+ * address, and rt_metric is set to the metric from the route
+ * (IPv4/IPv6 only).
+ *
+ * The *plen* argument is the size of the passed-in struct. The
+ * *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_FIB_LOOKUP_DIRECT**
+ * Do a direct table lookup vs full lookup using FIB
+ * rules.
+ * **BPF_FIB_LOOKUP_OUTPUT**
+ * Perform lookup from an egress perspective (default is
+ * ingress).
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ * Return
+ * Egress device index on success, 0 if packet needs to continue
+ * up the stack for further processing or a negative error in case
+ * of failure.
+ *
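An XDP forwarding sketch along the lines of the kernel's xdp_fwd sample: fill the lookup struct from the IPv4 header, consult the FIB, and redirect through the returned egress ifindex. The MAC rewrite using smac/dmac is noted but elided; declarations come from bpf_helpers.h/bpf_endian.h (an assumption).

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

#ifndef AF_INET
#define AF_INET 2
#endif

SEC("xdp")
int xdp_fwd(struct xdp_md *ctx)
{
    void *data_end = (void *)(long)ctx->data_end;
    void *data = (void *)(long)ctx->data;
    struct ethhdr *eth = data;
    struct iphdr *iph = data + sizeof(*eth);
    struct bpf_fib_lookup fib = {};
    int rc;

    if ((void *)(iph + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
        return XDP_PASS;

    fib.family      = AF_INET;
    fib.tos         = iph->tos;
    fib.l4_protocol = iph->protocol;
    fib.tot_len     = bpf_ntohs(iph->tot_len);
    fib.ipv4_src    = iph->saddr;
    fib.ipv4_dst    = iph->daddr;
    fib.ifindex     = ctx->ingress_ifindex;

    rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
    if (rc <= 0)
        return XDP_PASS;    /* not forwarded, or lookup error */

    /* rc is the egress ifindex; fib.smac/fib.dmac hold the addresses
     * to write into the Ethernet header before redirecting (elided).
     */
    return bpf_redirect(rc, 0);
}

char _license[] SEC("license") = "GPL";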
+ * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * Add an entry to, or update a sockhash *map* referencing sockets.
+ * The *skops* is used as a new value for the entry associated to
+ * *key*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * If the *map* has eBPF programs (parser and verdict), those will
+ * be inherited by the socket being added. If the socket is
+ * already attached to eBPF programs, this results in an error.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * This helper is used in programs implementing policies at the
+ * socket level. If the message *msg* is allowed to pass (i.e. if
+ * the verdict eBPF program returns **SK_PASS**), redirect it to
+ * the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ * Return
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ *
+ * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * This helper is used in programs implementing policies at the
+ * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
+ * to the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress otherwise). This is the only flag supported for now.
+ * Return
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ *
+ * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
+ * Description
+ * Encapsulate the packet associated to *skb* within a Layer 3
+ * protocol header. This header is provided in the buffer at
+ * address *hdr*, with *len* its size in bytes. *type* indicates
+ * the protocol of the header and can be one of:
+ *
+ * **BPF_LWT_ENCAP_SEG6**
+ * IPv6 encapsulation with Segment Routing Header
+ * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
+ * the IPv6 header is computed by the kernel.
+ * **BPF_LWT_ENCAP_SEG6_INLINE**
+ * Only works if *skb* contains an IPv6 packet. Insert a
+ * Segment Routing Header (**struct ipv6_sr_hdr**) inside
+ * the IPv6 header.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
+ * Description
+ * Store *len* bytes from address *from* into the packet
+ * associated to *skb*, at *offset*. Only the flags, tag and TLVs
+ * inside the outermost IPv6 Segment Routing Header can be
+ * modified through this helper.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
+ * Description
+ * Adjust the size allocated to TLVs in the outermost IPv6
+ * Segment Routing Header contained in the packet associated to
+ * *skb*, at position *offset* by *delta* bytes. Only offsets
+ * after the segments are accepted. *delta* can be positive
+ * (growing) as well as negative (shrinking).
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
+ * Description
+ * Apply an IPv6 Segment Routing action of type *action* to the
+ * packet associated to *skb*. Each action takes a parameter
+ * contained at address *param*, and of length *param_len* bytes.
+ * *action* can be one of:
+ *
+ * **SEG6_LOCAL_ACTION_END_X**
+ * End.X action: Endpoint with Layer-3 cross-connect.
+ * Type of *param*: **struct in6_addr**.
+ * **SEG6_LOCAL_ACTION_END_T**
+ * End.T action: Endpoint with specific IPv6 table lookup.
+ * Type of *param*: **int**.
+ * **SEG6_LOCAL_ACTION_END_B6**
+ * End.B6 action: Endpoint bound to an SRv6 policy.
+ * Type of *param*: **struct ipv6_sr_hdr**.
+ * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
+ * End.B6.Encap action: Endpoint bound to an SRv6
+ * encapsulation policy.
+ * Type of *param*: **struct ipv6_sr_hdr**.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * Description
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded key press with *scancode* and
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period, a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
+ *
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Return
+ * 0
+ *
+ * int bpf_rc_repeat(void *ctx)
+ * Description
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for the previously generated
+ * key down event.
+ *
+ * Some IR protocols like NEC have a special IR message for
+ * repeating the last button press when a button is held down.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Return
+ * 0
+ *
+ * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * Description
+ * Return the cgroup v2 id of the socket associated with the *skb*.
+ * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
+ * helper for cgroup v1, in that it provides a tag (identifier)
+ * that can be matched on or used for map lookups, e.g. to implement
+ * policy. The cgroup v2 id of a given path in the hierarchy is
+ * exposed in user space through the f_handle API in order to get
+ * to the same 64-bit id.
+ *
+ * This helper can be used on TC egress path, but not on ingress,
+ * and is available only if the kernel was compiled with the
+ * **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * u64 bpf_get_current_cgroup_id(void)
+ * Return
+ * A 64-bit integer containing the current cgroup id based
+ * on the cgroup within which the current task is running.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -821,7 +2141,23 @@ union bpf_attr {
FN(msg_apply_bytes), \
FN(msg_cork_bytes), \
FN(msg_pull_data), \
- FN(bind),
+ FN(bind), \
+ FN(xdp_adjust_tail), \
+ FN(skb_get_xfrm_state), \
+ FN(get_stack), \
+ FN(skb_load_bytes_relative), \
+ FN(fib_lookup), \
+ FN(sock_hash_update), \
+ FN(msg_redirect_hash), \
+ FN(sk_redirect_hash), \
+ FN(lwt_push_encap), \
+ FN(lwt_seg6_store_bytes), \
+ FN(lwt_seg6_adjust_srh), \
+ FN(lwt_seg6_action), \
+ FN(rc_repeat), \
+ FN(rc_keydown), \
+ FN(skb_cgroup_id), \
+ FN(get_current_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -855,11 +2191,14 @@ enum bpf_func_id {
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
-/* BPF_FUNC_get_stackid flags. */
+/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
#define BPF_F_SKIP_FIELD_MASK 0xffULL
#define BPF_F_USER_STACK (1ULL << 8)
+/* flags used by BPF_FUNC_get_stackid only. */
#define BPF_F_FAST_STACK_CMP (1ULL << 9)
#define BPF_F_REUSE_STACKID (1ULL << 10)
+/* flags used by BPF_FUNC_get_stack only. */
+#define BPF_F_USER_BUILD_ID (1ULL << 11)
/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
@@ -879,6 +2218,18 @@ enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
};
+/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
+enum bpf_hdr_start_off {
+ BPF_HDR_START_MAC,
+ BPF_HDR_START_NET,
+};
+
+/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
+enum bpf_lwt_encap_mode {
+ BPF_LWT_ENCAP_SEG6,
+ BPF_LWT_ENCAP_SEG6_INLINE
+};
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -923,10 +2274,24 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
- __u16 tunnel_ext;
+ __u16 tunnel_ext; /* Padding, future use. */
__u32 tunnel_label;
};
+/* user accessible mirror of in-kernel xfrm_state.
+ * new fields can only be added to the end of this structure
+ */
+struct bpf_xfrm_state {
+ __u32 reqid;
+ __u32 spi; /* Stored in network byte order */
+ __u16 family;
+ __u16 ext; /* Padding, future use. */
+ union {
+ __u32 remote_ipv4; /* Stored in network byte order */
+ __u32 remote_ipv6[4]; /* Stored in network byte order */
+ };
+};
+
/* Generic BPF return codes which all BPF program types may support.
* The values are binary compatible with their TC_ACT_* counter-part to
* provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
@@ -999,6 +2364,14 @@ enum sk_action {
struct sk_msg_md {
void *data;
void *data_end;
+
+ __u32 family;
+ __u32 remote_ip4; /* Stored in network byte order */
+ __u32 local_ip4; /* Stored in network byte order */
+ __u32 remote_ip6[4]; /* Stored in network byte order */
+ __u32 local_ip6[4]; /* Stored in network byte order */
+ __u32 remote_port; /* Stored in network byte order */
+ __u32 local_port; /* stored in host byte order */
};
#define BPF_TAG_SIZE 8
@@ -1017,8 +2390,13 @@ struct bpf_prog_info {
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
+ __u32 gpl_compatible:1;
__u64 netns_dev;
__u64 netns_ino;
+ __u32 nr_jited_ksyms;
+ __u32 nr_jited_func_lens;
+ __aligned_u64 jited_ksyms;
+ __aligned_u64 jited_func_lens;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -1030,8 +2408,18 @@ struct bpf_map_info {
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
+ __u32 :32;
__u64 netns_dev;
__u64 netns_ino;
+ __u32 btf_id;
+ __u32 btf_key_type_id;
+ __u32 btf_value_type_id;
+} __attribute__((aligned(8)));
+
+struct bpf_btf_info {
+ __aligned_u64 btf;
+ __u32 btf_size;
+ __u32 id;
} __attribute__((aligned(8)));
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
@@ -1052,6 +2440,12 @@ struct bpf_sock_addr {
__u32 family; /* Allows 4-byte read, but no write */
__u32 type; /* Allows 4-byte read, but no write */
__u32 protocol; /* Allows 4-byte read, but no write */
+ __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
+ * Stored in network byte order.
+ */
+ __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write.
+ * Stored in network byte order.
+ */
};
/* User bpf_sock_ops struct to access socket values and specify request ops
@@ -1212,4 +2606,64 @@ struct bpf_raw_tracepoint_args {
__u64 args[0];
};
+/* DIRECT: Skip the FIB rules and go to FIB table associated with device
+ * OUTPUT: Do lookup from egress perspective; default is ingress
+ */
+#define BPF_FIB_LOOKUP_DIRECT BIT(0)
+#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
+
+struct bpf_fib_lookup {
+ /* input: network family for lookup (AF_INET, AF_INET6)
+ * output: network family of egress nexthop
+ */
+ __u8 family;
+
+ /* set if lookup is to consider L4 data - e.g., FIB rules */
+ __u8 l4_protocol;
+ __be16 sport;
+ __be16 dport;
+
+ /* total length of packet from network header - used for MTU check */
+ __u16 tot_len;
+ __u32 ifindex; /* L3 device index for lookup */
+
+ union {
+ /* inputs to lookup */
+ __u8 tos; /* AF_INET */
+ __be32 flowinfo; /* AF_INET6, flow_label + priority */
+
+ /* output: metric of fib result (IPv4/IPv6 only) */
+ __u32 rt_metric;
+ };
+
+ union {
+ __be32 ipv4_src;
+ __u32 ipv6_src[4]; /* in6_addr; network order */
+ };
+
+ /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
+ * network header. output: bpf_fib_lookup sets to gateway address
+ * if FIB lookup returns gateway route
+ */
+ union {
+ __be32 ipv4_dst;
+ __u32 ipv6_dst[4]; /* in6_addr; network order */
+ };
+
+ /* output */
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __u8 smac[6]; /* ETH_ALEN */
+ __u8 dmac[6]; /* ETH_ALEN */
+};
+
+enum bpf_task_fd_type {
+ BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
+ BPF_FD_TYPE_TRACEPOINT, /* tp name */
+ BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
+ BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
+ BPF_FD_TYPE_UPROBE, /* filename + offset */
+ BPF_FD_TYPE_URETPROBE, /* filename + offset */
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h
new file mode 100644
index 000000000000..2ec3cc99ea4c
--- /dev/null
+++ b/include/uapi/linux/bpfilter.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI_LINUX_BPFILTER_H
+#define _UAPI_LINUX_BPFILTER_H
+
+#include <linux/if.h>
+
+enum {
+ BPFILTER_IPT_SO_SET_REPLACE = 64,
+ BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65,
+ BPFILTER_IPT_SET_MAX,
+};
+
+enum {
+ BPFILTER_IPT_SO_GET_INFO = 64,
+ BPFILTER_IPT_SO_GET_ENTRIES = 65,
+ BPFILTER_IPT_SO_GET_REVISION_MATCH = 66,
+ BPFILTER_IPT_SO_GET_REVISION_TARGET = 67,
+ BPFILTER_IPT_GET_MAX,
+};
+
+#endif /* _UAPI_LINUX_BPFILTER_H */
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
new file mode 100644
index 000000000000..0b5ddbe135a4
--- /dev/null
+++ b/include/uapi/linux/btf.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2018 Facebook */
+#ifndef _UAPI__LINUX_BTF_H__
+#define _UAPI__LINUX_BTF_H__
+
+#include <linux/types.h>
+
+#define BTF_MAGIC 0xeB9F
+#define BTF_VERSION 1
+
+struct btf_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 type_off; /* offset of type section */
+ __u32 type_len; /* length of type section */
+ __u32 str_off; /* offset of string section */
+ __u32 str_len; /* length of string section */
+};
+
+/* Max # of type identifier */
+#define BTF_MAX_TYPE 0x0000ffff
+/* Max offset into the string section */
+#define BTF_MAX_NAME_OFFSET 0x0000ffff
+/* Max # of struct/union/enum members or func args */
+#define BTF_MAX_VLEN 0xffff
+
+struct btf_type {
+ __u32 name_off;
+ /* "info" bits arrangement
+ * bits 0-15: vlen (e.g. # of struct's members)
+ * bits 16-23: unused
+ * bits 24-27: kind (e.g. int, ptr, array...etc)
+ * bits 28-31: unused
+ */
+ __u32 info;
+ /* "size" is used by INT, ENUM, STRUCT and UNION.
+ * "size" tells the size of the type it is describing.
+ *
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
+ * "type" is a type_id referring to another type.
+ */
+ union {
+ __u32 size;
+ __u32 type;
+ };
+};
+
+#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
+#define BTF_INFO_VLEN(info) ((info) & 0xffff)
+
+#define BTF_KIND_UNKN 0 /* Unknown */
+#define BTF_KIND_INT 1 /* Integer */
+#define BTF_KIND_PTR 2 /* Pointer */
+#define BTF_KIND_ARRAY 3 /* Array */
+#define BTF_KIND_STRUCT 4 /* Struct */
+#define BTF_KIND_UNION 5 /* Union */
+#define BTF_KIND_ENUM 6 /* Enumeration */
+#define BTF_KIND_FWD 7 /* Forward */
+#define BTF_KIND_TYPEDEF 8 /* Typedef */
+#define BTF_KIND_VOLATILE 9 /* Volatile */
+#define BTF_KIND_CONST 10 /* Const */
+#define BTF_KIND_RESTRICT 11 /* Restrict */
+#define BTF_KIND_MAX 11
+#define NR_BTF_KINDS 12
+
+/* For some specific BTF_KIND, "struct btf_type" is immediately
+ * followed by extra data.
+ */
+
+/* BTF_KIND_INT is followed by a u32 and the following
+ * is the 32 bits arrangement:
+ */
+#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
+#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
+#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff)
+
+/* Attributes stored in the BTF_INT_ENCODING */
+#define BTF_INT_SIGNED (1 << 0)
+#define BTF_INT_CHAR (1 << 1)
+#define BTF_INT_BOOL (1 << 2)
+
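To show how the layout above is meant to be consumed, a small user-space sketch that hand-builds one BTF_KIND_INT entry (the values are synthetic, not read from a real .BTF section) and decodes it with the accessor macros:

#include <stdio.h>
#include <linux/btf.h>

int main(void)
{
    /* A 32-bit unsigned integer type: kind in bits 24-27, vlen 0. */
    struct btf_type t = {
        .name_off = 1,
        .info = (__u32)BTF_KIND_INT << 24,
        .size = 4,
    };
    /* The u32 that follows a BTF_KIND_INT: 32 bits, offset 0, unsigned. */
    __u32 int_data = 32;

    printf("kind=%u vlen=%u size=%u bits=%u signed=%d\n",
           BTF_INFO_KIND(t.info), BTF_INFO_VLEN(t.info), t.size,
           BTF_INT_BITS(int_data),
           !!(BTF_INT_ENCODING(int_data) & BTF_INT_SIGNED));
    return 0;
}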
+/* BTF_KIND_ENUM is followed by multiple "struct btf_enum".
+ * The exact number of btf_enum is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_enum {
+ __u32 name_off;
+ __s32 val;
+};
+
+/* BTF_KIND_ARRAY is followed by one "struct btf_array" */
+struct btf_array {
+ __u32 type;
+ __u32 index_type;
+ __u32 nelems;
+};
+
+/* BTF_KIND_STRUCT and BTF_KIND_UNION are followed
+ * by multiple "struct btf_member". The exact number
+ * of btf_member is stored in the vlen (of the info in
+ * "struct btf_type").
+ */
+struct btf_member {
+ __u32 name_off;
+ __u32 type;
+ __u32 offset; /* offset in bits */
+};
+
+#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index c8d99b9ca550..5ca1d21fc4a7 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -422,6 +422,21 @@ struct btrfs_ioctl_ino_lookup_args {
char name[BTRFS_INO_LOOKUP_PATH_MAX];
};
+#define BTRFS_INO_LOOKUP_USER_PATH_MAX (4080 - BTRFS_VOL_NAME_MAX - 1)
+struct btrfs_ioctl_ino_lookup_user_args {
+ /* in, inode number containing the subvolume of 'subvolid' */
+ __u64 dirid;
+ /* in */
+ __u64 treeid;
+ /* out, name of the subvolume of 'treeid' */
+ char name[BTRFS_VOL_NAME_MAX + 1];
+ /*
+ * out, constructed path from the directory with which the ioctl is
+ * called to dirid
+ */
+ char path[BTRFS_INO_LOOKUP_USER_PATH_MAX];
+};
+
/* Search criteria for the btrfs SEARCH ioctl family. */
struct btrfs_ioctl_search_key {
/*
@@ -725,6 +740,82 @@ struct btrfs_ioctl_send_args {
__u64 reserved[4]; /* in */
};
+/*
+ * Information about a fs tree root.
+ *
+ * All items are filled by the ioctl
+ */
+struct btrfs_ioctl_get_subvol_info_args {
+ /* Id of this subvolume */
+ __u64 treeid;
+
+ /* Name of this subvolume, used to get the real name at mount point */
+ char name[BTRFS_VOL_NAME_MAX + 1];
+
+ /*
+ * Id of the subvolume which contains this subvolume.
+ * Zero for top-level subvolume or a deleted subvolume.
+ */
+ __u64 parent_id;
+
+ /*
+ * Inode number of the directory which contains this subvolume.
+ * Zero for top-level subvolume or a deleted subvolume
+ */
+ __u64 dirid;
+
+ /* Latest transaction id of this subvolume */
+ __u64 generation;
+
+ /* Flags of this subvolume */
+ __u64 flags;
+
+ /* UUID of this subvolume */
+ __u8 uuid[BTRFS_UUID_SIZE];
+
+ /*
+ * UUID of the subvolume of which this subvolume is a snapshot.
+ * All zero for a non-snapshot subvolume.
+ */
+ __u8 parent_uuid[BTRFS_UUID_SIZE];
+
+ /*
+ * UUID of the subvolume from which this subvolume was received.
+ * All zero for non-received subvolume.
+ */
+ __u8 received_uuid[BTRFS_UUID_SIZE];
+
+ /* Transaction id indicating when change/create/send/receive happened */
+ __u64 ctransid;
+ __u64 otransid;
+ __u64 stransid;
+ __u64 rtransid;
+ /* Time corresponding to c/o/s/rtransid */
+ struct btrfs_ioctl_timespec ctime;
+ struct btrfs_ioctl_timespec otime;
+ struct btrfs_ioctl_timespec stime;
+ struct btrfs_ioctl_timespec rtime;
+
+ /* Must be zero */
+ __u64 reserved[8];
+};
+
+#define BTRFS_MAX_ROOTREF_BUFFER_NUM 255
+struct btrfs_ioctl_get_subvol_rootref_args {
+ /* in/out, minimum id of rootref's treeid to be searched */
+ __u64 min_treeid;
+
+ /* out */
+ struct {
+ __u64 treeid;
+ __u64 dirid;
+ } rootref[BTRFS_MAX_ROOTREF_BUFFER_NUM];
+
+ /* out, number of found items */
+ __u8 num_items;
+ __u8 align[7];
+};
+
/* Error codes as returned by the kernel */
enum btrfs_err_code {
BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
@@ -843,5 +934,11 @@ enum btrfs_err_code {
struct btrfs_ioctl_vol_args_v2)
#define BTRFS_IOC_LOGICAL_INO_V2 _IOWR(BTRFS_IOCTL_MAGIC, 59, \
struct btrfs_ioctl_logical_ino_args)
+#define BTRFS_IOC_GET_SUBVOL_INFO _IOR(BTRFS_IOCTL_MAGIC, 60, \
+ struct btrfs_ioctl_get_subvol_info_args)
+#define BTRFS_IOC_GET_SUBVOL_ROOTREF _IOWR(BTRFS_IOCTL_MAGIC, 61, \
+ struct btrfs_ioctl_get_subvol_rootref_args)
+#define BTRFS_IOC_INO_LOOKUP_USER _IOWR(BTRFS_IOCTL_MAGIC, 62, \
+ struct btrfs_ioctl_ino_lookup_user_args)
#endif /* _UAPI_LINUX_BTRFS_H */
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index 68ff25414700..db210625cee8 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -116,12 +116,16 @@ struct proc_event {
struct coredump_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
+ __kernel_pid_t parent_pid;
+ __kernel_pid_t parent_tgid;
} coredump;
struct exit_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
__u32 exit_code, exit_signal;
+ __kernel_pid_t parent_pid;
+ __kernel_pid_t parent_tgid;
} exit;
} event_data;
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 2c0c6453c3f4..60aa2e446698 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -163,6 +163,16 @@ struct ieee_pfc {
__u64 indications[IEEE_8021QAZ_MAX_TCS];
};
+#define IEEE_8021Q_MAX_PRIORITIES 8
+#define DCBX_MAX_BUFFERS 8
+struct dcbnl_buffer {
+ /* priority to buffer mapping */
+ __u8 prio2buffer[IEEE_8021Q_MAX_PRIORITIES];
+ /* buffer size in Bytes */
+ __u32 buffer_size[DCBX_MAX_BUFFERS];
+ __u32 total_size;
+};
+
/* CEE DCBX std supported values */
#define CEE_DCBX_MAX_PGS 8
#define CEE_DCBX_MAX_PRIO 8
@@ -406,6 +416,7 @@ enum ieee_attrs {
DCB_ATTR_IEEE_MAXRATE,
DCB_ATTR_IEEE_QCN,
DCB_ATTR_IEEE_QCN_STATS,
+ DCB_ATTR_DCB_BUFFER,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 1df65a4c2044..75cb5450c851 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -132,6 +132,16 @@ enum devlink_eswitch_encap_mode {
DEVLINK_ESWITCH_ENCAP_MODE_BASIC,
};
+enum devlink_port_flavour {
+ DEVLINK_PORT_FLAVOUR_PHYSICAL, /* Any kind of a port physically
+ * facing the user.
+ */
+ DEVLINK_PORT_FLAVOUR_CPU, /* CPU port */
+ DEVLINK_PORT_FLAVOUR_DSA, /* Distributed switch architecture
+ * interconnect port.
+ */
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -224,6 +234,10 @@ enum devlink_attr {
DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */
DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
+ DEVLINK_ATTR_PORT_FLAVOUR, /* u16 */
+ DEVLINK_ATTR_PORT_NUMBER, /* u32 */
+ DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index e2535d6dcec7..4e12c423b9fe 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -421,6 +421,7 @@ typedef struct elf64_shdr {
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
+#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index d2a8313fabd7..73e01918f996 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -242,6 +242,8 @@ struct fsxattr {
#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range)
#define FIDEDUPERANGE _IOWR(0x94, 54, struct file_dedupe_range)
+#define FSLABEL_MAX 256 /* Max chars for the interface; each fs may differ */
+
#define FS_IOC_GETFLAGS _IOR('f', 1, long)
#define FS_IOC_SETFLAGS _IOW('f', 2, long)
#define FS_IOC_GETVERSION _IOR('v', 1, long)
@@ -251,8 +253,10 @@ struct fsxattr {
#define FS_IOC32_SETFLAGS _IOW('f', 2, int)
#define FS_IOC32_GETVERSION _IOR('v', 1, int)
#define FS_IOC32_SETVERSION _IOW('v', 2, int)
-#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
-#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
+#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
+#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX])
+#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
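A minimal user-space sketch of the new label ioctls: open any file on the target filesystem and read its label. Only filesystems that wire up the ioctl (btrfs at the time of this series) will answer; others will typically fail with ENOTTY.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
    char label[FSLABEL_MAX] = {};
    int fd;

    if (argc < 2)
        return 1;

    fd = open(argv[1], O_RDONLY);
    if (fd < 0 || ioctl(fd, FS_IOC_GETFSLABEL, label) < 0) {
        perror("FS_IOC_GETFSLABEL");
        return 1;
    }
    printf("label: %s\n", label);
    close(fd);
    return 0;
}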
/*
* File system encryption support
@@ -275,6 +279,8 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
#define FS_ENCRYPTION_MODE_AES_128_CBC 5
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8
struct fscrypt_policy {
__u8 version;
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 4b5001c57f46..92fa24c24c92 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -113,6 +113,9 @@
* 7.26
* - add FUSE_HANDLE_KILLPRIV
* - add FUSE_POSIX_ACL
+ *
+ * 7.27
+ * - add FUSE_ABORT_ERROR
*/
#ifndef _LINUX_FUSE_H
@@ -148,7 +151,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 26
+#define FUSE_KERNEL_MINOR_VERSION 27
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -245,6 +248,7 @@ struct fuse_file_lock {
* FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
* FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc
* FUSE_POSIX_ACL: filesystem supports posix acls
+ * FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -267,6 +271,7 @@ struct fuse_file_lock {
#define FUSE_PARALLEL_DIROPS (1 << 18)
#define FUSE_HANDLE_KILLPRIV (1 << 19)
#define FUSE_POSIX_ACL (1 << 20)
+#define FUSE_ABORT_ERROR (1 << 21)
/**
* CUSE INIT request/reply flags
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index 2ef053d265de..ebaf5701c9db 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -33,6 +33,7 @@ enum {
IFA_CACHEINFO,
IFA_MULTICAST,
IFA_FLAGS,
+ IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
__IFA_MAX,
};
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 68699f654118..cf01b6824244 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -333,6 +333,7 @@ enum {
IFLA_BRPORT_BCAST_FLOOD,
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
+ IFLA_BRPORT_ISOLATED,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -516,6 +517,7 @@ enum {
IFLA_VXLAN_COLLECT_METADATA,
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
+ IFLA_VXLAN_TTL_INHERIT,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
new file mode 100644
index 000000000000..caed8b1614ff
--- /dev/null
+++ b/include/uapi/linux/if_xdp.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * if_xdp: XDP socket user-space interface
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * Author(s): Björn Töpel <bjorn.topel@intel.com>
+ * Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef _LINUX_IF_XDP_H
+#define _LINUX_IF_XDP_H
+
+#include <linux/types.h>
+
+/* Options for the sxdp_flags field */
+#define XDP_SHARED_UMEM (1 << 0)
+#define XDP_COPY (1 << 1) /* Force copy-mode */
+#define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */
+
+struct sockaddr_xdp {
+ __u16 sxdp_family;
+ __u16 sxdp_flags;
+ __u32 sxdp_ifindex;
+ __u32 sxdp_queue_id;
+ __u32 sxdp_shared_umem_fd;
+};
+
+struct xdp_ring_offset {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+struct xdp_mmap_offsets {
+ struct xdp_ring_offset rx;
+ struct xdp_ring_offset tx;
+ struct xdp_ring_offset fr; /* Fill */
+ struct xdp_ring_offset cr; /* Completion */
+};
+
+/* XDP socket options */
+#define XDP_MMAP_OFFSETS 1
+#define XDP_RX_RING 2
+#define XDP_TX_RING 3
+#define XDP_UMEM_REG 4
+#define XDP_UMEM_FILL_RING 5
+#define XDP_UMEM_COMPLETION_RING 6
+#define XDP_STATISTICS 7
+
+struct xdp_umem_reg {
+ __u64 addr; /* Start of packet data area */
+ __u64 len; /* Length of packet data area */
+ __u32 chunk_size;
+ __u32 headroom;
+};
+
+struct xdp_statistics {
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+};
+
+/* Pgoff for mmaping the rings */
+#define XDP_PGOFF_RX_RING 0
+#define XDP_PGOFF_TX_RING 0x80000000
+#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+
+/* Rx/Tx descriptor */
+struct xdp_desc {
+ __u64 addr;
+ __u32 len;
+ __u32 options;
+};
+
+/* UMEM descriptor is __u64 */
+
+#endif /* _LINUX_IF_XDP_H */
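A rough user-space setup sketch for this interface, in the order the pieces are meant to be used: create the socket, register a UMEM, size the fill and RX rings, then bind to a queue of a device. Error handling is collapsed, the ifindex is illustrative, AF_XDP/SOL_XDP come from linux/socket.h of this series, and mmap()ing the rings via XDP_MMAP_OFFSETS is the (elided) next step.

#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_xdp.h>

#ifndef AF_XDP
#define AF_XDP  44      /* from linux/socket.h in this series */
#endif
#ifndef SOL_XDP
#define SOL_XDP 283     /* from linux/socket.h in this series */
#endif

int main(void)
{
    struct xdp_umem_reg umem = { .len = 1 << 20, .chunk_size = 2048 };
    struct sockaddr_xdp sxdp = {
        .sxdp_family = AF_XDP,
        .sxdp_ifindex = 1,      /* ifindex of the target device */
        .sxdp_queue_id = 0,
    };
    int ring_sz = 1024;
    int fd = socket(AF_XDP, SOCK_RAW, 0);
    void *area = mmap(NULL, umem.len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (fd < 0 || area == MAP_FAILED)
        return 1;
    umem.addr = (__u64)(unsigned long)area;

    if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &umem, sizeof(umem)) ||
        setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz)) ||
        setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz)) ||
        bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
        perror("AF_XDP setup");

    close(fd);
    return 0;
}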
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index fa139841ec18..21b9113c69da 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -35,6 +35,6 @@
#define KPF_BALLOON 23
#define KPF_ZERO_PAGE 24
#define KPF_IDLE 25
-
+#define KPF_PGTABLE 26
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1065006c9bf5..b6270a3b38e9 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -676,6 +676,13 @@ struct kvm_ioeventfd {
__u8 pad[36];
};
+#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
+#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
+#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
+#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
+ KVM_X86_DISABLE_EXITS_HLT | \
+ KVM_X86_DISABLE_EXITS_PAUSE)
+
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
/* in */
@@ -941,6 +948,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_BPB 152
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index f189931042a7..6b319581882f 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -134,6 +134,12 @@
#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
/*
+ * Return the recording timeout, which is either set by
+ * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols.
+ */
+#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
+
+/*
* struct lirc_scancode - decoded scancode with protocol for use with
* LIRC_MODE_SCANCODE
*
diff --git a/include/uapi/linux/ncp.h b/include/uapi/linux/ncp.h
deleted file mode 100644
index ca6f3d42c88f..000000000000
--- a/include/uapi/linux/ncp.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * ncp.h
- *
- * Copyright (C) 1995 by Volker Lendecke
- * Modified for sparc by J.F. Chadima
- * Modified for __constant_ntoh by Frank A. Vorstenbosch
- *
- */
-
-#ifndef _LINUX_NCP_H
-#define _LINUX_NCP_H
-
-#include <linux/types.h>
-
-#define NCP_PTYPE (0x11)
-#define NCP_PORT (0x0451)
-
-#define NCP_ALLOC_SLOT_REQUEST (0x1111)
-#define NCP_REQUEST (0x2222)
-#define NCP_DEALLOC_SLOT_REQUEST (0x5555)
-
-struct ncp_request_header {
- __u16 type;
- __u8 sequence;
- __u8 conn_low;
- __u8 task;
- __u8 conn_high;
- __u8 function;
- __u8 data[0];
-} __attribute__((packed));
-
-#define NCP_REPLY (0x3333)
-#define NCP_WATCHDOG (0x3E3E)
-#define NCP_POSITIVE_ACK (0x9999)
-
-struct ncp_reply_header {
- __u16 type;
- __u8 sequence;
- __u8 conn_low;
- __u8 task;
- __u8 conn_high;
- __u8 completion_code;
- __u8 connection_state;
- __u8 data[0];
-} __attribute__((packed));
-
-#define NCP_VOLNAME_LEN (16)
-#define NCP_NUMBER_OF_VOLUMES (256)
-struct ncp_volume_info {
- __u32 total_blocks;
- __u32 free_blocks;
- __u32 purgeable_blocks;
- __u32 not_yet_purgeable_blocks;
- __u32 total_dir_entries;
- __u32 available_dir_entries;
- __u8 sectors_per_block;
- char volume_name[NCP_VOLNAME_LEN + 1];
-};
-
-#define AR_READ (cpu_to_le16(1))
-#define AR_WRITE (cpu_to_le16(2))
-#define AR_EXCLUSIVE (cpu_to_le16(0x20))
-
-#define NCP_FILE_ID_LEN 6
-
-/* Defines for Name Spaces */
-#define NW_NS_DOS 0
-#define NW_NS_MAC 1
-#define NW_NS_NFS 2
-#define NW_NS_FTAM 3
-#define NW_NS_OS2 4
-
-/* Defines for ReturnInformationMask */
-#define RIM_NAME (cpu_to_le32(1))
-#define RIM_SPACE_ALLOCATED (cpu_to_le32(2))
-#define RIM_ATTRIBUTES (cpu_to_le32(4))
-#define RIM_DATA_SIZE (cpu_to_le32(8))
-#define RIM_TOTAL_SIZE (cpu_to_le32(0x10))
-#define RIM_EXT_ATTR_INFO (cpu_to_le32(0x20))
-#define RIM_ARCHIVE (cpu_to_le32(0x40))
-#define RIM_MODIFY (cpu_to_le32(0x80))
-#define RIM_CREATION (cpu_to_le32(0x100))
-#define RIM_OWNING_NAMESPACE (cpu_to_le32(0x200))
-#define RIM_DIRECTORY (cpu_to_le32(0x400))
-#define RIM_RIGHTS (cpu_to_le32(0x800))
-#define RIM_ALL (cpu_to_le32(0xFFF))
-#define RIM_COMPRESSED_INFO (cpu_to_le32(0x80000000))
-
-/* Defines for NSInfoBitMask */
-#define NSIBM_NFS_NAME 0x0001
-#define NSIBM_NFS_MODE 0x0002
-#define NSIBM_NFS_GID 0x0004
-#define NSIBM_NFS_NLINKS 0x0008
-#define NSIBM_NFS_RDEV 0x0010
-#define NSIBM_NFS_LINK 0x0020
-#define NSIBM_NFS_CREATED 0x0040
-#define NSIBM_NFS_UID 0x0080
-#define NSIBM_NFS_ACSFLAG 0x0100
-#define NSIBM_NFS_MYFLAG 0x0200
-
-/* open/create modes */
-#define OC_MODE_OPEN 0x01
-#define OC_MODE_TRUNCATE 0x02
-#define OC_MODE_REPLACE 0x02
-#define OC_MODE_CREATE 0x08
-
-/* open/create results */
-#define OC_ACTION_NONE 0x00
-#define OC_ACTION_OPEN 0x01
-#define OC_ACTION_CREATE 0x02
-#define OC_ACTION_TRUNCATE 0x04
-#define OC_ACTION_REPLACE 0x04
-
-/* access rights attributes */
-#ifndef AR_READ_ONLY
-#define AR_READ_ONLY 0x0001
-#define AR_WRITE_ONLY 0x0002
-#define AR_DENY_READ 0x0004
-#define AR_DENY_WRITE 0x0008
-#define AR_COMPATIBILITY 0x0010
-#define AR_WRITE_THROUGH 0x0040
-#define AR_OPEN_COMPRESSED 0x0100
-#endif
-
-struct nw_nfs_info {
- __u32 mode;
- __u32 rdev;
-};
-
-struct nw_info_struct {
- __u32 spaceAlloc;
- __le32 attributes;
- __u16 flags;
- __le32 dataStreamSize;
- __le32 totalStreamSize;
- __u16 numberOfStreams;
- __le16 creationTime;
- __le16 creationDate;
- __u32 creatorID;
- __le16 modifyTime;
- __le16 modifyDate;
- __u32 modifierID;
- __le16 lastAccessDate;
- __u16 archiveTime;
- __u16 archiveDate;
- __u32 archiverID;
- __u16 inheritedRightsMask;
- __le32 dirEntNum;
- __le32 DosDirNum;
- __u32 volNumber;
- __u32 EADataSize;
- __u32 EAKeyCount;
- __u32 EAKeySize;
- __u32 NSCreator;
- __u8 nameLen;
- __u8 entryName[256];
- /* libncp may depend on there being nothing after entryName */
-#ifdef __KERNEL__
- struct nw_nfs_info nfs;
-#endif
-} __attribute__((packed));
-
-/* modify mask - use with MODIFY_DOS_INFO structure */
-#define DM_ATTRIBUTES (cpu_to_le32(0x02))
-#define DM_CREATE_DATE (cpu_to_le32(0x04))
-#define DM_CREATE_TIME (cpu_to_le32(0x08))
-#define DM_CREATOR_ID (cpu_to_le32(0x10))
-#define DM_ARCHIVE_DATE (cpu_to_le32(0x20))
-#define DM_ARCHIVE_TIME (cpu_to_le32(0x40))
-#define DM_ARCHIVER_ID (cpu_to_le32(0x80))
-#define DM_MODIFY_DATE (cpu_to_le32(0x0100))
-#define DM_MODIFY_TIME (cpu_to_le32(0x0200))
-#define DM_MODIFIER_ID (cpu_to_le32(0x0400))
-#define DM_LAST_ACCESS_DATE (cpu_to_le32(0x0800))
-#define DM_INHERITED_RIGHTS_MASK (cpu_to_le32(0x1000))
-#define DM_MAXIMUM_SPACE (cpu_to_le32(0x2000))
-
-struct nw_modify_dos_info {
- __le32 attributes;
- __le16 creationDate;
- __le16 creationTime;
- __u32 creatorID;
- __le16 modifyDate;
- __le16 modifyTime;
- __u32 modifierID;
- __u16 archiveDate;
- __u16 archiveTime;
- __u32 archiverID;
- __le16 lastAccessDate;
- __u16 inheritanceGrantMask;
- __u16 inheritanceRevokeMask;
- __u32 maximumSpace;
-} __attribute__((packed));
-
-struct nw_search_sequence {
- __u8 volNumber;
- __u32 dirBase;
- __u32 sequence;
-} __attribute__((packed));
-
-#endif /* _LINUX_NCP_H */
diff --git a/include/uapi/linux/ncp_fs.h b/include/uapi/linux/ncp_fs.h
deleted file mode 100644
index e76a44229d2f..000000000000
--- a/include/uapi/linux/ncp_fs.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * ncp_fs.h
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- *
- */
-
-#ifndef _LINUX_NCP_FS_H
-#define _LINUX_NCP_FS_H
-
-#include <linux/fs.h>
-#include <linux/in.h>
-#include <linux/types.h>
-#include <linux/magic.h>
-
-#include <linux/ipx.h>
-#include <linux/ncp_no.h>
-
-/*
- * ioctl commands
- */
-
-struct ncp_ioctl_request {
- unsigned int function;
- unsigned int size;
- char __user *data;
-};
-
-struct ncp_fs_info {
- int version;
- struct sockaddr_ipx addr;
- __kernel_uid_t mounted_uid;
- int connection; /* Connection number the server assigned us */
- int buffer_size; /* The negotiated buffer size, to be
- used for read/write requests! */
-
- int volume_number;
- __le32 directory_id;
-};
-
-struct ncp_fs_info_v2 {
- int version;
- unsigned long mounted_uid;
- unsigned int connection;
- unsigned int buffer_size;
-
- unsigned int volume_number;
- __le32 directory_id;
-
- __u32 dummy1;
- __u32 dummy2;
- __u32 dummy3;
-};
-
-struct ncp_sign_init
-{
- char sign_root[8];
- char sign_last[16];
-};
-
-struct ncp_lock_ioctl
-{
-#define NCP_LOCK_LOG 0
-#define NCP_LOCK_SH 1
-#define NCP_LOCK_EX 2
-#define NCP_LOCK_CLEAR 256
- int cmd;
- int origin;
- unsigned int offset;
- unsigned int length;
-#define NCP_LOCK_DEFAULT_TIMEOUT 18
-#define NCP_LOCK_MAX_TIMEOUT 180
- int timeout;
-};
-
-struct ncp_setroot_ioctl
-{
- int volNumber;
- int namespace;
- __le32 dirEntNum;
-};
-
-struct ncp_objectname_ioctl
-{
-#define NCP_AUTH_NONE 0x00
-#define NCP_AUTH_BIND 0x31
-#define NCP_AUTH_NDS 0x32
- int auth_type;
- size_t object_name_len;
- void __user * object_name; /* a userspace data, in most cases user name */
-};
-
-struct ncp_privatedata_ioctl
-{
- size_t len;
- void __user * data; /* ~1000 for NDS */
-};
-
-/* NLS charsets by ioctl */
-#define NCP_IOCSNAME_LEN 20
-struct ncp_nls_ioctl
-{
- unsigned char codepage[NCP_IOCSNAME_LEN+1];
- unsigned char iocharset[NCP_IOCSNAME_LEN+1];
-};
-
-#define NCP_IOC_NCPREQUEST _IOR('n', 1, struct ncp_ioctl_request)
-#define NCP_IOC_GETMOUNTUID _IOW('n', 2, __kernel_old_uid_t)
-#define NCP_IOC_GETMOUNTUID2 _IOW('n', 2, unsigned long)
-
-#define NCP_IOC_CONN_LOGGED_IN _IO('n', 3)
-
-#define NCP_GET_FS_INFO_VERSION (1)
-#define NCP_IOC_GET_FS_INFO _IOWR('n', 4, struct ncp_fs_info)
-#define NCP_GET_FS_INFO_VERSION_V2 (2)
-#define NCP_IOC_GET_FS_INFO_V2 _IOWR('n', 4, struct ncp_fs_info_v2)
-
-#define NCP_IOC_SIGN_INIT _IOR('n', 5, struct ncp_sign_init)
-#define NCP_IOC_SIGN_WANTED _IOR('n', 6, int)
-#define NCP_IOC_SET_SIGN_WANTED _IOW('n', 6, int)
-
-#define NCP_IOC_LOCKUNLOCK _IOR('n', 7, struct ncp_lock_ioctl)
-
-#define NCP_IOC_GETROOT _IOW('n', 8, struct ncp_setroot_ioctl)
-#define NCP_IOC_SETROOT _IOR('n', 8, struct ncp_setroot_ioctl)
-
-#define NCP_IOC_GETOBJECTNAME _IOWR('n', 9, struct ncp_objectname_ioctl)
-#define NCP_IOC_SETOBJECTNAME _IOR('n', 9, struct ncp_objectname_ioctl)
-#define NCP_IOC_GETPRIVATEDATA _IOWR('n', 10, struct ncp_privatedata_ioctl)
-#define NCP_IOC_SETPRIVATEDATA _IOR('n', 10, struct ncp_privatedata_ioctl)
-
-#define NCP_IOC_GETCHARSETS _IOWR('n', 11, struct ncp_nls_ioctl)
-#define NCP_IOC_SETCHARSETS _IOR('n', 11, struct ncp_nls_ioctl)
-
-#define NCP_IOC_GETDENTRYTTL _IOW('n', 12, __u32)
-#define NCP_IOC_SETDENTRYTTL _IOR('n', 12, __u32)
-
-/*
- * The packet size to allocate. One page should be enough.
- */
-#define NCP_PACKET_SIZE 4070
-
-#define NCP_MAXPATHLEN 255
-#define NCP_MAXNAMELEN 14
-
-#endif /* _LINUX_NCP_FS_H */
diff --git a/include/uapi/linux/ncp_mount.h b/include/uapi/linux/ncp_mount.h
deleted file mode 100644
index 9bdbcd68c329..000000000000
--- a/include/uapi/linux/ncp_mount.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * ncp_mount.h
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- *
- */
-
-#ifndef _LINUX_NCP_MOUNT_H
-#define _LINUX_NCP_MOUNT_H
-
-#include <linux/types.h>
-#include <linux/ncp.h>
-
-#define NCP_MOUNT_VERSION 3 /* Binary */
-
-/* Values for flags */
-#define NCP_MOUNT_SOFT 0x0001
-#define NCP_MOUNT_INTR 0x0002
-#define NCP_MOUNT_STRONG 0x0004 /* enable delete/rename of r/o files */
-#define NCP_MOUNT_NO_OS2 0x0008 /* do not use OS/2 (LONG) namespace */
-#define NCP_MOUNT_NO_NFS 0x0010 /* do not use NFS namespace */
-#define NCP_MOUNT_EXTRAS 0x0020
-#define NCP_MOUNT_SYMLINKS 0x0040 /* enable symlinks */
-#define NCP_MOUNT_NFS_EXTRAS 0x0080 /* Enable use of NFS NS meta-info */
-
-struct ncp_mount_data {
- int version;
- unsigned int ncp_fd; /* The socket to the ncp port */
- __kernel_uid_t mounted_uid; /* Who may umount() this filesystem? */
- __kernel_pid_t wdog_pid; /* Who cares for our watchdog packets? */
-
- unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
- unsigned int time_out; /* How long should I wait after
- sending a NCP request? */
- unsigned int retry_count; /* And how often should I retry? */
- unsigned int flags;
-
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_mode_t file_mode;
- __kernel_mode_t dir_mode;
-};
-
-#define NCP_MOUNT_VERSION_V4 (4) /* Binary or text */
-
-struct ncp_mount_data_v4 {
- int version;
- unsigned long flags; /* NCP_MOUNT_* flags */
- /* MIPS uses long __kernel_uid_t, but... */
- /* we neever pass -1, so it is safe */
- unsigned long mounted_uid; /* Who may umount() this filesystem? */
- /* MIPS uses long __kernel_pid_t */
- long wdog_pid; /* Who cares for our watchdog packets? */
-
- unsigned int ncp_fd; /* The socket to the ncp port */
- unsigned int time_out; /* How long should I wait after
- sending a NCP request? */
- unsigned int retry_count; /* And how often should I retry? */
-
- /* MIPS uses long __kernel_uid_t... */
- /* we never pass -1, so it is safe */
- unsigned long uid;
- unsigned long gid;
- /* MIPS uses unsigned long __kernel_mode_t */
- unsigned long file_mode;
- unsigned long dir_mode;
-};
-
-#define NCP_MOUNT_VERSION_V5 (5) /* Text only */
-
-#endif
diff --git a/include/uapi/linux/ncp_no.h b/include/uapi/linux/ncp_no.h
deleted file mode 100644
index 654d7c7f5d92..000000000000
--- a/include/uapi/linux/ncp_no.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _NCP_NO
-#define _NCP_NO
-
-/* these define the attribute byte as seen by NCP */
-#define aRONLY (__cpu_to_le32(1))
-#define aHIDDEN (__cpu_to_le32(2))
-#define aSYSTEM (__cpu_to_le32(4))
-#define aEXECUTE (__cpu_to_le32(8))
-#define aDIR (__cpu_to_le32(0x10))
-#define aARCH (__cpu_to_le32(0x20))
-#define aSHARED (__cpu_to_le32(0x80))
-#define aDONTSUBALLOCATE (__cpu_to_le32(1L<<11))
-#define aTRANSACTIONAL (__cpu_to_le32(1L<<12))
-#define aPURGE (__cpu_to_le32(1L<<16))
-#define aRENAMEINHIBIT (__cpu_to_le32(1L<<17))
-#define aDELETEINHIBIT (__cpu_to_le32(1L<<18))
-#define aDONTCOMPRESS (__cpu_to_le32(1L<<27))
-
-#endif /* _NCP_NO */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index c712eb6879f1..336014bf8868 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -112,7 +112,7 @@ enum ip_conntrack_status {
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
- __IPS_MAX_BIT = 14,
+ __IPS_MAX_BIT = 15,
};
/* Connection tracking event types */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tcp.h b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
index 74b91151d494..bcba72def817 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
@@ -46,6 +46,9 @@ enum tcp_conntrack {
/* Marks possibility for expected RFC5961 challenge ACK */
#define IP_CT_EXP_CHALLENGE_ACK 0x40
+/* Simultaneous open initialized */
+#define IP_CT_TCP_SIMULTANEOUS_OPEN 0x80
+
struct nf_ct_tcp_flags {
__u8 flags;
__u8 mask;
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index a33000da7229..4a95c0db14d4 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -10,6 +10,7 @@
#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
#define NF_NAT_RANGE_PERSISTENT (1 << 3)
#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
+#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
(NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
@@ -17,7 +18,7 @@
#define NF_NAT_RANGE_MASK \
(NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
- NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET)
struct nf_nat_ipv4_range {
unsigned int flags;
@@ -40,4 +41,13 @@ struct nf_nat_range {
union nf_conntrack_man_proto max_proto;
};
+struct nf_nat_range2 {
+ unsigned int flags;
+ union nf_inet_addr min_addr;
+ union nf_inet_addr max_addr;
+ union nf_conntrack_man_proto min_proto;
+ union nf_conntrack_man_proto max_proto;
+ union nf_conntrack_man_proto base_proto;
+};
+
#endif /* _NETFILTER_NF_NAT_H */
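
The new nf_nat_range2 extends nf_nat_range with a base_proto field used together with NF_NAT_RANGE_PROTO_OFFSET for shifted port mappings. A hedged kernel-side sketch of how an expression might fill it; the addresses and ports are made up, the surrounding nft plumbing is omitted, and the offset semantics in the comment are my reading of the series:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netfilter/nf_nat.h>

/*
 * Shifted port mapping sketch: with NF_NAT_RANGE_PROTO_OFFSET, the selected
 * port is offset from min_proto by the distance of the original port from
 * base_proto, wrapping within [min_proto, max_proto].
 */
static void example_fill_range(struct nf_nat_range2 *range)
{
	memset(range, 0, sizeof(*range));
	range->flags = NF_NAT_RANGE_MAP_IPS |
		       NF_NAT_RANGE_PROTO_SPECIFIED |
		       NF_NAT_RANGE_PROTO_OFFSET;
	range->min_addr.ip = htonl(0xc0000201);		/* 192.0.2.1 */
	range->max_addr.ip = range->min_addr.ip;
	range->min_proto.tcp.port = htons(80);		/* mapped range start */
	range->max_proto.tcp.port = htons(179);		/* mapped range end */
	range->base_proto.tcp.port = htons(8000);	/* original range start */
}
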
diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nf_osf.h
new file mode 100644
index 000000000000..8f2f2f403183
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_osf.h
@@ -0,0 +1,86 @@
+#ifndef _NF_OSF_H
+#define _NF_OSF_H
+
+#include <linux/types.h>
+
+#define MAXGENRELEN 32
+
+#define NF_OSF_GENRE (1 << 0)
+#define NF_OSF_TTL (1 << 1)
+#define NF_OSF_LOG (1 << 2)
+#define NF_OSF_INVERT (1 << 3)
+
+#define NF_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */
+#define NF_OSF_LOGLEVEL_FIRST 1 /* log only the first matched fingerprint */
+#define NF_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */
+
+#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
+
+/* Do not compare ip and fingerprint TTL at all */
+#define NF_OSF_TTL_NOCHECK 2
+
+/* Wildcard MSS (kind of).
+ * It is used to implement a state machine for the different wildcard values
+ * of the MSS and window sizes.
+ */
+struct nf_osf_wc {
+ __u32 wc;
+ __u32 val;
+};
+
+/* This struct represents IANA options
+ * http://www.iana.org/assignments/tcp-parameters
+ */
+struct nf_osf_opt {
+ __u16 kind, length;
+ struct nf_osf_wc wc;
+};
+
+struct nf_osf_info {
+ char genre[MAXGENRELEN];
+ __u32 len;
+ __u32 flags;
+ __u32 loglevel;
+ __u32 ttl;
+};
+
+struct nf_osf_user_finger {
+ struct nf_osf_wc wss;
+
+ __u8 ttl, df;
+ __u16 ss, mss;
+ __u16 opt_num;
+
+ char genre[MAXGENRELEN];
+ char version[MAXGENRELEN];
+ char subtype[MAXGENRELEN];
+
+ /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */
+ struct nf_osf_opt opt[MAX_IPOPTLEN];
+};
+
+struct nf_osf_nlmsg {
+ struct nf_osf_user_finger f;
+ struct iphdr ip;
+ struct tcphdr tcp;
+};
+
+/* Defines for IANA option kinds */
+enum iana_options {
+ OSFOPT_EOL = 0, /* End of options */
+ OSFOPT_NOP, /* NOP */
+ OSFOPT_MSS, /* Maximum segment size */
+ OSFOPT_WSO, /* Window scale option */
+ OSFOPT_SACKP, /* SACK permitted */
+ OSFOPT_SACK, /* SACK */
+ OSFOPT_ECHO,
+ OSFOPT_ECHOREPLY,
+ OSFOPT_TS, /* Timestamp option */
+ OSFOPT_POCP, /* Partial Order Connection Permitted */
+ OSFOPT_POSP, /* Partial Order Service Profile */
+
+ /* Others are not used in the current OSF */
+ OSFOPT_EMPTY = 255,
+};
+
+#endif /* _NF_OSF_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 6a3d653d5b27..89438e68dc03 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -266,7 +266,7 @@ enum nft_rule_compat_attributes {
* @NFT_SET_INTERVAL: set contains intervals
* @NFT_SET_MAP: set is used as a dictionary
* @NFT_SET_TIMEOUT: set uses timeouts
- * @NFT_SET_EVAL: set contains expressions for evaluation
+ * @NFT_SET_EVAL: set can be updated from the evaluation path
* @NFT_SET_OBJECT: set contains stateful objects
*/
enum nft_set_flags {
@@ -831,7 +831,9 @@ enum nft_rt_keys {
NFT_RT_NEXTHOP4,
NFT_RT_NEXTHOP6,
NFT_RT_TCPMSS,
+ __NFT_RT_MAX
};
+#define NFT_RT_MAX (__NFT_RT_MAX - 1)
/**
* enum nft_hash_types - nf_tables hash expression types
@@ -854,6 +856,8 @@ enum nft_hash_types {
* @NFTA_HASH_SEED: seed value (NLA_U32)
* @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
* @NFTA_HASH_TYPE: hash operation (NLA_U32: nft_hash_types)
+ * @NFTA_HASH_SET_NAME: name of the map to lookup (NLA_STRING)
+ * @NFTA_HASH_SET_ID: id of the map (NLA_U32)
*/
enum nft_hash_attributes {
NFTA_HASH_UNSPEC,
@@ -864,6 +868,8 @@ enum nft_hash_attributes {
NFTA_HASH_SEED,
NFTA_HASH_OFFSET,
NFTA_HASH_TYPE,
+ NFTA_HASH_SET_NAME,
+ NFTA_HASH_SET_ID,
__NFTA_HASH_MAX,
};
#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
@@ -899,6 +905,31 @@ enum nft_rt_attributes {
#define NFTA_RT_MAX (__NFTA_RT_MAX - 1)
/**
+ * enum nft_socket_attributes - nf_tables socket expression netlink attributes
+ *
+ * @NFTA_SOCKET_KEY: socket key to match
+ * @NFTA_SOCKET_DREG: destination register
+ */
+enum nft_socket_attributes {
+ NFTA_SOCKET_UNSPEC,
+ NFTA_SOCKET_KEY,
+ NFTA_SOCKET_DREG,
+ __NFTA_SOCKET_MAX
+};
+#define NFTA_SOCKET_MAX (__NFTA_SOCKET_MAX - 1)
+
+/**
+ * enum nft_socket_keys - nf_tables socket expression keys
+ *
+ * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
+ */
+enum nft_socket_keys {
+ NFT_SOCKET_TRANSPARENT,
+ __NFT_SOCKET_MAX
+};
+#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
+
+/**
* enum nft_ct_keys - nf_tables ct expression keys
*
* @NFT_CT_STATE: conntrack state (bitmask of enum ip_conntrack_info)
@@ -949,7 +980,9 @@ enum nft_ct_keys {
NFT_CT_DST_IP,
NFT_CT_SRC_IP6,
NFT_CT_DST_IP6,
+ __NFT_CT_MAX
};
+#define NFT_CT_MAX (__NFT_CT_MAX - 1)
/**
* enum nft_ct_attributes - nf_tables ct expression netlink attributes
@@ -1010,6 +1043,24 @@ enum nft_limit_attributes {
};
#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
+enum nft_connlimit_flags {
+ NFT_CONNLIMIT_F_INV = (1 << 0),
+};
+
+/**
+ * enum nft_connlimit_attributes - nf_tables connlimit expression netlink attributes
+ *
+ * @NFTA_CONNLIMIT_COUNT: number of connections (NLA_U32)
+ * @NFTA_CONNLIMIT_FLAGS: flags (NLA_U32: enum nft_connlimit_flags)
+ */
+enum nft_connlimit_attributes {
+ NFTA_CONNLIMIT_UNSPEC,
+ NFTA_CONNLIMIT_COUNT,
+ NFTA_CONNLIMIT_FLAGS,
+ __NFTA_CONNLIMIT_MAX
+};
+#define NFTA_CONNLIMIT_MAX (__NFTA_CONNLIMIT_MAX - 1)
+
/**
* enum nft_counter_attributes - nf_tables counter expression netlink attributes
*
@@ -1048,6 +1099,33 @@ enum nft_log_attributes {
#define NFTA_LOG_MAX (__NFTA_LOG_MAX - 1)
/**
+ * enum nft_log_level - nf_tables log levels
+ *
+ * @NFT_LOGLEVEL_EMERG: system is unusable
+ * @NFT_LOGLEVEL_ALERT: action must be taken immediately
+ * @NFT_LOGLEVEL_CRIT: critical conditions
+ * @NFT_LOGLEVEL_ERR: error conditions
+ * @NFT_LOGLEVEL_WARNING: warning conditions
+ * @NFT_LOGLEVEL_NOTICE: normal but significant condition
+ * @NFT_LOGLEVEL_INFO: informational
+ * @NFT_LOGLEVEL_DEBUG: debug-level messages
+ * @NFT_LOGLEVEL_AUDIT: enabling audit logging
+ */
+enum nft_log_level {
+ NFT_LOGLEVEL_EMERG,
+ NFT_LOGLEVEL_ALERT,
+ NFT_LOGLEVEL_CRIT,
+ NFT_LOGLEVEL_ERR,
+ NFT_LOGLEVEL_WARNING,
+ NFT_LOGLEVEL_NOTICE,
+ NFT_LOGLEVEL_INFO,
+ NFT_LOGLEVEL_DEBUG,
+ NFT_LOGLEVEL_AUDIT,
+ __NFT_LOGLEVEL_MAX
+};
+#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX - 1)
+
+/**
* enum nft_queue_attributes - nf_tables queue expression netlink attributes
*
* @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
@@ -1222,10 +1300,14 @@ enum nft_dup_attributes {
* enum nft_fwd_attributes - nf_tables fwd expression netlink attributes
*
* @NFTA_FWD_SREG_DEV: source register of output interface (NLA_U32: nft_register)
+ * @NFTA_FWD_SREG_ADDR: source register of destination address (NLA_U32: nft_register)
+ * @NFTA_FWD_NFPROTO: layer 3 family of source register address (NLA_U32: enum nfproto)
*/
enum nft_fwd_attributes {
NFTA_FWD_UNSPEC,
NFTA_FWD_SREG_DEV,
+ NFTA_FWD_SREG_ADDR,
+ NFTA_FWD_NFPROTO,
__NFTA_FWD_MAX
};
#define NFTA_FWD_MAX (__NFTA_FWD_MAX - 1)
@@ -1315,7 +1397,8 @@ enum nft_ct_helper_attributes {
#define NFT_OBJECT_QUOTA 2
#define NFT_OBJECT_CT_HELPER 3
#define NFT_OBJECT_LIMIT 4
-#define __NFT_OBJECT_MAX 5
+#define NFT_OBJECT_CONNLIMIT 5
+#define __NFT_OBJECT_MAX 6
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
@@ -1450,6 +1533,8 @@ enum nft_trace_types {
* @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
* @NFTA_NG_TYPE: operation type (NLA_U32)
* @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
+ * @NFTA_NG_SET_NAME: name of the map to lookup (NLA_STRING)
+ * @NFTA_NG_SET_ID: id of the map (NLA_U32)
*/
enum nft_ng_attributes {
NFTA_NG_UNSPEC,
@@ -1457,6 +1542,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
+ NFTA_NG_SET_NAME,
+ NFTA_NG_SET_ID,
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 77987111cab0..1d41810d17e2 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -262,6 +262,7 @@ enum ctattr_stats_cpu {
enum ctattr_stats_global {
CTA_STATS_GLOBAL_UNSPEC,
CTA_STATS_GLOBAL_ENTRIES,
+ CTA_STATS_GLOBAL_MAX_ENTRIES,
__CTA_STATS_GLOBAL_MAX,
};
#define CTA_STATS_GLOBAL_MAX (__CTA_STATS_GLOBAL_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index dad197e2ab99..72956eceeb09 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -23,101 +23,29 @@
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/netfilter/nf_osf.h>
-#define MAXGENRELEN 32
+#define XT_OSF_GENRE NF_OSF_GENRE
+#define XT_OSF_INVERT NF_OSF_INVERT
-#define XT_OSF_GENRE (1<<0)
-#define XT_OSF_TTL (1<<1)
-#define XT_OSF_LOG (1<<2)
-#define XT_OSF_INVERT (1<<3)
+#define XT_OSF_TTL NF_OSF_TTL
+#define XT_OSF_LOG NF_OSF_LOG
-#define XT_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */
-#define XT_OSF_LOGLEVEL_FIRST 1 /* log only the first matced fingerprint */
-#define XT_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */
+#define XT_OSF_LOGLEVEL_ALL NF_OSF_LOGLEVEL_ALL
+#define XT_OSF_LOGLEVEL_FIRST NF_OSF_LOGLEVEL_FIRST
+#define XT_OSF_LOGLEVEL_ALL_KNOWN NF_OSF_LOGLEVEL_ALL_KNOWN
-#define XT_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
-#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
-#define XT_OSF_TTL_NOCHECK 2 /* Do not compare ip and fingerprint TTL at all */
+#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE
+#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK
-struct xt_osf_info {
- char genre[MAXGENRELEN];
- __u32 len;
- __u32 flags;
- __u32 loglevel;
- __u32 ttl;
-};
-
-/*
- * Wildcard MSS (kind of).
- * It is used to implement a state machine for the different wildcard values
- * of the MSS and window sizes.
- */
-struct xt_osf_wc {
- __u32 wc;
- __u32 val;
-};
-
-/*
- * This struct represents IANA options
- * http://www.iana.org/assignments/tcp-parameters
- */
-struct xt_osf_opt {
- __u16 kind, length;
- struct xt_osf_wc wc;
-};
-
-struct xt_osf_user_finger {
- struct xt_osf_wc wss;
-
- __u8 ttl, df;
- __u16 ss, mss;
- __u16 opt_num;
-
- char genre[MAXGENRELEN];
- char version[MAXGENRELEN];
- char subtype[MAXGENRELEN];
+#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
- /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */
- struct xt_osf_opt opt[MAX_IPOPTLEN];
-};
-
-struct xt_osf_nlmsg {
- struct xt_osf_user_finger f;
- struct iphdr ip;
- struct tcphdr tcp;
-};
-
-/* Defines for IANA option kinds */
-
-enum iana_options {
- OSFOPT_EOL = 0, /* End of options */
- OSFOPT_NOP, /* NOP */
- OSFOPT_MSS, /* Maximum segment size */
- OSFOPT_WSO, /* Window scale option */
- OSFOPT_SACKP, /* SACK permitted */
- OSFOPT_SACK, /* SACK */
- OSFOPT_ECHO,
- OSFOPT_ECHOREPLY,
- OSFOPT_TS, /* Timestamp option */
- OSFOPT_POCP, /* Partial Order Connection Permitted */
- OSFOPT_POSP, /* Partial Order Service Profile */
-
- /* Others are not used in the current OSF */
- OSFOPT_EMPTY = 255,
-};
-
-/*
- * Initial window size option state machine: multiple of mss, mtu or
- * plain numeric value. Can also be made as plain numeric value which
- * is not a multiple of specified value.
- */
-enum xt_osf_window_size_options {
- OSF_WSS_PLAIN = 0,
- OSF_WSS_MSS,
- OSF_WSS_MTU,
- OSF_WSS_MODULO,
- OSF_WSS_MAX,
-};
+#define xt_osf_wc nf_osf_wc
+#define xt_osf_opt nf_osf_opt
+#define xt_osf_info nf_osf_info
+#define xt_osf_user_finger nf_osf_user_finger
+#define xt_osf_finger nf_osf_finger
+#define xt_osf_nlmsg nf_osf_nlmsg
/*
* Add/remove fingerprint from the kernel.
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 0c7dc8315013..3b86c14ea49d 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -191,6 +191,12 @@ struct ebt_entry {
unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
+static __inline__ struct ebt_entry_target *
+ebt_get_target(struct ebt_entry *e)
+{
+ return (void *)e + e->target_offset;
+}
+
/* {g,s}etsockopt numbers */
#define EBT_BASE_CTL 128
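
The new ebt_get_target() helper simply resolves the target record that sits at target_offset behind an entry, replacing open-coded pointer arithmetic in ebtables users. A small hedged sketch; the entry is assumed to come from an EBT_SO_GET_ENTRIES dump or equivalent:

#include <linux/netfilter_bridge/ebtables.h>

/* Equivalent to the former open-coded (void *)e + e->target_offset. */
static const char *entry_target_name(struct ebt_entry *e)
{
	struct ebt_entry_target *t = ebt_get_target(e);

	return t->u.name;	/* e.g. "standard", "dnat", "mark", ... */
}
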
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
index f3cc0ef514a7..54ed83360dac 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
@@ -17,7 +17,10 @@
#define IP6T_SRH_LAST_GT 0x0100
#define IP6T_SRH_LAST_LT 0x0200
#define IP6T_SRH_TAG 0x0400
-#define IP6T_SRH_MASK 0x07FF
+#define IP6T_SRH_PSID 0x0800
+#define IP6T_SRH_NSID 0x1000
+#define IP6T_SRH_LSID 0x2000
+#define IP6T_SRH_MASK 0x3FFF
/* Values for "mt_invflags" field in struct ip6t_srh */
#define IP6T_SRH_INV_NEXTHDR 0x0001
@@ -31,7 +34,10 @@
#define IP6T_SRH_INV_LAST_GT 0x0100
#define IP6T_SRH_INV_LAST_LT 0x0200
#define IP6T_SRH_INV_TAG 0x0400
-#define IP6T_SRH_INV_MASK 0x07FF
+#define IP6T_SRH_INV_PSID 0x0800
+#define IP6T_SRH_INV_NSID 0x1000
+#define IP6T_SRH_INV_LSID 0x2000
+#define IP6T_SRH_INV_MASK 0x3FFF
/**
* struct ip6t_srh - SRH match options
@@ -54,4 +60,37 @@ struct ip6t_srh {
__u16 mt_invflags;
};
+/**
+ * struct ip6t_srh1 - SRH match options (revision 1)
+ * @ next_hdr: Next header field of SRH
+ * @ hdr_len: Extension header length field of SRH
+ * @ segs_left: Segments left field of SRH
+ * @ last_entry: Last entry field of SRH
+ * @ tag: Tag field of SRH
+ * @ psid_addr: Address of previous SID in SRH SID list
+ * @ nsid_addr: Address of next SID in SRH SID list
+ * @ lsid_addr: Address of last SID in SRH SID list
+ * @ psid_msk: Mask of previous SID in SRH SID list
+ * @ nsid_msk: Mask of next SID in SRH SID list
+ * @ lsid_msk: Mask of last SID in SRH SID list
+ * @ mt_flags: match options
+ * @ mt_invflags: Invert the sense of match options
+ */
+
+struct ip6t_srh1 {
+ __u8 next_hdr;
+ __u8 hdr_len;
+ __u8 segs_left;
+ __u8 last_entry;
+ __u16 tag;
+ struct in6_addr psid_addr;
+ struct in6_addr nsid_addr;
+ struct in6_addr lsid_addr;
+ struct in6_addr psid_msk;
+ struct in6_addr nsid_msk;
+ struct in6_addr lsid_msk;
+ __u16 mt_flags;
+ __u16 mt_invflags;
+};
+
#endif /*_IP6T_SRH_H*/
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 15daf5e2638d..27e4e441caac 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,6 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -203,7 +204,8 @@
* FILS shared key authentication offload should be able to construct the
* authentication and association frames for FILS shared key authentication and
* eventually do a key derivation as per IEEE 802.11ai. The below additional
- * parameters should be given to driver in %NL80211_CMD_CONNECT.
+ * parameters should be given to driver in %NL80211_CMD_CONNECT and/or in
+ * %NL80211_CMD_UPDATE_CONNECT_PARAMS.
* %NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
* %NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
* %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
@@ -214,7 +216,8 @@
* as specified in IETF RFC 6696.
*
* When FILS shared key authentication is completed, driver needs to provide the
- * below additional parameters to userspace.
+ * below additional parameters to userspace, which can be either after setting
+ * up a connection or after roaming.
* %NL80211_ATTR_FILS_KEK - used for key renewal
* %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
* %NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
@@ -978,18 +981,18 @@
* only the %NL80211_ATTR_IE data is used and updated with this command.
*
* @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0
- * for the given authenticator address (specified with &NL80211_ATTR_MAC).
- * When &NL80211_ATTR_PMKR0_NAME is set, &NL80211_ATTR_PMK specifies the
+ * for the given authenticator address (specified with %NL80211_ATTR_MAC).
+ * When %NL80211_ATTR_PMKR0_NAME is set, %NL80211_ATTR_PMK specifies the
* PMK-R0, otherwise it specifies the PMK.
* @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
* configured PMK for the authenticator address identified by
- * &NL80211_ATTR_MAC.
+ * %NL80211_ATTR_MAC.
* @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way
* handshake was completed successfully by the driver. The BSSID is
- * specified with &NL80211_ATTR_MAC. Drivers that support 4 way handshake
+ * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake
* offload should send this event after indicating 802.11 association with
- * &NL80211_CMD_CONNECT or &NL80211_CMD_ROAM. If the 4 way handshake failed
- * &NL80211_CMD_DISCONNECT should be indicated instead.
+ * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed
+ * %NL80211_CMD_DISCONNECT should be indicated instead.
*
* @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
* and RX notification. This command is used both as a request to transmit
@@ -1026,9 +1029,9 @@
* initiated the connection through the connect request.
*
* @NL80211_CMD_STA_OPMODE_CHANGED: An event that notify station's
- * ht opmode or vht opmode changes using any of &NL80211_ATTR_SMPS_MODE,
- * &NL80211_ATTR_CHANNEL_WIDTH,&NL80211_ATTR_NSS attributes with its
- * address(specified in &NL80211_ATTR_MAC).
+ * ht opmode or vht opmode changes using any of %NL80211_ATTR_SMPS_MODE,
+ * %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
+ * address(specified in %NL80211_ATTR_MAC).
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
@@ -2215,7 +2218,7 @@ enum nl80211_commands {
* @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external
* authentication operation (u32 attribute with an
* &enum nl80211_external_auth_action value). This is used with the
- * &NL80211_CMD_EXTERNAL_AUTH request event.
+ * %NL80211_CMD_EXTERNAL_AUTH request event.
* @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
* space supports external authentication. This attribute shall be used
* only with %NL80211_CMD_CONNECT request. The driver may offload
@@ -2225,6 +2228,16 @@ enum nl80211_commands {
* @NL80211_ATTR_NSS: Station's New/updated RX_NSS value notified using this
* u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED.
*
+ * @NL80211_ATTR_TXQ_STATS: TXQ statistics (nested attribute, see &enum
+ * nl80211_txq_stats)
+ * @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy.
+ * The smaller of this and the memory limit is enforced.
+ * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the
+ * TXQ queues for this phy. The smaller of this and the packet limit is
+ * enforced.
+ * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
+ * a flow is assigned on each round of the DRR scheduler.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2659,6 +2672,11 @@ enum nl80211_attrs {
NL80211_ATTR_CONTROL_PORT_OVER_NL80211,
+ NL80211_ATTR_TXQ_STATS,
+ NL80211_ATTR_TXQ_LIMIT,
+ NL80211_ATTR_TXQ_MEMORY_LIMIT,
+ NL80211_ATTR_TXQ_QUANTUM,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2698,6 +2716,8 @@ enum nl80211_attrs {
#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+#define NL80211_WIPHY_NAME_MAXLEN 64
+
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77
#define NL80211_MAX_SUPP_REG_RULES 64
@@ -2980,6 +3000,8 @@ enum nl80211_sta_bss_param {
* received from the station (u64, usec)
* @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
* @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm)
+ * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data)
+ * ACK frame (s8, dBm)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3019,6 +3041,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_RX_DURATION,
NL80211_STA_INFO_PAD,
NL80211_STA_INFO_ACK_SIGNAL,
+ NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3036,6 +3059,7 @@ enum nl80211_sta_info {
* @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
* MSDUs (u64)
* @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment
+ * @NL80211_TID_STATS_TXQ_STATS: TXQ stats (nested attribute)
* @NUM_NL80211_TID_STATS: number of attributes here
* @NL80211_TID_STATS_MAX: highest numbered attribute here
*/
@@ -3046,6 +3070,7 @@ enum nl80211_tid_stats {
NL80211_TID_STATS_TX_MSDU_RETRIES,
NL80211_TID_STATS_TX_MSDU_FAILED,
NL80211_TID_STATS_PAD,
+ NL80211_TID_STATS_TXQ_STATS,
/* keep last */
NUM_NL80211_TID_STATS,
@@ -3053,6 +3078,44 @@ enum nl80211_tid_stats {
};
/**
+ * enum nl80211_txq_stats - per TXQ statistics attributes
+ * @__NL80211_TXQ_STATS_INVALID: attribute number 0 is reserved
+ * @NUM_NL80211_TXQ_STATS: number of attributes here
+ * @NL80211_TXQ_STATS_BACKLOG_BYTES: number of bytes currently backlogged
+ * @NL80211_TXQ_STATS_BACKLOG_PACKETS: number of packets currently
+ * backlogged
+ * @NL80211_TXQ_STATS_FLOWS: total number of new flows seen
+ * @NL80211_TXQ_STATS_DROPS: total number of packet drops
+ * @NL80211_TXQ_STATS_ECN_MARKS: total number of packet ECN marks
+ * @NL80211_TXQ_STATS_OVERLIMIT: number of drops due to queue space overflow
+ * @NL80211_TXQ_STATS_OVERMEMORY: number of drops due to memory limit overflow
+ * (only for per-phy stats)
+ * @NL80211_TXQ_STATS_COLLISIONS: number of hash collisions
+ * @NL80211_TXQ_STATS_TX_BYTES: total number of bytes dequeued from TXQ
+ * @NL80211_TXQ_STATS_TX_PACKETS: total number of packets dequeued from TXQ
+ * @NL80211_TXQ_STATS_MAX_FLOWS: number of flow buckets for PHY
+ * @NL80211_TXQ_STATS_MAX: highest numbered attribute here
+ */
+enum nl80211_txq_stats {
+ __NL80211_TXQ_STATS_INVALID,
+ NL80211_TXQ_STATS_BACKLOG_BYTES,
+ NL80211_TXQ_STATS_BACKLOG_PACKETS,
+ NL80211_TXQ_STATS_FLOWS,
+ NL80211_TXQ_STATS_DROPS,
+ NL80211_TXQ_STATS_ECN_MARKS,
+ NL80211_TXQ_STATS_OVERLIMIT,
+ NL80211_TXQ_STATS_OVERMEMORY,
+ NL80211_TXQ_STATS_COLLISIONS,
+ NL80211_TXQ_STATS_TX_BYTES,
+ NL80211_TXQ_STATS_TX_PACKETS,
+ NL80211_TXQ_STATS_MAX_FLOWS,
+
+ /* keep last */
+ NUM_NL80211_TXQ_STATS,
+ NL80211_TXQ_STATS_MAX = NUM_NL80211_TXQ_STATS - 1
+};
+
+/**
* enum nl80211_mpath_flags - nl80211 mesh path flags
*
* @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active
@@ -3142,6 +3205,29 @@ enum nl80211_band_attr {
#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA
/**
+ * enum nl80211_wmm_rule - regulatory wmm rule
+ *
+ * @__NL80211_WMMR_INVALID: attribute number 0 is reserved
+ * @NL80211_WMMR_CW_MIN: Minimum contention window slot.
+ * @NL80211_WMMR_CW_MAX: Maximum contention window slot.
+ * @NL80211_WMMR_AIFSN: Arbitration Inter Frame Space.
+ * @NL80211_WMMR_TXOP: Maximum allowed tx operation time.
+ * @NL80211_WMMR_MAX: highest possible wmm rule.
+ * @__NL80211_WMMR_LAST: Internal use.
+ */
+enum nl80211_wmm_rule {
+ __NL80211_WMMR_INVALID,
+ NL80211_WMMR_CW_MIN,
+ NL80211_WMMR_CW_MAX,
+ NL80211_WMMR_AIFSN,
+ NL80211_WMMR_TXOP,
+
+ /* keep last */
+ __NL80211_WMMR_LAST,
+ NL80211_WMMR_MAX = __NL80211_WMMR_LAST - 1
+};
+
+/**
* enum nl80211_frequency_attr - frequency attributes
* @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
@@ -3190,6 +3276,9 @@ enum nl80211_band_attr {
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
* on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_WMM: this channel has wmm limitations.
+ * This is a nested attribute that contains the wmm limitation per AC.
+ * (see &enum nl80211_wmm_rule)
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3218,6 +3307,7 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
NL80211_FREQUENCY_ATTR_NO_20MHZ,
NL80211_FREQUENCY_ATTR_NO_10MHZ,
+ NL80211_FREQUENCY_ATTR_WMM,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -3401,7 +3491,7 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
* base on contiguous rules and wider channels will be allowed to cross
* multiple contiguous/overlapping frequency ranges.
- * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
+ * @NL80211_RRF_IR_CONCURRENT: See %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
* @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -5038,6 +5128,11 @@ enum nl80211_feature_flags {
* "radar detected" event.
* @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
* receiving control port frames over nl80211 instead of the netdevice.
+ * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: Driver supports data ACK
+ * RSSI reporting when the firmware supports it; this flag advertises
+ * ACK RSSI support to nl80211.
+ * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
+ * TXQs.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5070,6 +5165,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
NL80211_EXT_FEATURE_DFS_OFFLOAD,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
+ NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
+ NL80211_EXT_FEATURE_TXQS,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5546,11 +5643,11 @@ enum nl80211_nan_func_attributes {
* @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set.
* This is a flag.
* @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
- * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
+ * %NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
* @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
- * &NL80211_NAN_SRF_BF is present. This is a u8.
+ * %NL80211_NAN_SRF_BF is present. This is a u8.
* @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
- * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested
+ * and only if %NL80211_NAN_SRF_BF isn't present. This is a nested
* attribute. Each nested attribute is a MAC address.
* @NUM_NL80211_NAN_SRF_ATTR: internal
* @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute
diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h
index 1a920145db04..87b55755f4ff 100644
--- a/include/uapi/linux/omap3isp.h
+++ b/include/uapi/linux/omap3isp.h
@@ -55,6 +55,8 @@
_IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct omap3isp_h3a_af_config)
#define VIDIOC_OMAP3ISP_STAT_REQ \
_IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct omap3isp_stat_data)
+#define VIDIOC_OMAP3ISP_STAT_REQ_TIME32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct omap3isp_stat_data_time32)
#define VIDIOC_OMAP3ISP_STAT_EN \
_IOWR('V', BASE_VIDIOC_PRIVATE + 7, unsigned long)
@@ -165,7 +167,14 @@ struct omap3isp_h3a_aewb_config {
* @config_counter: Number of the configuration associated with the data.
*/
struct omap3isp_stat_data {
+#ifdef __KERNEL__
+ struct {
+ __s64 tv_sec;
+ __s64 tv_usec;
+ } ts;
+#else
struct timeval ts;
+#endif
void __user *buf;
__u32 buf_size;
__u16 frame_number;
@@ -173,6 +182,19 @@ struct omap3isp_stat_data {
__u16 config_counter;
};
+#ifdef __KERNEL__
+struct omap3isp_stat_data_time32 {
+ struct {
+ __s32 tv_sec;
+ __s32 tv_usec;
+ } ts;
+ __u32 buf;
+ __u32 buf_size;
+ __u16 frame_number;
+ __u16 cur_frame;
+ __u16 config_counter;
+};
+#endif
/* Histogram related structs */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 713e56ce681f..863aabaa5cc9 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -937,4 +937,32 @@ enum ovs_meter_band_type {
#define OVS_METER_BAND_TYPE_MAX (__OVS_METER_BAND_TYPE_MAX - 1)
+/* Conntrack limit */
+#define OVS_CT_LIMIT_FAMILY "ovs_ct_limit"
+#define OVS_CT_LIMIT_MCGROUP "ovs_ct_limit"
+#define OVS_CT_LIMIT_VERSION 0x1
+
+enum ovs_ct_limit_cmd {
+ OVS_CT_LIMIT_CMD_UNSPEC,
+ OVS_CT_LIMIT_CMD_SET, /* Add or modify ct limit. */
+ OVS_CT_LIMIT_CMD_DEL, /* Delete ct limit. */
+ OVS_CT_LIMIT_CMD_GET /* Get ct limit. */
+};
+
+enum ovs_ct_limit_attr {
+ OVS_CT_LIMIT_ATTR_UNSPEC,
+ OVS_CT_LIMIT_ATTR_ZONE_LIMIT, /* Nested struct ovs_zone_limit. */
+ __OVS_CT_LIMIT_ATTR_MAX
+};
+
+#define OVS_CT_LIMIT_ATTR_MAX (__OVS_CT_LIMIT_ATTR_MAX - 1)
+
+#define OVS_ZONE_LIMIT_DEFAULT_ZONE -1
+
+struct ovs_zone_limit {
+ int zone_id;
+ __u32 limit;
+ __u32 count;
+};
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 103ba797a8f3..4da87e2ef8a8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -506,6 +506,8 @@
#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
@@ -655,6 +657,11 @@
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2_TLS 0x000f
+#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
@@ -981,6 +988,7 @@
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
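
The new PCI_EXP_LNKCTL2_TLS* definitions make the Target Link Speed field of Link Control 2 addressable by name. A hedged kernel-side sketch that caps a device at 8 GT/s using the existing capability accessors; retraining and error handling are omitted, and the function name is made up:

#include <linux/pci.h>

/* Limit the Target Link Speed in Link Control 2 to Gen3 (8 GT/s). */
static int example_cap_link_speed(struct pci_dev *pdev)
{
	return pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL2,
						  PCI_EXP_LNKCTL2_TLS,
						  PCI_EXP_LNKCTL2_TLS_8_0GT);
}
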
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 912b85b52344..b8e288a1f740 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/*
- * Indicates that the content of PERF_SAMPLE_IP points to
- * the actual instruction that triggered the event. See also
- * perf_event_attr::precise_ip.
+ * These PERF_RECORD_MISC_* flags below are safely reused
+ * for the following events:
+ *
+ * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
+ * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
+ *
+ *
+ * PERF_RECORD_MISC_EXACT_IP:
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ *
+ * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
+ * Indicates that thread was preempted in TASK_RUNNING state.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
+#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
/*
* Reserve the last bit to indicate some extended misc field
*/
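
Because PERF_RECORD_MISC_SWITCH_OUT_PREEMPT reuses bit 14, a consumer should only interpret it on PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE records that also carry PERF_RECORD_MISC_SWITCH_OUT. A hedged sketch of that check; the ring-buffer parsing around it is assumed:

#include <stdbool.h>
#include <linux/perf_event.h>

/* True if this context-switch record describes a preempted switch-out. */
static bool switch_out_was_preempt(const struct perf_event_header *hdr)
{
	if (hdr->type != PERF_RECORD_SWITCH &&
	    hdr->type != PERF_RECORD_SWITCH_CPU_WIDE)
		return false;
	if (!(hdr->misc & PERF_RECORD_MISC_SWITCH_OUT))
		return false;
	return hdr->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
}
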
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index be05e66c167b..84e4c1d0f874 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -129,6 +129,7 @@ enum {
#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */
#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */
#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
+#define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */
/* U32 filters */
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index b19a9c249b15..784c2e3e572e 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -106,7 +106,7 @@ struct pppol2tp_ioc_stats {
#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */
#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */
#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */
-#define PPPIOCDETACH _IOW('t', 60, int) /* detach from ppp unit/chan */
+#define PPPIOCDETACH _IOW('t', 60, int) /* obsolete, do not use */
#define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */
#define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */
#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index af5f8c2df87a..c0d7ea0bf5b6 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -170,7 +170,7 @@ struct prctl_mm_map {
* asking selinux for a specific new context (e.g. with runcon) will result
* in execve returning -EPERM.
*
- * See Documentation/prctl/no_new_privs.txt for more details.
+ * See Documentation/userspace-api/no_new_privs.rst for more details.
*/
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
@@ -207,4 +207,16 @@ struct prctl_mm_map {
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+/* Per task speculation control */
+#define PR_GET_SPECULATION_CTRL 52
+#define PR_SET_SPECULATION_CTRL 53
+/* Speculation control variants */
+# define PR_SPEC_STORE_BYPASS 0
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+# define PR_SPEC_NOT_AFFECTED 0
+# define PR_SPEC_PRCTL (1UL << 0)
+# define PR_SPEC_ENABLE (1UL << 1)
+# define PR_SPEC_DISABLE (1UL << 2)
+# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+
#endif /* _LINUX_PRCTL_H */
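
A hedged userspace sketch of the new speculation-control prctls: query the speculative store bypass state for the current task and, where per-task control is offered, force-disable it. The constants come from the header above; older installed headers may not define them yet.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long st = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (st < 0)
		perror("PR_GET_SPECULATION_CTRL");	/* e.g. EINVAL on old kernels */
	else if (st == PR_SPEC_NOT_AFFECTED)
		printf("not affected by speculative store bypass\n");
	else if (st & PR_SPEC_PRCTL)			/* per-task control available */
		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		      PR_SPEC_FORCE_DISABLE, 0, 0);
	return 0;
}
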
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 9008f31c7eb6..ac8c60bcc83b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -30,6 +30,7 @@ enum {
SEV_PDH_GEN,
SEV_PDH_CERT_EXPORT,
SEV_PEK_CERT_IMPORT,
+ SEV_GET_ID,
SEV_MAX,
};
@@ -124,6 +125,17 @@ struct sev_user_data_pdh_cert_export {
} __packed;
/**
+ * struct sev_user_data_get_id - GET_ID command parameters
+ *
+ * @socket1: Buffer to pass unique ID of first socket
+ * @socket2: Buffer to pass unique ID of second socket
+ */
+struct sev_user_data_get_id {
+ __u8 socket1[64]; /* Out */
+ __u8 socket2[64]; /* Out */
+} __packed;
+
+/**
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index c34f4490d025..26ee91300e3e 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -35,6 +35,9 @@
/* Clear the entropy pool and associated counters. (Superuser only.) */
#define RNDCLEARPOOL _IO( 'R', 0x06 )
+/* Reseed CRNG. (Superuser only.) */
+#define RNDRESEEDCRNG _IO( 'R', 0x07 )
+
struct rand_pool_info {
int entropy_count;
int buf_size;
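
A hedged sketch of the new reseed ioctl; it acts on the random character device and requires CAP_SYS_ADMIN.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd < 0 || ioctl(fd, RNDRESEEDCRNG) < 0)
		perror("RNDRESEEDCRNG");	/* EPERM without CAP_SYS_ADMIN */
	if (fd >= 0)
		close(fd);
	return 0;
}
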
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index 225eb38705dc..e14c6dab4223 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -1,15 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (c) 2016, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UAPI_RPMSG_H_
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
new file mode 100644
index 000000000000..d620fa43756c
--- /dev/null
+++ b/include/uapi/linux/rseq.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_RSEQ_H
+#define _UAPI_LINUX_RSEQ_H
+
+/*
+ * linux/rseq.h
+ *
+ * Restartable sequences system call API
+ *
+ * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include <stdint.h>
+#endif
+
+#include <linux/types_32_64.h>
+
+enum rseq_cpu_id_state {
+ RSEQ_CPU_ID_UNINITIALIZED = -1,
+ RSEQ_CPU_ID_REGISTRATION_FAILED = -2,
+};
+
+enum rseq_flags {
+ RSEQ_FLAG_UNREGISTER = (1 << 0),
+};
+
+enum rseq_cs_flags_bit {
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
+};
+
+enum rseq_cs_flags {
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT =
+ (1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT),
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL =
+ (1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
+ (1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
+};
+
+/*
+ * struct rseq_cs is aligned on 4 * 8 bytes to ensure it is always
+ * contained within a single cache-line. It is usually declared as
+ * link-time constant data.
+ */
+struct rseq_cs {
+ /* Version of this structure. */
+ __u32 version;
+ /* enum rseq_cs_flags */
+ __u32 flags;
+ LINUX_FIELD_u32_u64(start_ip);
+ /* Offset from start_ip. */
+ LINUX_FIELD_u32_u64(post_commit_offset);
+ LINUX_FIELD_u32_u64(abort_ip);
+} __attribute__((aligned(4 * sizeof(__u64))));
+
+/*
+ * struct rseq is aligned on 4 * 8 bytes to ensure it is always
+ * contained within a single cache-line.
+ *
+ * A single struct rseq per thread is allowed.
+ */
+struct rseq {
+ /*
+ * Restartable sequences cpu_id_start field. Updated by the
+ * kernel, and read by user-space with single-copy atomicity
+ * semantics. Aligned on 32-bit. Always contains a value in the
+ * range of possible CPUs, although the value may not be the
+ * actual current CPU (e.g. if rseq is not initialized). This
+ * CPU number value should always be compared against the value
+ * of the cpu_id field before performing a rseq commit or
+ * returning a value read from a data structure indexed using
+ * the cpu_id_start value.
+ */
+ __u32 cpu_id_start;
+ /*
+ * Restartable sequences cpu_id field. Updated by the kernel,
+ * and read by user-space with single-copy atomicity semantics.
+ * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
+ * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
+ * former means "rseq uninitialized", and the latter means "rseq
+ * initialization failed". This value is meant to be read within
+ * rseq critical sections and compared with the cpu_id_start
+ * value previously read, before performing the commit instruction,
+ * or read and compared with the cpu_id_start value before returning
+ * a value loaded from a data structure indexed using the
+ * cpu_id_start value.
+ */
+ __u32 cpu_id;
+ /*
+ * Restartable sequences rseq_cs field.
+ *
+ * Contains NULL when no critical section is active for the current
+ * thread, or holds a pointer to the currently active struct rseq_cs.
+ *
+ * Updated by user-space, which sets the address of the currently
+ * active rseq_cs at the beginning of assembly instruction sequence
+ * block, and set to NULL by the kernel when it restarts an assembly
+ * instruction sequence block, as well as when the kernel detects that
+ * it is preempting or delivering a signal outside of the range
+ * targeted by the rseq_cs. Also needs to be set to NULL by user-space
+ * before reclaiming memory that contains the targeted struct rseq_cs.
+ *
+ * Read and set by the kernel with single-copy atomicity semantics.
+ * Set by user-space with single-copy atomicity semantics. Aligned
+ * on 64-bit.
+ */
+ LINUX_FIELD_u32_u64(rseq_cs);
+ /*
+ * - RSEQ_DISABLE flag:
+ *
+ * Fallback fast-track flag for single-stepping.
+ * Set by user-space if lack of progress is detected.
+ * Cleared by user-space after rseq finish.
+ * Read by the kernel.
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
+ * Inhibit instruction sequence block restart and event
+ * counter increment on preemption for this thread.
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
+ * Inhibit instruction sequence block restart and event
+ * counter increment on signal delivery for this thread.
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
+ * Inhibit instruction sequence block restart and event
+ * counter increment on migration for this thread.
+ */
+ __u32 flags;
+} __attribute__((aligned(4 * sizeof(__u64))));
+
+#endif /* _UAPI_LINUX_RSEQ_H */
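
A minimal registration sketch for the new rseq(2) system call. There is no libc wrapper, so a raw syscall is used; __NR_rseq, the signature value, and the assumption that the patched uapi headers (including linux/types_32_64.h) are installed are all inputs of this example, not part of the patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/rseq.h>

#define EXAMPLE_RSEQ_SIG 0x53053053	/* arbitrary abort-handler signature */

/* One struct rseq per thread; must stay registered for the thread's lifetime. */
static __thread struct rseq rseq_area;

int main(void)
{
	if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area),
		    0, EXAMPLE_RSEQ_SIG) != 0) {
		perror("rseq registration");
		return 1;
	}
	printf("registered, currently on cpu %u\n", rseq_area.cpu_id);
	return 0;
}
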
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 9b15005955fa..7d8502313c99 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -254,6 +254,11 @@ enum {
#define RTPROT_DHCP 16 /* DHCP client */
#define RTPROT_MROUTED 17 /* Multicast daemon */
#define RTPROT_BABEL 42 /* Babel daemon */
+#define RTPROT_BGP 186 /* BGP Routes */
+#define RTPROT_ISIS 187 /* ISIS Routes */
+#define RTPROT_OSPF 188 /* OSPF Routes */
+#define RTPROT_RIP 189 /* RIP Routes */
+#define RTPROT_EIGRP 192 /* EIGRP Routes */
/* rtm_scope
@@ -327,6 +332,9 @@ enum rtattr_type_t {
RTA_PAD,
RTA_UID,
RTA_TTL_PROPAGATE,
+ RTA_IP_PROTO,
+ RTA_SPORT,
+ RTA_DPORT,
__RTA_MAX
};
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 2a0bd9dd104d..9efc0e73d50b 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -17,8 +17,9 @@
#define SECCOMP_GET_ACTION_AVAIL 2
/* Valid flags for SECCOMP_SET_MODE_FILTER */
-#define SECCOMP_FILTER_FLAG_TSYNC 1
-#define SECCOMP_FILTER_FLAG_LOG 2
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
/*
* All BPF programs must return a 32-bit value.
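
A hedged sketch of passing the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag when installing a filter via the seccomp(2) syscall. A raw syscall is used because libc wrappers vary; the filter here simply allows everything, so only the flag is of interest.

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter allow_all[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(allow_all) / sizeof(allow_all[0]),
		.filter = allow_all,
	};

	/* Required unless the caller has CAP_SYS_ADMIN in its user namespace. */
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);

	/* SPEC_ALLOW: opt out of the implicit speculation mitigations. */
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog) != 0)
		perror("seccomp");
	return 0;
}
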
diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h
index ef2d8c3e76c1..edc138bdc56d 100644
--- a/include/uapi/linux/seg6_local.h
+++ b/include/uapi/linux/seg6_local.h
@@ -25,6 +25,7 @@ enum {
SEG6_LOCAL_NH6,
SEG6_LOCAL_IIF,
SEG6_LOCAL_OIF,
+ SEG6_LOCAL_BPF,
__SEG6_LOCAL_MAX,
};
#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1)
@@ -59,10 +60,21 @@ enum {
SEG6_LOCAL_ACTION_END_AS = 13,
/* forward to SR-unaware VNF with masquerading */
SEG6_LOCAL_ACTION_END_AM = 14,
+ /* custom BPF action */
+ SEG6_LOCAL_ACTION_END_BPF = 15,
__SEG6_LOCAL_ACTION_MAX,
};
#define SEG6_LOCAL_ACTION_MAX (__SEG6_LOCAL_ACTION_MAX - 1)
+enum {
+ SEG6_LOCAL_BPF_PROG_UNSPEC,
+ SEG6_LOCAL_BPF_PROG,
+ SEG6_LOCAL_BPF_PROG_NAME,
+ __SEG6_LOCAL_BPF_PROG_MAX,
+};
+
+#define SEG6_LOCAL_BPF_PROG_MAX (__SEG6_LOCAL_BPF_PROG_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/signalfd.h b/include/uapi/linux/signalfd.h
index 6f0da42fc5ef..83429a05b698 100644
--- a/include/uapi/linux/signalfd.h
+++ b/include/uapi/linux/signalfd.h
@@ -35,6 +35,10 @@ struct signalfd_siginfo {
__u64 ssi_stime;
__u64 ssi_addr;
__u16 ssi_addr_lsb;
+ __u16 __pad2;
+ __s32 ssi_syscall;
+ __u64 ssi_call_addr;
+ __u32 ssi_arch;
/*
 * Pad structure to 128 bytes. Remember to update the
@@ -45,7 +49,7 @@ struct signalfd_siginfo {
* comes out of a read(2) and we really don't want to have
* a compat on read(2).
*/
- __u8 __pad[46];
+ __u8 __pad[28];
};
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 33a70ece462f..750d89120335 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -276,6 +276,9 @@ enum
LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
+ LINUX_MIB_TCPDELIVERED, /* TCPDelivered */
+ LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */
+ LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 0f272818a4d2..6b58371b1f0d 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -780,24 +780,6 @@ enum {
NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
};
-/* proc/sys/net/irda */
-enum {
- NET_IRDA_DISCOVERY=1,
- NET_IRDA_DEVNAME=2,
- NET_IRDA_DEBUG=3,
- NET_IRDA_FAST_POLL=4,
- NET_IRDA_DISCOVERY_SLOTS=5,
- NET_IRDA_DISCOVERY_TIMEOUT=6,
- NET_IRDA_SLOT_TIMEOUT=7,
- NET_IRDA_MAX_BAUD_RATE=8,
- NET_IRDA_MIN_TX_TURN_TIME=9,
- NET_IRDA_MAX_TX_DATA_SIZE=10,
- NET_IRDA_MAX_TX_WINDOW=11,
- NET_IRDA_MAX_NOREPLY_TIME=12,
- NET_IRDA_WARN_NOREPLY_TIME=13,
- NET_IRDA_LAP_KEEPALIVE_TIME=14,
-};
-
/* CTL_FS names: */
enum
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 0be80f72646b..6e299349b158 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -9,21 +9,22 @@
#define TCMU_VERSION "2.0"
-/*
+/**
+ * DOC: Ring Design
* Ring Design
* -----------
*
* The mmaped area is divided into three parts:
- * 1) The mailbox (struct tcmu_mailbox, below)
- * 2) The command ring
- * 3) Everything beyond the command ring (data)
+ * 1) The mailbox (struct tcmu_mailbox, below);
+ * 2) The command ring;
+ * 3) Everything beyond the command ring (data).
*
* The mailbox tells userspace the offset of the command ring from the
* start of the shared memory region, and how big the command ring is.
*
* The kernel passes SCSI commands to userspace by putting a struct
* tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
- * userspace via uio's interrupt mechanism.
+ * userspace via UIO's interrupt mechanism.
*
* tcmu_cmd_entry contains a header. If the header type is PAD,
* userspace should skip hdr->length bytes (mod cmdr_size) to find the
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 560374c978f9..29eb659aa77a 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -122,6 +122,10 @@ enum {
#define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions */
#define TCP_FASTOPEN_KEY 33 /* Set the key for Fast Open (cookie) */
#define TCP_FASTOPEN_NO_COOKIE 34 /* Enable TFO without a TFO cookie */
+#define TCP_ZEROCOPY_RECEIVE 35
+#define TCP_INQ 36 /* Notify bytes available to read as a cmsg on read */
+
+#define TCP_CM_INQ TCP_INQ
struct tcp_repair_opt {
__u32 opt_code;
@@ -224,6 +228,9 @@ struct tcp_info {
__u64 tcpi_busy_time; /* Time (usec) busy sending data */
__u64 tcpi_rwnd_limited; /* Time (usec) limited by receive window */
__u64 tcpi_sndbuf_limited; /* Time (usec) limited by send buffer */
+
+ __u32 tcpi_delivered;
+ __u32 tcpi_delivered_ce;
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -244,6 +251,8 @@ enum {
TCP_NLA_SNDQ_SIZE, /* Data (bytes) pending in send queue */
TCP_NLA_CA_STATE, /* ca_state of socket */
TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
+ TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */
+ TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */
};
@@ -271,4 +280,11 @@ struct tcp_diag_md5sig {
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
};
+/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
+
+struct tcp_zerocopy_receive {
+ __u64 address; /* in: address of mapping */
+ __u32 length; /* in/out: number of bytes to map/mapped */
+ __u32 recv_skip_hint; /* out: amount of bytes to skip */
+};
#endif /* _UAPI_LINUX_TCP_H */
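TCP_INQ asks the stack to attach a TCP_CM_INQ control message to each receive, reporting how many bytes remain queued on the socket, while TCP_ZEROCOPY_RECEIVE uses getsockopt() with the struct above to map received pages into a range the caller has already mmap()ed. A hedged sketch of the TCP_INQ side only; TCP_INQ/TCP_CM_INQ may need to come from the UAPI header if the libc copy of <netinet/tcp.h> predates them:

/* Hedged sketch: receive data and the "bytes still in queue" hint together. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
	int on = 1;
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	ssize_t n;

	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &on, sizeof(on));
	n = recvmsg(fd, &msg, 0);
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_TCP && cm->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cm), sizeof(*inq));	/* unread bytes */
	return n;
}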
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 16a296612ba4..fcf936656493 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -42,6 +42,13 @@ struct itimerval {
struct timeval it_value; /* current value */
};
+#ifndef __kernel_timespec
+struct __kernel_timespec {
+ __kernel_time64_t tv_sec; /* seconds */
+ long long tv_nsec; /* nanoseconds */
+};
+#endif
+
/*
* legacy timeval structure, only embedded in structures that
* traditionally used 'timeval' to pass time intervals (not absolute
@@ -73,7 +80,6 @@ struct __kernel_old_timeval {
*/
#define CLOCK_SGI_CYCLE 10
#define CLOCK_TAI 11
-#define CLOCK_MONOTONIC_ACTIVE 12
#define MAX_CLOCKS 16
#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index bf6d28677cfe..6b2fd4d9655f 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -209,16 +209,16 @@ struct tipc_group_req {
* The string formatting for each name element is:
* media: media
* interface: media:interface name
- * link: Z.C.N:interface-Z.C.N:interface
- *
+ * link: node:interface-node:interface
*/
-
+#define TIPC_NODEID_LEN 16
#define TIPC_MAX_MEDIA_NAME 16
#define TIPC_MAX_IF_NAME 16
#define TIPC_MAX_BEARER_NAME 32
#define TIPC_MAX_LINK_NAME 68
-#define SIOCGETLINKNAME SIOCPROTOPRIVATE
+#define SIOCGETLINKNAME SIOCPROTOPRIVATE
+#define SIOCGETNODEID (SIOCPROTOPRIVATE + 1)
struct tipc_sioc_ln_req {
__u32 peer;
@@ -226,6 +226,10 @@ struct tipc_sioc_ln_req {
char linkname[TIPC_MAX_LINK_NAME];
};
+struct tipc_sioc_nodeid_req {
+ __u32 peer;
+ char node_id[TIPC_NODEID_LEN];
+};
/* The macros and functions below are deprecated:
*/
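SIOCGETNODEID is the node-identity counterpart of SIOCGETLINKNAME: given a peer's hash address it returns the node identity, up to TIPC_NODEID_LEN octets. A hedged sketch (whether the identity prints cleanly depends on how the peer's node id was configured):

/* Hedged sketch: look up the node identity behind a TIPC peer hash. */
#include <linux/tipc.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int print_tipc_node_id(int tipc_sock, __u32 peer_hash)
{
	struct tipc_sioc_nodeid_req req;

	memset(&req, 0, sizeof(req));
	req.peer = peer_hash;
	if (ioctl(tipc_sock, SIOCGETNODEID, &req) < 0)
		return -1;
	printf("peer node id: %.*s\n", TIPC_NODEID_LEN, req.node_id);
	return 0;
}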
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 3f29e3c8ed06..4b2c93b1934c 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -185,6 +185,11 @@
#define TIPC_DEF_LINK_WIN 50
#define TIPC_MAX_LINK_WIN 8191
+/*
+ * Default MTU for UDP media
+ */
+
+#define TIPC_DEF_LINK_UDP_MTU 14000
struct tipc_node_info {
__be32 addr; /* network address of node */
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 0affb682e5e3..85c11982c89b 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -266,6 +266,7 @@ enum {
TIPC_NLA_PROP_PRIO, /* u32 */
TIPC_NLA_PROP_TOL, /* u32 */
TIPC_NLA_PROP_WIN, /* u32 */
+ TIPC_NLA_PROP_MTU, /* u32 */
__TIPC_NLA_PROP_MAX,
TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index 6ac609a00dea..900a32e63424 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -13,7 +13,7 @@
*/
#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes
* on the callout port */
-#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */
+#define ASYNCB_FOURPORT 1 /* Set OUT1, OUT2 per AST Fourport settings */
#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */
#define ASYNCB_SPD_HI 4 /* Use 57600 instead of 38400 bps */
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index cd4f0b897a48..2fce8b6876e9 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -49,11 +49,7 @@ typedef __u32 __bitwise __wsum;
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
-#ifdef __CHECK_POLL
typedef unsigned __bitwise __poll_t;
-#else
-typedef unsigned __poll_t;
-#endif
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
new file mode 100644
index 000000000000..0a87ace34a57
--- /dev/null
+++ b/include/uapi/linux/types_32_64.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_TYPES_32_64_H
+#define _UAPI_LINUX_TYPES_32_64_H
+
+/*
+ * linux/types_32_64.h
+ *
+ * Integer type declaration for pointers across 32-bit and 64-bit systems.
+ *
+ * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include <stdint.h>
+#endif
+
+#include <asm/byteorder.h>
+
+#ifdef __BYTE_ORDER
+# if (__BYTE_ORDER == __BIG_ENDIAN)
+# define LINUX_BYTE_ORDER_BIG_ENDIAN
+# else
+# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
+# endif
+#else
+# ifdef __BIG_ENDIAN
+# define LINUX_BYTE_ORDER_BIG_ENDIAN
+# else
+# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
+# endif
+#endif
+
+#ifdef __LP64__
+# define LINUX_FIELD_u32_u64(field) __u64 field
+# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
+#else
+# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
+# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
+# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
+ field ## _padding = 0, field = (intptr_t)v
+# else
+# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
+# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
+ field = (intptr_t)v, field ## _padding = 0
+# endif
+#endif
+
+#endif /* _UAPI_LINUX_TYPES_32_64_H */
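The LINUX_FIELD_u32_u64() pair keeps a pointer-carrying field 64 bits wide on both 32-bit and 64-bit builds, inserting an explicit, endianness-aware padding word on 32-bit so the layout matches what a 64-bit kernel expects. A hedged illustration; struct example_shared and example_init() are invented for the example:

/* Hedged sketch: a kernel/user shared struct carrying a user pointer. */
#include <linux/types.h>
#include <linux/types_32_64.h>

struct example_shared {
	LINUX_FIELD_u32_u64(buf_ptr);	/* always occupies 8 bytes */
	__u32 buf_len;
	__u32 flags;
};

static inline void example_init(struct example_shared *s, void *buf, __u32 len)
{
	/* Zeroes the padding half on 32-bit, plain assignment on 64-bit. */
	LINUX_FIELD_u32_u64_INIT_ONSTACK(s->buf_ptr, buf);
	s->buf_len = len;
	s->flags = 0;
}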
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index efb7b5991c2f..09d00f8c442b 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -32,6 +32,7 @@ struct udphdr {
#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
 #define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6 */
 #define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
+#define UDP_SEGMENT 103 /* Set GSO segmentation size */
/* UDP encapsulation types */
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
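UDP_SEGMENT enables UDP GSO on the send path: a single large write is split by the stack (or offloaded to the NIC) into datagrams whose payload does not exceed the configured size. It can be set per socket, as in the hedged sketch below, or per call through a UDP_SEGMENT control message:

/* Hedged sketch: one sendto() fanned out into gso_size-byte datagrams. */
#include <linux/udp.h>		/* UDP_SEGMENT */
#include <netinet/in.h>
#include <sys/socket.h>

ssize_t send_udp_gso(int fd, const struct sockaddr_in *dst,
		     const void *buf, size_t len, int gso_size)
{
	setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
	return sendto(fd, buf, len, 0,
		      (const struct sockaddr *)dst, sizeof(*dst));
}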
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 3a78e7145689..74e520fb944f 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -230,6 +230,14 @@ struct uac1_output_terminal_descriptor {
#define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306
#define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307
+/* Terminals - 2.4 Bi-directional Terminal Types */
+#define UAC_BIDIR_TERMINAL_UNDEFINED 0x400
+#define UAC_BIDIR_TERMINAL_HANDSET 0x401
+#define UAC_BIDIR_TERMINAL_HEADSET 0x402
+#define UAC_BIDIR_TERMINAL_SPEAKER_PHONE 0x403
+#define UAC_BIDIR_TERMINAL_ECHO_SUPPRESSING 0x404
+#define UAC_BIDIR_TERMINAL_ECHO_CANCELING 0x405
+
/* Set bControlSize = 2 as default setting */
#define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2)
@@ -285,9 +293,22 @@ static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor
static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- &desc->baSourceID[desc->bNrInPins + 4] :
- &desc->baSourceID[desc->bNrInPins + 6];
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return &desc->baSourceID[desc->bNrInPins + 4];
+ case UAC_VERSION_2:
+ return &desc->baSourceID[desc->bNrInPins + 6];
+ case UAC_VERSION_3:
+ return &desc->baSourceID[desc->bNrInPins + 2];
+ default:
+ return NULL;
+ }
+}
+
+static inline __u16 uac3_mixer_unit_wClusterDescrID(struct uac_mixer_unit_descriptor *desc)
+{
+ return (desc->baSourceID[desc->bNrInPins + 1] << 8) |
+ desc->baSourceID[desc->bNrInPins];
}
static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc)
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 29c120c88747..fb0cd24c392c 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -197,6 +197,11 @@ struct usb_port_status {
#define USB_EXT_PORT_STAT_RX_LANES 0x00000f00
#define USB_EXT_PORT_STAT_TX_LANES 0x0000f000
+#define USB_EXT_PORT_RX_LANES(p) \
+ (((p) & USB_EXT_PORT_STAT_RX_LANES) >> 8)
+#define USB_EXT_PORT_TX_LANES(p) \
+ (((p) & USB_EXT_PORT_STAT_TX_LANES) >> 12)
+
/*
* wHubCharacteristics (masks)
* See USB 2.0 spec Table 11-13, offset 3
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 40297a3181ed..13b8cb563892 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -57,6 +57,21 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
#define VIRTIO_BALLOON_S_NR 10
+#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
+ VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
+ VIRTIO_BALLOON_S_NAMES_prefix "swap-out", \
+ VIRTIO_BALLOON_S_NAMES_prefix "major-faults", \
+ VIRTIO_BALLOON_S_NAMES_prefix "minor-faults", \
+ VIRTIO_BALLOON_S_NAMES_prefix "free-memory", \
+ VIRTIO_BALLOON_S_NAMES_prefix "total-memory", \
+ VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
+ VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+}
+
+#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
+
/*
* Memory statistics structure.
* Driver fills an array of these structures and passes to device.
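VIRTIO_BALLOON_S_NAMES_WITH_PREFIX() keeps the human-readable stat names in one list that stays index-aligned with the VIRTIO_BALLOON_S_* tags, with an optional prefix for consumers that want one. A hedged sketch of looking a name up by tag:

/* Hedged sketch: resolving a balloon stat tag to its generated name. */
#include <linux/virtio_balloon.h>
#include <stdio.h>

static const char *const balloon_stat_names[VIRTIO_BALLOON_S_NR] =
	VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("balloon/");

static void print_balloon_stat(__u16 tag, __u64 val)
{
	if (tag < VIRTIO_BALLOON_S_NR)
		printf("%s = %llu\n", balloon_stat_names[tag],
		       (unsigned long long)val);
}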
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 308e2096291f..449132c76b1c 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -45,11 +45,14 @@
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80
-/* Some virtio feature bits (currently bits 28 through 32) are reserved for the
- * transport being used (eg. virtio_ring), the rest are per-device feature
- * bits. */
+/*
+ * Virtio feature bits VIRTIO_TRANSPORT_F_START through
+ * VIRTIO_TRANSPORT_F_END are reserved for the transport
+ * being used (e.g. virtio_ring, virtio_pci etc.), the
+ * rest are per-device feature bits.
+ */
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 34
+#define VIRTIO_TRANSPORT_F_END 38
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -71,4 +74,9 @@
* this is for compatibility with legacy systems.
*/
#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/*
+ * Does the device support Single Root I/O Virtualization?
+ */
+#define VIRTIO_F_SR_IOV 37
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 4b04ead26cd9..f43c3c6171ff 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -260,6 +260,7 @@ struct virtio_gpu_cmd_submit {
};
#define VIRTIO_GPU_CAPSET_VIRGL 1
+#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 5de6ed37695b..a3715a3224c1 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -57,6 +57,9 @@
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
+ * with the same MAC.
+ */
#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */
#ifndef VIRTIO_NET_NO_LEGACY
diff --git a/include/uapi/linux/vmcore.h b/include/uapi/linux/vmcore.h
new file mode 100644
index 000000000000..022619668e0e
--- /dev/null
+++ b/include/uapi/linux/vmcore.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI_VMCORE_H
+#define _UAPI_VMCORE_H
+
+#include <linux/types.h>
+
+#define VMCOREDD_NOTE_NAME "LINUX"
+#define VMCOREDD_MAX_NAME_BYTES 44
+
+struct vmcoredd_header {
+ __u32 n_namesz; /* Name size */
+ __u32 n_descsz; /* Content size */
+ __u32 n_type; /* NT_VMCOREDD */
+ __u8 name[8]; /* LINUX\0\0\0 */
+ __u8 dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Device dump's name */
+};
+
+#endif /* _UAPI_VMCORE_H */
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
index 0af83d80fb3e..97937cfa3baa 100644
--- a/include/uapi/misc/ocxl.h
+++ b/include/uapi/misc/ocxl.h
@@ -48,6 +48,18 @@ struct ocxl_ioctl_metadata {
__u64 reserved[13]; // Total of 16*u64
};
+struct ocxl_ioctl_p9_wait {
+ __u16 thread_id; // The thread ID required to wake this thread
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 reserved3[3];
+};
+
+#define OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT 0x01
+struct ocxl_ioctl_features {
+ __u64 flags[4];
+};
+
struct ocxl_ioctl_irq_fd {
__u64 irq_offset;
__s32 eventfd;
@@ -62,5 +74,7 @@ struct ocxl_ioctl_irq_fd {
#define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64)
#define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd)
#define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata)
+#define OCXL_IOCTL_ENABLE_P9_WAIT _IOR(OCXL_MAGIC, 0x15, struct ocxl_ioctl_p9_wait)
+#define OCXL_IOCTL_GET_FEATURES _IOR(OCXL_MAGIC, 0x16, struct ocxl_ioctl_features)
#endif /* _UAPI_MISC_OCXL_H */
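A hedged reading of the new ioctls (the flow is not spelled out in the header itself): query OCXL_IOCTL_GET_FEATURES first, and only if the P9_WAIT flag is present call OCXL_IOCTL_ENABLE_P9_WAIT to obtain the thread_id needed to wake a thread blocked in the POWER9 wait instruction:

/* Hedged sketch: probing and enabling the POWER9 wait feature on an ocxl fd. */
#include <misc/ocxl.h>
#include <string.h>
#include <sys/ioctl.h>

/* Returns the wake-up thread id, or -1 if the feature is unavailable. */
static int enable_p9_wait(int afu_fd)
{
	struct ocxl_ioctl_features features;
	struct ocxl_ioctl_p9_wait wait;

	memset(&features, 0, sizeof(features));
	memset(&wait, 0, sizeof(wait));
	if (ioctl(afu_fd, OCXL_IOCTL_GET_FEATURES, &features) < 0)
		return -1;
	if (!(features.flags[0] & OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT))
		return -1;
	if (ioctl(afu_fd, OCXL_IOCTL_ENABLE_P9_WAIT, &wait) < 0)
		return -1;
	return wait.thread_id;
}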
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 69c37ecbff7e..a74ca232f1fc 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -139,6 +139,15 @@
#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1)
#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
+/* DAI clock gating */
+#define SND_SOC_TPLG_DAI_CLK_GATE_UNDEFINED 0
+#define SND_SOC_TPLG_DAI_CLK_GATE_GATED 1
+#define SND_SOC_TPLG_DAI_CLK_GATE_CONT 2
+
+/* DAI mclk_direction */
+#define SND_SOC_TPLG_MCLK_CO 0 /* for codec, mclk is output */
+#define SND_SOC_TPLG_MCLK_CI 1 /* for codec, mclk is input */
+
/* DAI physical PCM data formats.
* Add new formats to the end of the list.
*/
@@ -160,6 +169,18 @@
#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
#define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3)
+/* DAI topology BCLK parameter
+ * For backwards compatibility, the codec is the bclk master by default
+ */
+#define SND_SOC_TPLG_BCLK_CM 0 /* codec is bclk master */
+#define SND_SOC_TPLG_BCLK_CS 1 /* codec is bclk slave */
+
+/* DAI topology FSYNC parameter
+ * For backwards compatibility, the codec is the fsync master by default
+ */
+#define SND_SOC_TPLG_FSYNC_CM 0 /* codec is fsync master */
+#define SND_SOC_TPLG_FSYNC_CS 1 /* codec is fsync slave */
+
/*
* Block Header.
* This header precedes all object and object arrays below.
@@ -312,12 +333,12 @@ struct snd_soc_tplg_hw_config {
__le32 size; /* in bytes of this structure */
__le32 id; /* unique ID - - used to match */
__le32 fmt; /* SND_SOC_DAI_FORMAT_ format value */
- __u8 clock_gated; /* 1 if clock can be gated to save power */
+ __u8 clock_gated; /* SND_SOC_TPLG_DAI_CLK_GATE_ value */
__u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */
__u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */
- __u8 bclk_master; /* 1 for master of BCLK, 0 for slave */
- __u8 fsync_master; /* 1 for master of FSYNC, 0 for slave */
- __u8 mclk_direction; /* 0 for input, 1 for output */
+ __u8 bclk_master; /* SND_SOC_TPLG_BCLK_ value */
+ __u8 fsync_master; /* SND_SOC_TPLG_FSYNC_ value */
+ __u8 mclk_direction; /* SND_SOC_TPLG_MCLK_ value */
__le16 reserved; /* for 32bit alignment */
 __le32 mclk_rate; /* MCLK or SYSCLK frequency in Hz */
 __le32 bclk_rate; /* BCLK frequency in Hz */
@@ -552,4 +573,61 @@ struct snd_soc_tplg_dai {
__le32 flags; /* SND_SOC_TPLG_DAI_FLGBIT_* */
struct snd_soc_tplg_private priv;
} __attribute__((packed));
+
+/*
+ * Old version of ABI structs, supported for backward compatibility.
+ */
+
+/* Manifest v4 */
+struct snd_soc_tplg_manifest_v4 {
+ __le32 size; /* in bytes of this structure */
+ __le32 control_elems; /* number of control elements */
+ __le32 widget_elems; /* number of widget elements */
+ __le32 graph_elems; /* number of graph elements */
+ __le32 pcm_elems; /* number of PCM elements */
+ __le32 dai_link_elems; /* number of DAI link elements */
+ struct snd_soc_tplg_private priv;
+} __packed;
+
+/* Stream Capabilities v4 */
+struct snd_soc_tplg_stream_caps_v4 {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le64 formats; /* supported formats SNDRV_PCM_FMTBIT_* */
+ __le32 rates; /* supported rates SNDRV_PCM_RATE_* */
+ __le32 rate_min; /* min rate */
+ __le32 rate_max; /* max rate */
+ __le32 channels_min; /* min channels */
+ __le32 channels_max; /* max channels */
+ __le32 periods_min; /* min number of periods */
+ __le32 periods_max; /* max number of periods */
+ __le32 period_size_min; /* min period size bytes */
+ __le32 period_size_max; /* max period size bytes */
+ __le32 buffer_size_min; /* min buffer size bytes */
+ __le32 buffer_size_max; /* max buffer size bytes */
+} __packed;
+
+/* PCM v4 */
+struct snd_soc_tplg_pcm_v4 {
+ __le32 size; /* in bytes of this structure */
+ char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 pcm_id; /* unique ID - used to match with DAI link */
+ __le32 dai_id; /* unique ID - used to match */
+ __le32 playback; /* supports playback mode */
+ __le32 capture; /* supports capture mode */
+ __le32 compress; /* 1 = compressed; 0 = PCM */
+ struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */
+ __le32 num_streams; /* number of streams */
+ struct snd_soc_tplg_stream_caps_v4 caps[2]; /* playback and capture for DAI */
+} __packed;
+
+/* Physical link config v4 */
+struct snd_soc_tplg_link_config_v4 {
+ __le32 size; /* in bytes of this structure */
+ __le32 id; /* unique ID - used to match */
+ struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs for playback and capture */
+ __le32 num_streams; /* number of streams */
+} __packed;
+
#endif
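With the constants added above, a topology generator can fill the DAI format fields of snd_soc_tplg_hw_config symbolically instead of with bare 0/1 values. A hedged fragment; the particular choices are arbitrary illustration, not taken from the patch:

/* Hedged sketch: filling the DAI format fields with the named constants. */
#include <sound/asoc.h>

static void fill_dai_hw_config(struct snd_soc_tplg_hw_config *cfg)
{
	cfg->clock_gated = SND_SOC_TPLG_DAI_CLK_GATE_GATED;
	cfg->bclk_master = SND_SOC_TPLG_BCLK_CM;	/* codec drives BCLK */
	cfg->fsync_master = SND_SOC_TPLG_FSYNC_CM;	/* codec drives FSYNC */
	cfg->mclk_direction = SND_SOC_TPLG_MCLK_CO;	/* MCLK is a codec output */
	cfg->invert_bclk = 0;
	cfg->invert_fsync = 0;
}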
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
new file mode 100644
index 000000000000..f58cafa42f18
--- /dev/null
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * skl-tplg-interface.h - Intel DSP FW private data interface
+ *
+ * Copyright (C) 2015 Intel Corp
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ * Nilofer, Samreen <samreen.nilofer@intel.com>
+ */
+
+#ifndef __HDA_TPLG_INTERFACE_H__
+#define __HDA_TPLG_INTERFACE_H__
+
+/*
+ * Default types range from 0~12. type can range from 0 to 0xff
+ * SST types start at higher to avoid any overlapping in future
+ */
+#define SKL_CONTROL_TYPE_BYTE_TLV 0x100
+#define SKL_CONTROL_TYPE_MIC_SELECT 0x102
+
+#define HDA_SST_CFG_MAX 900 /* size of copier cfg */
+#define MAX_IN_QUEUE 8
+#define MAX_OUT_QUEUE 8
+
+#define SKL_UUID_STR_SZ 40
+/* Event types go here */
+/* Reserve event type 0 for no event handlers */
+enum skl_event_types {
+ SKL_EVENT_NONE = 0,
+ SKL_MIXER_EVENT,
+ SKL_MUX_EVENT,
+ SKL_VMIXER_EVENT,
+ SKL_PGA_EVENT
+};
+
+/**
+ * enum skl_ch_cfg - channel configuration
+ *
+ * @SKL_CH_CFG_MONO: One channel only
+ * @SKL_CH_CFG_STEREO: L & R
+ * @SKL_CH_CFG_2_1: L, R & LFE
+ * @SKL_CH_CFG_3_0: L, C & R
+ * @SKL_CH_CFG_3_1: L, C, R & LFE
+ * @SKL_CH_CFG_QUATRO: L, R, Ls & Rs
+ * @SKL_CH_CFG_4_0: L, C, R & Cs
+ * @SKL_CH_CFG_5_0: L, C, R, Ls & Rs
+ * @SKL_CH_CFG_5_1: L, C, R, Ls, Rs & LFE
+ * @SKL_CH_CFG_DUAL_MONO: One channel replicated in two
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_0: Stereo(L,R) in 4 slots, 1st stream:[ L, R, -, - ]
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_1: Stereo(L,R) in 4 slots, 2nd stream:[ -, -, L, R ]
+ * @SKL_CH_CFG_INVALID: Invalid
+ */
+enum skl_ch_cfg {
+ SKL_CH_CFG_MONO = 0,
+ SKL_CH_CFG_STEREO = 1,
+ SKL_CH_CFG_2_1 = 2,
+ SKL_CH_CFG_3_0 = 3,
+ SKL_CH_CFG_3_1 = 4,
+ SKL_CH_CFG_QUATRO = 5,
+ SKL_CH_CFG_4_0 = 6,
+ SKL_CH_CFG_5_0 = 7,
+ SKL_CH_CFG_5_1 = 8,
+ SKL_CH_CFG_DUAL_MONO = 9,
+ SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
+ SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
+ SKL_CH_CFG_4_CHANNEL = 12,
+ SKL_CH_CFG_INVALID
+};
+
+enum skl_module_type {
+ SKL_MODULE_TYPE_MIXER = 0,
+ SKL_MODULE_TYPE_COPIER,
+ SKL_MODULE_TYPE_UPDWMIX,
+ SKL_MODULE_TYPE_SRCINT,
+ SKL_MODULE_TYPE_ALGO,
+ SKL_MODULE_TYPE_BASE_OUTFMT,
+ SKL_MODULE_TYPE_KPB,
+ SKL_MODULE_TYPE_MIC_SELECT,
+};
+
+enum skl_core_affinity {
+ SKL_AFFINITY_CORE_0 = 0,
+ SKL_AFFINITY_CORE_1,
+ SKL_AFFINITY_CORE_MAX
+};
+
+enum skl_pipe_conn_type {
+ SKL_PIPE_CONN_TYPE_NONE = 0,
+ SKL_PIPE_CONN_TYPE_FE,
+ SKL_PIPE_CONN_TYPE_BE
+};
+
+enum skl_hw_conn_type {
+ SKL_CONN_NONE = 0,
+ SKL_CONN_SOURCE = 1,
+ SKL_CONN_SINK = 2
+};
+
+enum skl_dev_type {
+ SKL_DEVICE_BT = 0x0,
+ SKL_DEVICE_DMIC = 0x1,
+ SKL_DEVICE_I2S = 0x2,
+ SKL_DEVICE_SLIMBUS = 0x3,
+ SKL_DEVICE_HDALINK = 0x4,
+ SKL_DEVICE_HDAHOST = 0x5,
+ SKL_DEVICE_NONE
+};
+
+/**
+ * enum skl_interleaving - interleaving style
+ *
+ * @SKL_INTERLEAVING_PER_CHANNEL: [s1_ch1...s1_chN,...,sM_ch1...sM_chN]
+ * @SKL_INTERLEAVING_PER_SAMPLE: [s1_ch1...sM_ch1,...,s1_chN...sM_chN]
+ */
+enum skl_interleaving {
+ SKL_INTERLEAVING_PER_CHANNEL = 0,
+ SKL_INTERLEAVING_PER_SAMPLE = 1,
+};
+
+enum skl_sample_type {
+ SKL_SAMPLE_TYPE_INT_MSB = 0,
+ SKL_SAMPLE_TYPE_INT_LSB = 1,
+ SKL_SAMPLE_TYPE_INT_SIGNED = 2,
+ SKL_SAMPLE_TYPE_INT_UNSIGNED = 3,
+ SKL_SAMPLE_TYPE_FLOAT = 4
+};
+
+enum module_pin_type {
+ /* All pins of the module take the same PCM inputs or outputs
+ * e.g. mixout
+ */
+ SKL_PIN_TYPE_HOMOGENEOUS,
+ /* All pins of the module take different PCM inputs or outputs
+ * e.g. mux
+ */
+ SKL_PIN_TYPE_HETEROGENEOUS,
+};
+
+enum skl_module_param_type {
+ SKL_PARAM_DEFAULT = 0,
+ SKL_PARAM_INIT,
+ SKL_PARAM_SET,
+ SKL_PARAM_BIND
+};
+
+struct skl_dfw_algo_data {
+ u32 set_params:2;
+ u32 rsvd:30;
+ u32 param_id;
+ u32 max;
+ char params[0];
+} __packed;
+
+enum skl_tkn_dir {
+ SKL_DIR_IN,
+ SKL_DIR_OUT
+};
+
+enum skl_tuple_type {
+ SKL_TYPE_TUPLE,
+ SKL_TYPE_DATA
+};
+
+/* v4 configuration data */
+
+struct skl_dfw_v4_module_pin {
+ u16 module_id;
+ u16 instance_id;
+} __packed;
+
+struct skl_dfw_v4_module_fmt {
+ u32 channels;
+ u32 freq;
+ u32 bit_depth;
+ u32 valid_bit_depth;
+ u32 ch_cfg;
+ u32 interleaving_style;
+ u32 sample_type;
+ u32 ch_map;
+} __packed;
+
+struct skl_dfw_v4_module_caps {
+ u32 set_params:2;
+ u32 rsvd:30;
+ u32 param_id;
+ u32 caps_size;
+ u32 caps[HDA_SST_CFG_MAX];
+} __packed;
+
+struct skl_dfw_v4_pipe {
+ u8 pipe_id;
+ u8 pipe_priority;
+ u16 conn_type:4;
+ u16 rsvd:4;
+ u16 memory_pages:8;
+} __packed;
+
+struct skl_dfw_v4_module {
+ char uuid[SKL_UUID_STR_SZ];
+
+ u16 module_id;
+ u16 instance_id;
+ u32 max_mcps;
+ u32 mem_pages;
+ u32 obs;
+ u32 ibs;
+ u32 vbus_id;
+
+ u32 max_in_queue:8;
+ u32 max_out_queue:8;
+ u32 time_slot:8;
+ u32 core_id:4;
+ u32 rsvd1:4;
+
+ u32 module_type:8;
+ u32 conn_type:4;
+ u32 dev_type:4;
+ u32 hw_conn_type:4;
+ u32 rsvd2:12;
+
+ u32 params_fixup:8;
+ u32 converter:8;
+ u32 input_pin_type:1;
+ u32 output_pin_type:1;
+ u32 is_dynamic_in_pin:1;
+ u32 is_dynamic_out_pin:1;
+ u32 is_loadable:1;
+ u32 rsvd3:11;
+
+ struct skl_dfw_v4_pipe pipe;
+ struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
+ struct skl_dfw_v4_module_fmt out_fmt[MAX_OUT_QUEUE];
+ struct skl_dfw_v4_module_pin in_pin[MAX_IN_QUEUE];
+ struct skl_dfw_v4_module_pin out_pin[MAX_OUT_QUEUE];
+ struct skl_dfw_v4_module_caps caps;
+} __packed;
+
+#endif
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index be5371f09a62..7d6d65f60a42 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -42,6 +42,10 @@
#define SNDRV_CTL_TLVD_LENGTH(...) \
((unsigned int)sizeof((const unsigned int[]) { __VA_ARGS__ }))
+/* Accessor offsets for TLV data items */
+#define SNDRV_CTL_TLVO_TYPE 0
+#define SNDRV_CTL_TLVO_LEN 1
+
#define SNDRV_CTL_TLVD_CONTAINER_ITEM(...) \
SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_CONTAINER, __VA_ARGS__)
#define SNDRV_CTL_TLVD_DECLARE_CONTAINER(name, ...) \
@@ -61,6 +65,10 @@
SNDRV_CTL_TLVD_DB_SCALE_ITEM(min, step, mute) \
}
+/* Accessor offsets for min, mute and step items in dB scale type TLV */
+#define SNDRV_CTL_TLVO_DB_SCALE_MIN 2
+#define SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP 3
+
/* dB scale specified with min/max values instead of step */
#define SNDRV_CTL_TLVD_DB_MINMAX_ITEM(min_dB, max_dB) \
SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_MINMAX, (min_dB), (max_dB))
@@ -75,6 +83,10 @@
SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
}
+/* Accessor offsets for min, max items in db-minmax types of TLV. */
+#define SNDRV_CTL_TLVO_DB_MINMAX_MIN 2
+#define SNDRV_CTL_TLVO_DB_MINMAX_MAX 3
+
/* linear volume between min_dB and max_dB (.01dB unit) */
#define SNDRV_CTL_TLVD_DB_LINEAR_ITEM(min_dB, max_dB) \
SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_LINEAR, (min_dB), (max_dB))
@@ -83,6 +95,10 @@
SNDRV_CTL_TLVD_DB_LINEAR_ITEM(min_dB, max_dB) \
}
+/* Accessor offsets for min, max items in db-linear type of TLV. */
+#define SNDRV_CTL_TLVO_DB_LINEAR_MIN 2
+#define SNDRV_CTL_TLVO_DB_LINEAR_MAX 3
+
/* dB range container:
* Items in dB range container must be ordered by their values and by their
* dB values. This implies that larger values must correspond with larger
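The SNDRV_CTL_TLVO_* offsets give symbolic names to positions inside TLV data built with the SNDRV_CTL_TLVD_DECLARE_* macros, so user space can pick out individual items without hard-coded indices. A hedged sketch using the dB-scale variant:

/* Hedged sketch: reading items of a declared dB-scale TLV by symbolic offset. */
#include <sound/tlv.h>
#include <stdio.h>

/* -50.00 dB minimum, 1.00 dB steps, lowest step means mute. */
static SNDRV_CTL_TLVD_DECLARE_DB_SCALE(db_scale_example, -5000, 100, 1);

static void dump_db_scale(void)
{
	if (db_scale_example[SNDRV_CTL_TLVO_TYPE] == SNDRV_CTL_TLVT_DB_SCALE)
		printf("min %d (0.01 dB units), mute-and-step word 0x%x\n",
		       (int)db_scale_example[SNDRV_CTL_TLVO_DB_SCALE_MIN],
		       db_scale_example[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP]);
}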
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index 39d3e7b8e993..d2029556083e 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -89,6 +89,15 @@ struct privcmd_dm_op {
const struct privcmd_dm_op_buf __user *ubufs;
};
+struct privcmd_mmap_resource {
+ domid_t dom;
+ __u32 type;
+ __u32 id;
+ __u32 idx;
+ __u64 num;
+ __u64 addr;
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -114,5 +123,7 @@ struct privcmd_dm_op {
_IOC(_IOC_NONE, 'P', 5, sizeof(struct privcmd_dm_op))
#define IOCTL_PRIVCMD_RESTRICT \
_IOC(_IOC_NONE, 'P', 6, sizeof(domid_t))
+#define IOCTL_PRIVCMD_MMAP_RESOURCE \
+ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
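IOCTL_PRIVCMD_MMAP_RESOURCE lets a toolstack map a guest resource (an ioreq server page or a range of grant-table frames) at a virtual address it has already reserved by mmap()ing the privcmd device. A hedged sketch of the call; the XENMEM_resource_* type constants come from xen/interface/memory.h further down in this patch, and real toolstacks typically carry their own copies of these headers:

/* Hedged sketch: mapping 'num' frames of a guest resource via privcmd. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int map_resource(int privcmd_fd, domid_t dom, __u32 type, __u32 id,
			__u32 first_idx, __u64 num, void *reserved_va)
{
	struct privcmd_mmap_resource mr;

	memset(&mr, 0, sizeof(mr));
	mr.dom = dom;
	mr.type = type;		/* e.g. XENMEM_resource_grant_table */
	mr.id = id;
	mr.idx = first_idx;
	mr.num = num;
	mr.addr = (__u64)(uintptr_t)reserved_va;	/* VA previously mmap()ed on privcmd_fd */
	return ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &mr);
}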
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h
deleted file mode 100644
index ac329ee1d753..000000000000
--- a/include/video/auo_k190xfb.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Definitions for AUO-K190X framebuffer drivers
- *
- * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_VIDEO_AUO_K190XFB_H_
-#define _LINUX_VIDEO_AUO_K190XFB_H_
-
-/* Controller standby command needs a param */
-#define AUOK190X_QUIRK_STANDBYPARAM (1 << 0)
-
-/* Controller standby is completely broken */
-#define AUOK190X_QUIRK_STANDBYBROKEN (1 << 1)
-
-/*
- * Resolutions for the displays
- */
-#define AUOK190X_RESOLUTION_800_600 0
-#define AUOK190X_RESOLUTION_1024_768 1
-#define AUOK190X_RESOLUTION_600_800 4
-#define AUOK190X_RESOLUTION_768_1024 5
-
-/*
- * struct used by auok190x. board specific stuff comes from *board
- */
-struct auok190xfb_par {
- struct fb_info *info;
- struct auok190x_board *board;
-
- struct regulator *regulator;
-
- struct mutex io_lock;
- struct delayed_work work;
- wait_queue_head_t waitq;
- int resolution;
- int rotation;
- int consecutive_threshold;
- int update_cnt;
-
- /* panel and controller informations */
- int epd_type;
- int panel_size_int;
- int panel_size_float;
- int panel_model;
- int tcon_version;
- int lut_version;
-
- /* individual controller callbacks */
- void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
- void (*update_all)(struct auok190xfb_par *par);
- bool (*need_refresh)(struct auok190xfb_par *par);
- void (*init)(struct auok190xfb_par *par);
- void (*recover)(struct auok190xfb_par *par);
-
- int update_mode; /* mode to use for updates */
- int last_mode; /* update mode last used */
- int flash;
-
- /* power management */
- int autosuspend_delay;
- bool standby;
- bool manual_standby;
-};
-
-/**
- * Board specific platform-data
- * @init: initialize the controller interface
- * @cleanup: cleanup the controller interface
- * @wait_for_rdy: wait until the controller is not busy anymore
- * @set_ctl: change an interface control
- * @set_hdb: write a value to the data register
- * @get_hdb: read a value from the data register
- * @setup_irq: method to setup the irq handling on the busy gpio
- * @gpio_nsleep: sleep gpio
- * @gpio_nrst: reset gpio
- * @gpio_nbusy: busy gpio
- * @resolution: one of the AUOK190X_RESOLUTION constants
- * @rotation: rotation of the framebuffer
- * @quirks: controller quirks to honor
- * @fps: frames per second for defio
- */
-struct auok190x_board {
- int (*init)(struct auok190xfb_par *);
- void (*cleanup)(struct auok190xfb_par *);
- int (*wait_for_rdy)(struct auok190xfb_par *);
-
- void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8);
- void (*set_hdb)(struct auok190xfb_par *, u16);
- u16 (*get_hdb)(struct auok190xfb_par *);
-
- int (*setup_irq)(struct fb_info *);
-
- int gpio_nsleep;
- int gpio_nrst;
- int gpio_nbusy;
-
- int resolution;
- int quirks;
- int fps;
-};
-
-#endif
diff --git a/include/video/omapfb_dss.h b/include/video/omapfb_dss.h
index 1d38901d599d..12755d8d9b4f 100644
--- a/include/video/omapfb_dss.h
+++ b/include/video/omapfb_dss.h
@@ -774,6 +774,12 @@ struct omap_dss_driver {
const struct hdmi_avi_infoframe *avi);
};
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+
+#if IS_ENABLED(CONFIG_FB_OMAP2)
+
enum omapdss_version omapdss_get_version(void);
bool omapdss_is_initialized(void);
@@ -785,7 +791,6 @@ void omapdss_unregister_display(struct omap_dss_device *dssdev);
struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
void omap_dss_put_device(struct omap_dss_device *dssdev);
-#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
@@ -826,7 +831,6 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings);
-typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -856,5 +860,51 @@ omapdss_of_get_first_endpoint(const struct device_node *parent);
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
+#else
+
+static inline enum omapdss_version omapdss_get_version(void)
+{ return OMAPDSS_VER_UNKNOWN; };
+
+static inline bool omapdss_is_initialized(void)
+{ return false; };
+
+static inline int omap_dispc_register_isr(omap_dispc_isr_t isr,
+ void *arg, u32 mask)
+{ return 0; };
+
+static inline int omap_dispc_unregister_isr(omap_dispc_isr_t isr,
+ void *arg, u32 mask)
+{ return 0; };
+
+static inline struct omap_dss_device
+*omap_dss_get_device(struct omap_dss_device *dssdev)
+{ return NULL; };
+
+static inline struct omap_dss_device
+*omap_dss_get_next_device(struct omap_dss_device *from)
+{ return NULL; };
+
+static inline void omap_dss_put_device(struct omap_dss_device *dssdev) {};
+
+static inline int omapdss_compat_init(void)
+{ return 0; };
+
+static inline void omapdss_compat_uninit(void) {};
+
+static inline int omap_dss_get_num_overlay_managers(void)
+{ return 0; };
+
+static inline struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
+{ return NULL; };
+
+static inline int omap_dss_get_num_overlays(void)
+{ return 0; };
+
+static inline struct omap_overlay *omap_dss_get_overlay(int num)
+{ return NULL; };
+
+
+#endif /* FB_OMAP2 */
+
#endif /* __OMAPFB_DSS_H */
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index f706b0fed399..84aa976ca4ea 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -3,7 +3,6 @@
#define __ASM_SH_MOBILE_LCDC_H__
#include <linux/fb.h>
-#include <video/sh_mobile_meram.h>
/* Register definitions */
#define _LDDCKR 0x410
@@ -184,7 +183,6 @@ struct sh_mobile_lcdc_chan_cfg {
struct sh_mobile_lcdc_panel_cfg panel_cfg;
struct sh_mobile_lcdc_bl_info bl_info;
struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
- const struct sh_mobile_meram_cfg *meram_cfg;
struct platform_device *tx_dev; /* HDMI/DSI transmitter device */
};
@@ -193,7 +191,6 @@ struct sh_mobile_lcdc_info {
int clock_source;
struct sh_mobile_lcdc_chan_cfg ch[2];
struct sh_mobile_lcdc_overlay_cfg overlays[4];
- struct sh_mobile_meram_info *meram_dev;
};
#endif /* __ASM_SH_MOBILE_LCDC_H__ */
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h
deleted file mode 100644
index f4efc21e205d..000000000000
--- a/include/video/sh_mobile_meram.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __VIDEO_SH_MOBILE_MERAM_H__
-#define __VIDEO_SH_MOBILE_MERAM_H__
-
-/* For sh_mobile_meram_info.addr_mode */
-enum {
- SH_MOBILE_MERAM_MODE0 = 0,
- SH_MOBILE_MERAM_MODE1
-};
-
-enum {
- SH_MOBILE_MERAM_PF_NV = 0,
- SH_MOBILE_MERAM_PF_RGB,
- SH_MOBILE_MERAM_PF_NV24
-};
-
-
-struct sh_mobile_meram_priv;
-
-/*
- * struct sh_mobile_meram_info - MERAM platform data
- * @reserved_icbs: Bitmask of reserved ICBs (for instance used through UIO)
- */
-struct sh_mobile_meram_info {
- int addr_mode;
- u32 reserved_icbs;
- struct sh_mobile_meram_priv *priv;
- struct platform_device *pdev;
-};
-
-/* icb config */
-struct sh_mobile_meram_icb_cfg {
- unsigned int meram_size; /* MERAM Buffer Size to use */
-};
-
-struct sh_mobile_meram_cfg {
- struct sh_mobile_meram_icb_cfg icb[2];
-};
-
-#if defined(CONFIG_FB_SH_MOBILE_MERAM) || \
- defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE)
-unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev,
- size_t size);
-void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev,
- unsigned long mem, size_t size);
-void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev,
- const struct sh_mobile_meram_cfg *cfg,
- unsigned int xres, unsigned int yres,
- unsigned int pixelformat,
- unsigned int *pitch);
-void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data);
-void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data,
- unsigned long base_addr_y,
- unsigned long base_addr_c,
- unsigned long *icb_addr_y,
- unsigned long *icb_addr_c);
-#else
-static inline unsigned long
-sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, size_t size)
-{
- return 0;
-}
-
-static inline void
-sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev,
- unsigned long mem, size_t size)
-{
-}
-
-static inline void *
-sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev,
- const struct sh_mobile_meram_cfg *cfg,
- unsigned int xres, unsigned int yres,
- unsigned int pixelformat,
- unsigned int *pitch)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void
-sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data)
-{
-}
-
-static inline void
-sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data,
- unsigned long base_addr_y,
- unsigned long base_addr_c,
- unsigned long *icb_addr_y,
- unsigned long *icb_addr_c)
-{
-}
-#endif
-
-#endif /* __VIDEO_SH_MOBILE_MERAM_H__ */
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 2a9510ade701..e2340a4130cf 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -317,7 +317,7 @@ struct xenkbd_position {
* Linux [2] and Windows [3] multi-touch support.
*
* [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml
- * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
+ * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.rst
* [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx
*
*
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 5c918276835e..78bb5d9f8d83 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -38,6 +38,13 @@
/*
******************************************************************************
+ * Protocol version
+ ******************************************************************************
+ */
+#define XENSND_PROTOCOL_VERSION 2
+
+/*
+ ******************************************************************************
* Feature and Parameter Negotiation
******************************************************************************
*
@@ -106,6 +113,8 @@
*
* /local/domain/1/device/vsnd/0/0/0/ring-ref = "386"
* /local/domain/1/device/vsnd/0/0/0/event-channel = "15"
+ * /local/domain/1/device/vsnd/0/0/0/evt-ring-ref = "1386"
+ * /local/domain/1/device/vsnd/0/0/0/evt-event-channel = "215"
*
*------------------------------ Stream 1, capture ----------------------------
*
@@ -115,6 +124,8 @@
*
* /local/domain/1/device/vsnd/0/0/1/ring-ref = "384"
* /local/domain/1/device/vsnd/0/0/1/event-channel = "13"
+ * /local/domain/1/device/vsnd/0/0/1/evt-ring-ref = "1384"
+ * /local/domain/1/device/vsnd/0/0/1/evt-event-channel = "213"
*
*------------------------------- PCM device 1 --------------------------------
*
@@ -128,6 +139,8 @@
*
* /local/domain/1/device/vsnd/0/1/0/ring-ref = "387"
* /local/domain/1/device/vsnd/0/1/0/event-channel = "151"
+ * /local/domain/1/device/vsnd/0/1/0/evt-ring-ref = "1387"
+ * /local/domain/1/device/vsnd/0/1/0/evt-event-channel = "351"
*
*------------------------------- PCM device 2 --------------------------------
*
@@ -140,6 +153,8 @@
*
* /local/domain/1/device/vsnd/0/2/0/ring-ref = "389"
* /local/domain/1/device/vsnd/0/2/0/event-channel = "152"
+ * /local/domain/1/device/vsnd/0/2/0/evt-ring-ref = "1389"
+ * /local/domain/1/device/vsnd/0/2/0/evt-event-channel = "452"
*
******************************************************************************
* Backend XenBus Nodes
@@ -285,6 +300,23 @@
* The Xen grant reference granting permission for the backend to map
* a sole page in a single page sized ring buffer.
*
+ *--------------------- Stream Event Transport Parameters ---------------------
+ *
+ * This communication path is used to deliver asynchronous events from backend
+ * to frontend, set up per stream.
+ *
+ * evt-event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * evt-ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page in a single page sized ring buffer.
+ *
******************************************************************************
* STATE DIAGRAMS
******************************************************************************
@@ -432,6 +464,20 @@
#define XENSND_OP_GET_VOLUME 5
#define XENSND_OP_MUTE 6
#define XENSND_OP_UNMUTE 7
+#define XENSND_OP_TRIGGER 8
+#define XENSND_OP_HW_PARAM_QUERY 9
+
+#define XENSND_OP_TRIGGER_START 0
+#define XENSND_OP_TRIGGER_PAUSE 1
+#define XENSND_OP_TRIGGER_STOP 2
+#define XENSND_OP_TRIGGER_RESUME 3
+
+/*
+ ******************************************************************************
+ * EVENT CODES
+ ******************************************************************************
+ */
+#define XENSND_EVT_CUR_POS 0
/*
******************************************************************************
@@ -448,6 +494,8 @@
#define XENSND_FIELD_VCARD_LONG_NAME "long-name"
#define XENSND_FIELD_RING_REF "ring-ref"
#define XENSND_FIELD_EVT_CHNL "event-channel"
+#define XENSND_FIELD_EVT_RING_REF "evt-ring-ref"
+#define XENSND_FIELD_EVT_EVT_CHNL "evt-event-channel"
#define XENSND_FIELD_DEVICE_NAME "name"
#define XENSND_FIELD_TYPE "type"
#define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id"
@@ -526,7 +574,7 @@
*
*---------------------------------- Requests ---------------------------------
*
- * All request packets have the same length (32 octets)
+ * All request packets have the same length (64 octets)
 * All request packets have a common header:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
@@ -559,11 +607,13 @@
* +----------------+----------------+----------------+----------------+
* | gref_directory | 24
* +----------------+----------------+----------------+----------------+
- * | reserved | 28
+ * | period_sz | 28
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
- * | reserved | 32
+ * | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* pcm_rate - uint32_t, stream data rate, Hz
@@ -571,6 +621,14 @@
* pcm_channels - uint8_t, number of channels of this stream,
* [channels-min; channels-max]
* buffer_sz - uint32_t, buffer size to be allocated, octets
+ * period_sz - uint32_t, event period size, octets
+ * This is the requested value of the period at which the frontend would
+ * like to receive XENSND_EVT_CUR_POS notifications from the backend when
+ * stream position advances during playback/capture.
+ * It shows how many octets are expected to be played/captured before
+ * sending such an event.
+ * If set to 0 no XENSND_EVT_CUR_POS events are sent by the backend.
+ *
* gref_directory - grant_ref_t, a reference to the first shared page
* describing shared buffer references. At least one page exists. If shared
* buffer size (buffer_sz) exceeds what can be addressed by this single page,
@@ -585,6 +643,7 @@ struct xensnd_open_req {
uint16_t reserved;
uint32_t buffer_sz;
grant_ref_t gref_directory;
+ uint32_t period_sz;
};
/*
@@ -632,7 +691,7 @@ struct xensnd_page_directory {
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
- * | reserved | 32
+ * | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* Request read/write - used for read (for capture) or write (for playback):
@@ -650,7 +709,7 @@ struct xensnd_page_directory {
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
- * | reserved | 32
+ * | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write
@@ -673,9 +732,11 @@ struct xensnd_rw_req {
* +----------------+----------------+----------------+----------------+
* | length | 16
* +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
- * | reserved | 32
+ * | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* operation - XENSND_OP_SET_VOLUME for volume set
@@ -713,9 +774,11 @@ struct xensnd_rw_req {
* +----------------+----------------+----------------+----------------+
* | length | 16
* +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
- * | reserved | 32
+ * | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute
@@ -743,32 +806,213 @@ struct xensnd_rw_req {
*
* The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME,
* XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE.
+ *
+ * Request stream running state change - trigger PCM stream running state
+ * to start, stop, pause or resume:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_TRIGGER | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | type | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * type - uint8_t, XENSND_OP_TRIGGER_XXX value
*/
+struct xensnd_trigger_req {
+ uint8_t type;
+};
+
/*
- *---------------------------------- Responses --------------------------------
+ * Request stream parameter ranges: request intervals and
+ * masks of supported ranges for stream configuration values.
*
- * All response packets have the same length (32 octets)
+ * Sound device configuration for a particular stream is a limited subset
+ * of the multidimensional configuration available on XenStore, e.g.
+ * once the frame rate has been selected, only a limited supported range of
+ * sample rates becomes available (which might be the same set configured
+ * on XenStore, or a smaller one). For example, selecting a 96kHz sample rate
+ * may limit the number of channels available for such a configuration
+ * from 4 to 2, etc.
+ * Thus, each call to XENSND_OP_HW_PARAM_QUERY may reduce the configuration
+ * space, making it possible to iteratively arrive at the final stream
+ * configuration used in the XENSND_OP_OPEN request.
+ *
+ * See response format for this request.
*
- * Response for all requests:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
- * | id | operation | reserved | 4
+ * | id | _HW_PARAM_QUERY| reserved | 4
* +----------------+----------------+----------------+----------------+
- * | status | 8
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | formats mask low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | formats mask high 32-bit | 16
* +----------------+----------------+----------------+----------------+
- * | reserved | 12
+ * | min rate | 20
+ * +----------------+----------------+----------------+----------------+
+ * | max rate | 24
+ * +----------------+----------------+----------------+----------------+
+ * | min channels | 28
+ * +----------------+----------------+----------------+----------------+
+ * | max channels | 32
+ * +----------------+----------------+----------------+----------------+
+ * | min buffer frames | 36
+ * +----------------+----------------+----------------+----------------+
+ * | max buffer frames | 40
+ * +----------------+----------------+----------------+----------------+
+ * | min period frames | 44
+ * +----------------+----------------+----------------+----------------+
+ * | max period frames | 48
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 52
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
- * | reserved | 32
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * formats - uint64_t, bit mask representing values of the parameter
+ * made as bitwise OR of (1 << XENSND_PCM_FORMAT_XXX) values
+ *
+ * For interval parameters:
+ * min - uint32_t, minimum value of the parameter
+ * max - uint32_t, maximum value of the parameter
+ *
+ * Frame is defined as a product of the number of channels by the
+ * number of octets per one sample.
+ */
+
+struct xensnd_query_hw_param {
+ uint64_t formats;
+ struct {
+ uint32_t min;
+ uint32_t max;
+ } rates;
+ struct {
+ uint32_t min;
+ uint32_t max;
+ } channels;
+ struct {
+ uint32_t min;
+ uint32_t max;
+ } buffer;
+ struct {
+ uint32_t min;
+ uint32_t max;
+ } period;
+};
+
+/*
+ *---------------------------------- Responses --------------------------------
+ *
+ * All response packets have the same length (64 octets)
+ *
+ * All response packets have a common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
* +----------------+----------------+----------------+----------------+
*
* id - uint16_t, copied from the request
* operation - uint8_t, XENSND_OP_* - copied from request
* status - int32_t, response status, zero on success and -XEN_EXX on failure
+ *
+ *
+ * HW parameter query response - response for XENSND_OP_HW_PARAM_QUERY:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | formats mask low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | formats mask high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | min rate | 20
+ * +----------------+----------------+----------------+----------------+
+ * | max rate | 24
+ * +----------------+----------------+----------------+----------------+
+ * | min channels | 28
+ * +----------------+----------------+----------------+----------------+
+ * | max channels | 32
+ * +----------------+----------------+----------------+----------------+
+ * | min buffer frames | 36
+ * +----------------+----------------+----------------+----------------+
+ * | max buffer frames | 40
+ * +----------------+----------------+----------------+----------------+
+ * | min period frames | 44
+ * +----------------+----------------+----------------+----------------+
+ * | max period frames | 48
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 52
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Meaning of the values in this response is the same as for
+ * XENSND_OP_HW_PARAM_QUERY request.
+ */
+
+/*
+ *----------------------------------- Events ----------------------------------
+ *
+ * Events are sent via a shared page allocated by the front and advertised
+ * through the evt-event-channel/evt-ring-ref XenStore entries
+ * All event packets have the same length (64 octets)
+ * All event packets have a common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | type | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, event id, may be used by front
+ * type - uint8_t, type of the event
+ *
+ *
+ * Current stream position - event from back to front when stream's
+ * playback/capture position has advanced:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _EVT_CUR_POS | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | position low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | position high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * position - current value of stream's playback/capture position, octets
+ *
*/
+struct xensnd_cur_pos_evt {
+ uint64_t position;
+};
+
struct xensnd_req {
uint16_t id;
uint8_t operation;
@@ -776,7 +1020,9 @@ struct xensnd_req {
union {
struct xensnd_open_req open;
struct xensnd_rw_req rw;
- uint8_t reserved[24];
+ struct xensnd_trigger_req trigger;
+ struct xensnd_query_hw_param hw_param;
+ uint8_t reserved[56];
} op;
};
@@ -785,9 +1031,53 @@ struct xensnd_resp {
uint8_t operation;
uint8_t reserved;
int32_t status;
- uint8_t reserved1[24];
+ union {
+ struct xensnd_query_hw_param hw_param;
+ uint8_t reserved1[56];
+ } resp;
+};
+
+struct xensnd_evt {
+ uint16_t id;
+ uint8_t type;
+ uint8_t reserved[5];
+ union {
+ struct xensnd_cur_pos_evt cur_pos;
+ uint8_t reserved[56];
+ } op;
};
DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp);
+/*
+ ******************************************************************************
+ * Back to front events delivery
+ ******************************************************************************
+ * In order to deliver asynchronous events from back to front, a shared page is
+ * allocated by the front and its grant reference is propagated to the back via
+ * XenStore entries (evt-ring-ref/evt-event-channel).
+ * This page has a common header used by both front and back to synchronize
+ * access to and control the event ring buffer; the back is the producer of
+ * the events and the front is the consumer. The rest of the page after the header
+ * is used for event packets.
+ *
+ * Upon reception of an event (or events), the front may confirm reception
+ * of each event, of a group of events, or of none.
+ */
+
+struct xensnd_event_page {
+ uint32_t in_cons;
+ uint32_t in_prod;
+ uint8_t reserved[56];
+};
+
+#define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE
+#define XENSND_IN_RING_OFFS (sizeof(struct xensnd_event_page))
+#define XENSND_IN_RING_SIZE (XENSND_EVENT_PAGE_SIZE - XENSND_IN_RING_OFFS)
+#define XENSND_IN_RING_LEN (XENSND_IN_RING_SIZE / sizeof(struct xensnd_evt))
+#define XENSND_IN_RING(page) \
+ ((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS))
+#define XENSND_IN_RING_REF(page, idx) \
+ (XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN])
+
#endif /* __XEN_PUBLIC_IO_SNDIF_H__ */
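A hedged frontend-side sketch of the two additions: submitting a trigger request through the existing request ring, and draining position events from the new event page with the XENSND_IN_RING helpers. handle_position() is a hypothetical consumer, and ring-management details (request producer index handling, event-channel notification, memory barriers) are elided:

/* Hedged sketch: start playback and consume XENSND_EVT_CUR_POS events. */
#include <stdint.h>
#include <xen/interface/io/sndif.h>

extern void handle_position(uint64_t position);	/* hypothetical consumer */

static void fill_trigger_start(struct xensnd_req *req, uint16_t id)
{
	req->id = id;
	req->operation = XENSND_OP_TRIGGER;
	req->op.trigger.type = XENSND_OP_TRIGGER_START;
}

/* Consume all pending events from the shared event page. */
static void drain_events(struct xensnd_event_page *page)
{
	uint32_t cons = page->in_cons;

	while (cons != page->in_prod) {
		struct xensnd_evt *evt = &XENSND_IN_RING_REF(page, cons);

		if (evt->type == XENSND_EVT_CUR_POS)
			/* evt->op.cur_pos.position: octets played/captured so far */
			handle_position(evt->op.cur_pos.position);
		cons++;
	}
	page->in_cons = cons;	/* real code also needs memory barriers here */
}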
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 583dd93b3016..4c5751c26f87 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -265,4 +265,70 @@ struct xen_remove_from_physmap {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap);
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
+ */
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+ /* IN - The domain whose resource is to be mapped */
+ domid_t domid;
+ /* IN - the type of resource */
+ uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+#define XENMEM_resource_grant_table 1
+
+ /*
+ * IN - a type-specific resource identifier, which must be zero
+ * unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
+ * type == XENMEM_resource_grant_table -> id defined below
+ */
+ uint32_t id;
+
+#define XENMEM_resource_grant_table_id_shared 0
+#define XENMEM_resource_grant_table_id_status 1
+
+ /*
+ * IN/OUT - As an IN parameter, the number of frames of the resource
+ * to be mapped. However, if the specified value is 0 and
+ * frame_list is NULL then this field will be set to the
+ * maximum value supported by the implementation on return.
+ */
+ uint32_t nr_frames;
+ /*
+ * OUT - Must be zero on entry. On return this may contain a bitwise
+ * OR of the following values.
+ */
+ uint32_t flags;
+
+ /* The resource pages have been assigned to the calling domain */
+#define _XENMEM_rsrc_acq_caller_owned 0
+#define XENMEM_rsrc_acq_caller_owned (1u << _XENMEM_rsrc_acq_caller_owned)
+
+ /*
+ * IN - the index of the initial frame to be mapped. This parameter
+ * is ignored if nr_frames is 0.
+ */
+ uint64_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
+
+ /*
+ * IN/OUT - If the tools domain is PV then, upon return, frame_list
+ * will be populated with the MFNs of the resource.
+ * If the tools domain is HVM then it is expected that, on
+ * entry, frame_list will be populated with a list of GFNs
+ * that will be mapped to the MFNs of the resource.
+ * If -EIO is returned then the frame_list has only been
+ * partially mapped and it is up to the caller to unmap all
+ * the GFNs.
+ * This parameter may be NULL if nr_frames is 0.
+ */
+ GUEST_HANDLE(xen_pfn_t) frame_list;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_mem_acquire_resource);
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
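The nr_frames/frame_list contract above naturally splits into a probe step and
a map step. The sketch below shows that two-call pattern, assuming a
HYPERVISOR_memory_op(cmd, arg) wrapper and the set_xen_guest_handle() helper
are available; acquire_gnttab_frames() and its 'gfns' buffer are illustrative
names, and error handling is abbreviated.

/*
 * Sketch: acquire the shared grant-table frames of guest 'domid'.
 * 'gfns' is a caller-supplied array of at least 'max' xen_pfn_t entries
 * (pre-populated with GFNs to map when running as an HVM tools domain).
 */
static int acquire_gnttab_frames(domid_t domid, xen_pfn_t *gfns,
				 uint32_t max, uint32_t *nr_out)
{
	struct xen_mem_acquire_resource xmar = {
		.domid = domid,
		.type = XENMEM_resource_grant_table,
		.id = XENMEM_resource_grant_table_id_shared,
		.nr_frames = 0,	/* 0 + NULL frame_list: query the limit */
		.frame = 0,
	};
	int rc;

	set_xen_guest_handle(xmar.frame_list, NULL);
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
	if (rc)
		return rc;

	/* Second call actually maps up to 'max' frames. */
	if (xmar.nr_frames > max)
		xmar.nr_frames = max;
	set_xen_guest_handle(xmar.frame_list, gfns);
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
	if (rc)
		return rc;

	*nr_out = xmar.nr_frames;
	return 0;
}
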
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 4f4830ef8f93..8bfb242f433e 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -265,9 +265,10 @@
*
* PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
-#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
-#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
-#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
/*
* MMU EXTENDED OPERATIONS
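The new MMU_PT_UPDATE_NO_TRANSLATE request is issued through the same
mmu_update interface as the existing request types. A minimal sketch of
submitting one such request, assuming the usual encoding in which the request
type occupies the low two bits of mmu_update.ptr and a HYPERVISOR_mmu_update()
wrapper is available; set_pte_no_translate() is an illustrative name.

/*
 * Sketch: install 'val' into the PTE at machine address 'pte_ma'.
 * The PTE's machine address is 8-byte aligned, so its low two bits are
 * free to carry the request type.
 */
static int set_pte_no_translate(uint64_t pte_ma, uint64_t val, domid_t domid)
{
	struct mmu_update u = {
		.ptr = pte_ma | MMU_PT_UPDATE_NO_TRANSLATE,
		.val = val,
	};
	int success;

	return HYPERVISOR_mmu_update(&u, 1, &success, domid);
}
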
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index fd23e42c6024..fd18c974a619 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -63,7 +63,7 @@ static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
struct vm_area_struct;
/*
- * xen_remap_domain_gfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: Array of GFNs to map
@@ -86,6 +86,28 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned domid,
struct page **pages);
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @mfn: Array of MFNs to map
+ * @nr: Number of entries in the MFN array
+ * @err_ptr: Returns per-MFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @mfn and @err_ptr may point to the same buffer; the MFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a negative error
+ * code.
+ */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr, xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid, struct page **pages);
+
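A minimal caller sketch for the declaration above, assuming it runs against a
VMA the driver already owns and that the MFN array was obtained out of band;
map_foreign_mfns() and its parameters are illustrative names, and 'pages' is
passed as NULL, which assumes a PV (non-auto-translated) caller.

/*
 * Sketch: map 'nr' foreign machine frames owned by 'domid' into 'vma',
 * starting at vma->vm_start.  A separate 'errs' array receives the
 * per-frame status.
 */
static int map_foreign_mfns(struct vm_area_struct *vma, xen_pfn_t *mfns,
			    int *errs, int nr, unsigned int domid)
{
	int mapped;

	mapped = xen_remap_domain_mfn_array(vma, vma->vm_start, mfns, nr,
					    errs, vma->vm_page_prot,
					    domid, NULL);
	if (mapped < 0)
		return mapped;	/* global failure */

	/* 'mapped' frames succeeded; consult errs[] for the rest. */
	return mapped == nr ? 0 : -EFAULT;
}
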
/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages