-rw-r--r--Documentation/devicetree/bindings/net/mediatek-net.txt7
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt22
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt38
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt7
-rw-r--r--Documentation/input/event-codes.txt4
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/usb/gadget_multi.txt2
-rw-r--r--Documentation/x86/protection-keys.txt27
-rw-r--r--Documentation/x86/x86_64/mm.txt6
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile5
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi8
-rw-r--r--arch/arc/configs/axs103_defconfig1
-rw-r--r--arch/arc/configs/axs103_smp_defconfig1
-rw-r--r--arch/arc/include/asm/fb.h19
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir5221.dts5
-rw-r--r--arch/arm/boot/dts/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts5
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi2
-rw-r--r--arch/arm/boot/dts/meson8.dtsi57
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi12
-rw-r--r--arch/arm/boot/dts/omap4.dtsi2
-rw-r--r--arch/arm/configs/u8500_defconfig3
-rw-r--r--arch/arm/include/asm/cputype.h2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kvm/arm.c13
-rw-r--r--arch/arm/mach-omap2/id.c4
-rw-r--r--arch/arm/mach-omap2/io.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c2
-rw-r--r--arch/arm/mach-omap2/soc.h1
-rw-r--r--arch/arm/mach-pxa/devices.c1
-rw-r--r--arch/arm/mach-sa1100/Kconfig10
-rw-r--r--arch/arm/mach-uniphier/platsmp.c2
-rw-r--r--arch/arm/mm/dma-mapping.c3
-rw-r--r--arch/arm64/boot/dts/broadcom/vulcan.dtsi15
-rw-r--r--arch/arm64/include/asm/kvm_arm.h6
-rw-r--r--arch/arm64/include/asm/kvm_asm.h2
-rw-r--r--arch/arm64/include/asm/kvm_host.h7
-rw-r--r--arch/arm64/kernel/head.S13
-rw-r--r--arch/arm64/kernel/smp_spin_table.c11
-rw-r--r--arch/arm64/kvm/hyp/s2-setup.c39
-rw-r--r--arch/m68k/coldfire/gpio.c8
-rw-r--r--arch/m68k/configs/amiga_defconfig13
-rw-r--r--arch/m68k/configs/apollo_defconfig13
-rw-r--r--arch/m68k/configs/atari_defconfig13
-rw-r--r--arch/m68k/configs/bvme6000_defconfig13
-rw-r--r--arch/m68k/configs/hp300_defconfig13
-rw-r--r--arch/m68k/configs/mac_defconfig13
-rw-r--r--arch/m68k/configs/multi_defconfig13
-rw-r--r--arch/m68k/configs/mvme147_defconfig13
-rw-r--r--arch/m68k/configs/mvme16x_defconfig13
-rw-r--r--arch/m68k/configs/q40_defconfig13
-rw-r--r--arch/m68k/configs/sun3_defconfig13
-rw-r--r--arch/m68k/configs/sun3x_defconfig13
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h2
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/Kconfig.debug4
-rw-r--r--arch/parisc/Makefile4
-rw-r--r--arch/parisc/include/asm/ftrace.h18
-rw-r--r--arch/parisc/kernel/Makefile4
-rw-r--r--arch/parisc/kernel/entry.S93
-rw-r--r--arch/parisc/kernel/ftrace.c146
-rw-r--r--arch/parisc/kernel/head.S9
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h1
-rw-r--r--arch/powerpc/kernel/prom.c26
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/include/asm/pci.h3
-rw-r--r--arch/s390/include/asm/seccomp.h2
-rw-r--r--arch/s390/lib/spinlock.c1
-rw-r--r--arch/sh/include/asm/smp.h5
-rw-r--r--arch/sh/include/asm/topology.h2
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c2
-rw-r--r--arch/sh/kernel/topology.c4
-rw-r--r--arch/x86/boot/compressed/Makefile14
-rw-r--r--arch/x86/boot/compressed/head_32.S28
-rw-r--r--arch/x86/boot/compressed/head_64.S8
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c4
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-genpool.c4
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--arch/x86/kernel/setup.c37
-rw-r--r--arch/x86/kvm/cpuid.c1
-rw-r--r--arch/x86/kvm/mmu.h9
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--block/partition-generic.c13
-rw-r--r--crypto/rsa-pkcs1pad.c12
-rw-r--r--drivers/acpi/nfit.c5
-rw-r--r--drivers/bcma/main.c17
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/bus/uniphier-system-bus.c2
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c1
-rw-r--r--drivers/clocksource/tango_xtal.c2
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/dma/dw/core.c34
-rw-r--r--drivers/dma/edma.c63
-rw-r--r--drivers/dma/hsu/hsu.c13
-rw-r--r--drivers/dma/hsu/hsu.h3
-rw-r--r--drivers/dma/omap-dma.c26
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c2
-rw-r--r--drivers/edac/sb_edac.c30
-rw-r--r--drivers/extcon/extcon-palmas.c3
-rw-r--r--drivers/firmware/efi/arm-init.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c9
-rw-r--r--drivers/gpu/drm/drm_edid.c10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/Makefile6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c29
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c42
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-lenovo.c16
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c1
-rw-r--r--drivers/hid/hid-wiimote-modules.c14
-rw-r--r--drivers/hid/usbhid/hid-core.c73
-rw-r--r--drivers/hid/wacom_sys.c102
-rw-r--r--drivers/hid/wacom_wac.c11
-rw-r--r--drivers/hid/wacom_wac.h8
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/misc/arizona-haptics.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/misc/twl4030-vibra.c1
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/tablet/gtco.c10
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu.c22
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/lguest/interrupts_and_traps.c6
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/mailbox/mailbox-test.c16
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/md/dm-cache-metadata.c64
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/misc/lkdtm.c11
-rw-r--r--drivers/mmc/card/block.c18
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mtd/nand/nand_base.c10
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c34
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c5
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c43
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c157
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c16
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/phy/spi_ks8995.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c177
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/nvdimm/Kconfig13
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/blk.c208
-rw-r--r--drivers/nvdimm/btt.c20
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c4
-rw-r--r--drivers/nvdimm/claim.c63
-rw-r--r--drivers/nvdimm/dax_devs.c99
-rw-r--r--drivers/nvdimm/namespace_devs.c38
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/nd.h72
-rw-r--r--drivers/nvdimm/pfn.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c319
-rw-r--r--drivers/nvdimm/pmem.c499
-rw-r--r--drivers/nvdimm/region.c2
-rw-r--r--drivers/nvdimm/region_devs.c29
-rw-r--r--drivers/nvme/host/pci.c31
-rw-r--r--drivers/pci/access.c42
-rw-r--r--drivers/pci/host/pci-imx6.c20
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/perf/arm_pmu.c15
-rw-r--r--drivers/phy/phy-rockchip-dp.c7
-rw-r--r--drivers/phy/phy-rockchip-emmc.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c9
-rw-r--r--drivers/pinctrl/pinctrl-single.c6
-rw-r--r--drivers/platform/x86/hp_accel.c6
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c48
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/mtk_thermal.c3
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/thermal_core.c8
-rw-r--r--drivers/tty/pty.c63
-rw-r--r--drivers/tty/serial/8250/8250_port.c11
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/uartlite.c8
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/dwc3/core.c23
-rw-r--r--drivers/usb/dwc3/debugfs.c13
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c12
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c5
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c13
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/storage/uas.c21
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/video/fbdev/amba-clcd.c15
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c12
-rw-r--r--fs/crypto/crypto.c51
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/devpts/inode.c49
-rw-r--r--fs/ext4/crypto.c4
-rw-r--r--fs/f2fs/data.c16
-rw-r--r--fs/f2fs/file.c10
-rw-r--r--fs/seq_file.c7
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/linux/devpts_fs.h34
-rw-r--r--include/linux/fscrypto.h9
-rw-r--r--include/linux/mlx4/device.h7
-rw-r--r--include/linux/mm.h64
-rw-r--r--include/linux/nd.h11
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/pmem.h22
-rw-r--r--include/linux/rculist_nulls.h39
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/thermal.h4
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/net/cls_cgroup.h7
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/sctp/structs.h8
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/sound/hda_regmap.h2
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/ndctl.h2
-rw-r--r--kernel/bpf/verifier.c1
-rw-r--r--kernel/cpu.c33
-rw-r--r--kernel/futex.c27
-rw-r--r--kernel/irq/ipi.c1
-rw-r--r--kernel/locking/lockdep.c2
-rw-r--r--kernel/locking/qspinlock_stat.h8
-rw-r--r--kernel/resource.c13
-rw-r--r--lib/assoc_array.c4
-rw-r--r--lib/lz4/lz4defs.h25
-rw-r--r--mm/backing-dev.c4
-rw-r--r--mm/gup.c52
-rw-r--r--mm/nommu.c44
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/decnet/dn_route.c9
-rw-r--r--net/ipv4/netfilter/arptable_filter.c6
-rw-r--r--net/ipv4/route.c19
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_output.c16
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv6/addrconf.c22
-rw-r--r--net/ipv6/datagram.c169
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/udp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/openvswitch/conntrack.c1
-rw-r--r--net/packet/af_packet.c1
-rw-r--r--net/rds/cong.c4
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sctp/outqueue.c15
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sctp/sm_sideeffect.c36
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c3
-rw-r--r--net/tipc/core.c1
-rw-r--r--net/tipc/core.h3
-rw-r--r--net/tipc/name_distr.c35
-rw-r--r--net/vmw_vsock/vmci_transport.c7
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--scripts/asn1_compiler.c2
-rw-r--r--sound/hda/hdac_device.c10
-rw-r--r--sound/hda/hdac_i915.c15
-rw-r--r--sound/hda/hdac_regmap.c40
-rw-r--r--sound/isa/sscape.c2
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_cirrus.c14
-rw-r--r--sound/pci/hda/patch_hdmi.c11
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c1
-rw-r--r--sound/usb/mixer_maps.c14
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/objtool/Documentation/stack-validation.txt38
-rw-r--r--tools/objtool/builtin-check.c97
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/testing/nvdimm/Kbuild2
-rw-r--r--tools/testing/nvdimm/test/iomap.c27
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/reuseport_dualstack.c208
-rw-r--r--virt/kvm/arm/arch_timer.c49
-rw-r--r--virt/kvm/arm/pmu.c3
386 files changed, 4070 insertions, 2479 deletions
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 5ca79290eabf..32eaaca04d9b 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
Required properties:
- compatible: Should be "mediatek,mt7623-eth"
- reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+ order. These are fe_int0, fe_int1 and fe_int2.
- clocks: the clock used by the core
- clock-names: the names of the clock listed in the clocks property. These are
"ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
<&ethsys CLK_ETHSYS_GP2>,
<&ethsys CLK_ETHSYS_GP1>;
clock-names = "ethif", "esw", "gp2", "gp1";
- interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+ GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+ GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
reset-names = "eth";
diff --git a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
index 50c4f9b00adf..e3b4809fbe82 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
@@ -8,15 +8,19 @@ Required properties:
of memory mapped region.
- clock-names: from common clock binding:
Required elements: "24m"
-- rockchip,grf: phandle to the syscon managing the "general register files"
- #phy-cells : from the generic PHY bindings, must be 0;
Example:
-edp_phy: edp-phy {
- compatible = "rockchip,rk3288-dp-phy";
- rockchip,grf = <&grf>;
- clocks = <&cru SCLK_EDP_24M>;
- clock-names = "24m";
- #phy-cells = <0>;
+grf: syscon@ff770000 {
+ compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
+
+...
+
+ edp_phy: edp-phy {
+ compatible = "rockchip,rk3288-dp-phy";
+ clocks = <&cru SCLK_EDP_24M>;
+ clock-names = "24m";
+ #phy-cells = <0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index 61916f15a949..555cb0f40690 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -3,17 +3,23 @@ Rockchip EMMC PHY
Required properties:
- compatible: rockchip,rk3399-emmc-phy
- - rockchip,grf : phandle to the syscon managing the "general
- register files"
- #phy-cells: must be 0
- - reg: PHY configure reg address offset in "general
+ - reg: PHY register address offset and length in "general
register files"
Example:
-emmcphy: phy {
- compatible = "rockchip,rk3399-emmc-phy";
- rockchip,grf = <&grf>;
- reg = <0xf780>;
- #phy-cells = <0>;
+
+grf: syscon@ff770000 {
+ compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+...
+
+ emmcphy: phy@f780 {
+ compatible = "rockchip,rk3399-emmc-phy";
+ reg = <0xf780 0x20>;
+ #phy-cells = <0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 3f6a524cc5ff..32f4a2d6d0b3 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -1,13 +1,16 @@
== Amlogic Meson pinmux controller ==
Required properties for the root node:
- - compatible: "amlogic,meson8-pinctrl" or "amlogic,meson8b-pinctrl"
+ - compatible: one of "amlogic,meson8-cbus-pinctrl"
+ "amlogic,meson8b-cbus-pinctrl"
+ "amlogic,meson8-aobus-pinctrl"
+ "amlogic,meson8b-aobus-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
-The 2 power domains of the controller (regular and always-on) are
-represented as sub-nodes and each of them acts as a GPIO controller.
+The GPIO bank for the controller is represented as a sub-node and it acts as a
+GPIO controller.
Required properties for sub-nodes are:
- reg: should contain address and size for mux, pull-enable, pull and
@@ -18,10 +21,6 @@ Required properties for sub-nodes are:
- gpio-controller: identifies the node as a gpio controller
- #gpio-cells: must be 2
-Valid sub-node names are:
- - "banks" for the regular domain
- - "ao-bank" for the always-on domain
-
=== Other sub-nodes ===
Child nodes without the "gpio-controller" represent some desired
@@ -45,7 +44,7 @@ pinctrl-bindings.txt
=== Example ===
pinctrl: pinctrl@c1109880 {
- compatible = "amlogic,meson8-pinctrl";
+ compatible = "amlogic,meson8-cbus-pinctrl";
reg = <0xc1109880 0x10>;
#address-cells = <1>;
#size-cells = <1>;
@@ -61,15 +60,6 @@ pinctrl-bindings.txt
#gpio-cells = <2>;
};
- gpio_ao: ao-bank@c1108030 {
- reg = <0xc8100014 0x4>,
- <0xc810002c 0x4>,
- <0xc8100024 0x8>;
- reg-names = "mux", "pull", "gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
nand {
mux {
groups = "nand_io", "nand_io_ce0", "nand_io_ce1",
@@ -79,18 +69,4 @@ pinctrl-bindings.txt
function = "nand";
};
};
-
- uart_ao_a {
- mux {
- groups = "uart_tx_ao_a", "uart_rx_ao_a",
- "uart_cts_ao_a", "uart_rts_ao_a";
- function = "uart_ao";
- };
-
- conf {
- pins = "GPIOAO_0", "GPIOAO_1",
- "GPIOAO_2", "GPIOAO_3";
- bias-disable;
- };
- };
};
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index 1068ffce9f91..fdde63a5419c 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -15,9 +15,10 @@ Required properties:
is the rtc tick interrupt. The number of cells representing a interrupt
depends on the parent interrupt controller.
- clocks: Must contain a list of phandle and clock specifier for the rtc
- and source clocks.
-- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
- same order as the clocks property.
+ clock and in the case of a s3c6410 compatible controller, also
+ a source clock.
+- clock-names: Must contain "rtc" and for a s3c6410 compatible controller,
+ a "rtc_src" sorted in the same order as the clocks property.
Example:
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 3f0f5ce3338b..36ea940e5bb9 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
proximity of the device and while the value of the BTN_TOUCH code is 0. If
the input device may be used freely in three dimensions, consider ABS_Z
instead.
+ - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
+ proximity and set to 0 when the tool leaves detectable proximity.
+ BTN_TOOL_<name> signals the type of tool that is currently detected by the
+ hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
* ABS_MT_<name>:
- Used to describe multitouch input events. Please see
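
As a minimal sketch of the BTN_TOOL_<name> rule documented above (editorial illustration, not part of this patch), a pen or stylus driver would typically report proximity as below. The helper name and its arguments are hypothetical; input_report_key(), input_report_abs() and input_sync() are the standard Linux input API from <linux/input.h>.

#include <linux/input.h>

/*
 * Hypothetical helper: BTN_TOOL_PEN tracks detectable proximity only
 * (1 on enter, 0 on leave); BTN_TOUCH and ABS_DISTANCE are reported
 * independently of which tool is detected.
 */
static void report_pen_state(struct input_dev *dev, bool in_prox,
			     bool touching, int distance)
{
	input_report_key(dev, BTN_TOOL_PEN, in_prox);
	input_report_key(dev, BTN_TOUCH, touching);
	if (in_prox && !touching)
		input_report_abs(dev, ABS_DISTANCE, distance);
	input_sync(dev);
}
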
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ecc74fa4bfde..0b3de80ec8f6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -4077,6 +4077,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
sector if the number is odd);
i = IGNORE_DEVICE (don't bind to this
device);
+ j = NO_REPORT_LUNS (don't use report luns
+ command, uas only);
l = NOT_LOCKABLE (don't try to lock and
unlock ejectable media);
m = MAX_SECTORS_64 (don't transfer more
diff --git a/Documentation/usb/gadget_multi.txt b/Documentation/usb/gadget_multi.txt
index 7d66a8636cb5..5faf514047e9 100644
--- a/Documentation/usb/gadget_multi.txt
+++ b/Documentation/usb/gadget_multi.txt
@@ -43,7 +43,7 @@ For the gadget two work under Windows two conditions have to be met:
First of all, Windows need to detect the gadget as an USB composite
gadget which on its own have some conditions[4]. If they are met,
Windows lets USB Generic Parent Driver[5] handle the device which then
-tries to much drivers for each individual interface (sort of, don't
+tries to match drivers for each individual interface (sort of, don't
get into too many details).
The good news is: you do not have to worry about most of the
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
new file mode 100644
index 000000000000..c281ded1ba16
--- /dev/null
+++ b/Documentation/x86/protection-keys.txt
@@ -0,0 +1,27 @@
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
+which will be found on future Intel CPUs.
+
+Memory Protection Keys provides a mechanism for enforcing page-based
+protections, but without requiring modification of the page tables
+when an application changes protection domains. It works by
+dedicating 4 previously ignored bits in each page table entry to a
+"protection key", giving 16 possible keys.
+
+There is also a new user-accessible register (PKRU) with two separate
+bits (Access Disable and Write Disable) for each key. Being a CPU
+register, PKRU is inherently thread-local, potentially giving each
+thread a different set of protections from every other thread.
+
+There are two new instructions (RDPKRU/WRPKRU) for reading and writing
+to the new register. The feature is only available in 64-bit mode,
+even though there is theoretically space in the PAE PTEs. These
+permissions are enforced on data access only and have no effect on
+instruction fetches.
+
+=========================== Config Option ===========================
+
+This config option adds approximately 1.5kb of text. and 50 bytes of
+data to the executable. A workload which does large O_DIRECT reads
+of holes in XFS files was run to exercise get_user_pages_fast(). No
+performance delta was observed with the config option
+enabled or disabled.
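
As a minimal userspace sketch of the register layout described above (editorial illustration, not part of this patch or of the config option it documents): each key owns two PKRU bits, Access Disable at bit 2*key and Write Disable at bit 2*key + 1. The .byte sequences below encode RDPKRU and WRPKRU; how a page actually gets tagged with a given key is a kernel-side interface outside the scope of this file and is simply assumed here.

#include <stdint.h>

static inline uint32_t rdpkru(void)
{
	uint32_t pkru;

	/* RDPKRU: ECX must be 0; PKRU is returned in EAX, EDX is zeroed. */
	asm volatile(".byte 0x0f,0x01,0xee"
		     : "=a" (pkru) : "c" (0) : "rdx");
	return pkru;
}

static inline void wrpkru(uint32_t pkru)
{
	/* WRPKRU: EAX holds the new PKRU value; ECX and EDX must be 0. */
	asm volatile(".byte 0x0f,0x01,0xef"
		     : : "a" (pkru), "c" (0), "d" (0) : "memory");
}

/* Deny writes, but keep reads, for pages tagged with 'key' (0..15). */
static inline void pkey_deny_write(int key)
{
	uint32_t pkru = rdpkru();

	pkru &= ~(1u << (2 * key));	/* clear Access Disable */
	pkru |= 1u << (2 * key + 1);	/* set Write Disable */
	wrpkru(pkru);
}
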
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index c518dce7da4d..5aa738346062 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
... unused hole ...
ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
the processes using the page fault handler, with init_level4_pgt as
reference.
-Current X86-64 implementations only support 40 bits of address space,
-but we support up to 46 bits. This expands into MBZ space in the page tables.
+Current X86-64 implementations support up to 46 bits of address space (64 TB),
+which is our current limit. This expands into MBZ space in the page tables.
We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
memory window (this size is arbitrary, it can be raised later if needed).
diff --git a/MAINTAINERS b/MAINTAINERS
index 61a323a6b2cf..1d5b4becab6f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6252,8 +6252,8 @@ S: Maintained
F: tools/testing/selftests
KERNEL VIRTUAL MACHINE (KVM)
-M: Gleb Natapov <gleb@kernel.org>
M: Paolo Bonzini <pbonzini@redhat.com>
+M: Radim Krčmář <rkrcmar@redhat.com>
L: kvm@vger.kernel.org
W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
diff --git a/Makefile b/Makefile
index 1d0aef03eae7..9496df81b9a8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
NAME = Blurry Fish Butt
# *DOCUMENTATION*
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
prepare: prepare0 prepare-objtool
ifdef CONFIG_STACK_VALIDATION
- has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0)
+ has_libelf := $(call try-run,\
+ echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 208aae071b37..12d0284a46e5 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -593,7 +593,6 @@ config PCI_SYSCALL
def_bool PCI
source "drivers/pci/Kconfig"
-source "drivers/pci/pcie/Kconfig"
endmenu
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index ab5d5701e11d..44a578c10732 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -47,14 +47,6 @@
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;
- mdio0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "snps,dwmac-mdio";
- phy1: ethernet-phy@1 {
- reg = <1>;
- };
- };
};
ehci@0x40000 {
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index f8b396c9aedb..491b3b5f22bd 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -42,6 +42,7 @@ CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 56128ea2b748..b25ee73b2e79 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -43,6 +43,7 @@ CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
new file mode 100644
index 000000000000..bd3f68c9ddfc
--- /dev/null
+++ b/arch/arc/include/asm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index 6c667fb35449..4e28d87e9356 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -470,9 +470,12 @@
};
&cpsw_emac0 {
- phy_id = <&davinci_mdio>, <0>;
phy-mode = "rmii";
dual_emac_res_vlan = <1>;
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
};
&cpsw_emac1 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 6e4f5af3d8f8..344b861a55a5 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -207,7 +207,7 @@
ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
<&edma_tptc2 0>;
- ti,edma-memcpy-channels = <32 33>;
+ ti,edma-memcpy-channels = <58 59>;
};
edma_tptc0: tptc@49800000 {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 83dfafaaba1b..d5dd72047a7e 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -794,3 +794,8 @@
tx-num-evt = <32>;
rx-num-evt = <32>;
};
+
+&synctimer_32kclk {
+ assigned-clocks = <&mux_synctimer32k_ck>;
+ assigned-clock-parents = <&clkdiv32k_ick>;
+};
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 3710755c6d76..85d2c377c332 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
};
/* USB part of the eSATA/USB 2.0 port */
- usb@50000 {
+ usb@58000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index a2ddcb8c545a..45619f6162c5 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -91,8 +91,8 @@
clock-frequency = <141666666>;
};
- pinctrl: pinctrl@c1109880 {
- compatible = "amlogic,meson8-pinctrl";
+ pinctrl_cbus: pinctrl@c1109880 {
+ compatible = "amlogic,meson8-cbus-pinctrl";
reg = <0xc1109880 0x10>;
#address-cells = <1>;
#size-cells = <1>;
@@ -108,29 +108,6 @@
#gpio-cells = <2>;
};
- gpio_ao: ao-bank@c1108030 {
- reg = <0xc8100014 0x4>,
- <0xc810002c 0x4>,
- <0xc8100024 0x8>;
- reg-names = "mux", "pull", "gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- uart_ao_a_pins: uart_ao_a {
- mux {
- groups = "uart_tx_ao_a", "uart_rx_ao_a";
- function = "uart_ao";
- };
- };
-
- i2c_ao_pins: i2c_mst_ao {
- mux {
- groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
- function = "i2c_mst_ao";
- };
- };
-
spi_nor_pins: nor {
mux {
groups = "nor_d", "nor_q", "nor_c", "nor_cs";
@@ -157,4 +134,34 @@
};
};
+ pinctrl_aobus: pinctrl@c8100084 {
+ compatible = "amlogic,meson8-aobus-pinctrl";
+ reg = <0xc8100084 0xc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gpio_ao: ao-bank@c1108030 {
+ reg = <0xc8100014 0x4>,
+ <0xc810002c 0x4>,
+ <0xc8100024 0x8>;
+ reg-names = "mux", "pull", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ uart_ao_a_pins: uart_ao_a {
+ mux {
+ groups = "uart_tx_ao_a", "uart_rx_ao_a";
+ function = "uart_ao";
+ };
+ };
+
+ i2c_ao_pins: i2c_mst_ao {
+ mux {
+ groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
+ function = "i2c_mst_ao";
+ };
+ };
+ };
}; /* end of / */
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 8bad5571af46..2bfe401a4da9 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -155,8 +155,8 @@
reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
};
- pinctrl: pinctrl@c1109880 {
- compatible = "amlogic,meson8b-pinctrl";
+ pinctrl_cbus: pinctrl@c1109880 {
+ compatible = "amlogic,meson8b-cbus-pinctrl";
reg = <0xc1109880 0x10>;
#address-cells = <1>;
#size-cells = <1>;
@@ -171,6 +171,14 @@
gpio-controller;
#gpio-cells = <2>;
};
+ };
+
+ pinctrl_aobus: pinctrl@c8100084 {
+ compatible = "amlogic,meson8b-aobus-pinctrl";
+ reg = <0xc8100084 0xc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
gpio_ao: ao-bank@c1108030 {
reg = <0xc8100014 0x4>,
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 2bd9c83300b2..421fe9f8a9eb 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -70,7 +70,7 @@
compatible = "arm,cortex-a9-twd-timer";
clocks = <&mpu_periphclk>;
reg = <0x48240600 0x20>;
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_EDGE_RISING)>;
interrupt-parent = <&gic>;
};
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 07055eacbb0f..a691d590fbd1 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -63,6 +63,9 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_BU21013=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_RMI4_CORE=y
+CONFIG_RMI4_I2C=y
+CONFIG_RMI4_F11=y
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b23c6c81c9ad..1ee94c716a7f 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
int feature = (features >> field) & 15;
/* feature registers are signed values */
- if (feature > 8)
+ if (feature > 7)
feature -= 16;
return feature;
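
For context on the one-line fix above (editorial illustration, not part of the patch): the extracted field is a 4-bit two's-complement value, so raw 0..7 are non-negative and raw 8..15 stand for -8..-1. With the old "feature > 8" test, the raw value 8 was left as +8 instead of becoming -8. A standalone sketch of the corrected sign extension:

/* Illustration only: sign-extend a 4-bit two's-complement field. */
static inline int sign_extend4(unsigned int raw)
{
	int v = raw & 15;

	return (v > 7) ? v - 16 : v;	/* 8 -> -8, 15 -> -1, 7 stays 7 */
}
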
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index a28fce0bdbbe..2c4bea39cf22 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -512,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
*/
if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
(cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
- cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+ cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
elf_hwcap &= ~HWCAP_SWP;
}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index b5384311dec4..dded1b763c16 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1112,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
{
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
+static void __init hyp_cpu_pm_exit(void)
+{
+ cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
#else
static inline void hyp_cpu_pm_init(void)
{
}
+static inline void hyp_cpu_pm_exit(void)
+{
+}
#endif
static void teardown_common_resources(void)
@@ -1141,9 +1148,7 @@ static int init_subsystems(void)
/*
* Register CPU Hotplug notifier
*/
- cpu_notifier_register_begin();
- err = __register_cpu_notifier(&hyp_init_cpu_nb);
- cpu_notifier_register_done();
+ err = register_cpu_notifier(&hyp_init_cpu_nb);
if (err) {
kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
return err;
@@ -1193,6 +1198,8 @@ static void teardown_hyp_mode(void)
free_hyp_pgds();
for_each_possible_cpu(cpu)
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+ unregister_cpu_notifier(&hyp_init_cpu_nb);
+ hyp_cpu_pm_exit();
}
static int init_vhe_mode(void)
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index d85c24918c17..2abd53ae3e7a 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -669,9 +669,9 @@ void __init dra7xxx_check_revision(void)
case 0:
omap_revision = DRA722_REV_ES1_0;
break;
+ case 1:
default:
- /* If we have no new revisions */
- omap_revision = DRA722_REV_ES1_0;
+ omap_revision = DRA722_REV_ES2_0;
break;
}
break;
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3c87e40650cf..9821be6dfd5e 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
void __init dra7xx_map_io(void)
{
iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+ omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b6d62e4cdfdd..2af6ff63e3b4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
(sf & SYSC_HAS_CLOCKACTIVITY))
_set_clockactivity(oh, oh->class->sysc->clockact, &v);
- /* If the cached value is the same as the new value, skip the write */
- if (oh->_sysc_cache != v)
- _write_sysconfig(v, oh);
+ _write_sysconfig(v, oh);
/*
* Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}
- _write_sysconfig(v, oh);
+ /* If the cached value is the same as the new value, skip the write */
+ if (oh->_sysc_cache != v)
+ _write_sysconfig(v, oh);
}
/**
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index 39736ad2a754..df8327713d06 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -582,9 +582,11 @@ static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
.user = OCP_USER_MPU,
};
+/* USB needs udelay 1 after reset at least on hp t410, use 2 for margin */
static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x10,
+ .srst_udelay = 2,
.sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET,
.idlemodes = SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 70df8f6cddcc..364418c78bf3 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -489,6 +489,7 @@ IS_OMAP_TYPE(3430, 0x3430)
#define DRA752_REV_ES2_0 (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
+#define DRA722_REV_ES2_0 (DRA7XX_CLASS | (0x22 << 16) | (0x20 << 8))
void omap2xxx_check_revision(void);
void omap3xxx_check_revision(void);
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 913a319c7b00..fffb697bbf0e 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -1235,5 +1235,6 @@ static struct platform_device pxa2xx_pxa_dma = {
void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
{
pxa_dma_pdata.dma_channels = nb_channels;
+ pxa_dma_pdata.nb_requestors = nb_requestors;
pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
}
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index c6f6ed1cbed0..36e3c79f4973 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -61,10 +61,7 @@ config SA1100_H3100
select MFD_IPAQ_MICRO
help
Say Y here if you intend to run this kernel on the Compaq iPAQ
- H3100 handheld computer. Information about this machine and the
- Linux port to this machine can be found at:
-
- <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100>
+ H3100 handheld computer.
config SA1100_H3600
bool "Compaq iPAQ H3600/H3700"
@@ -73,10 +70,7 @@ config SA1100_H3600
select MFD_IPAQ_MICRO
help
Say Y here if you intend to run this kernel on the Compaq iPAQ
- H3600 handheld computer. Information about this machine and the
- Linux port to this machine can be found at:
-
- <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
+ H3600 and H3700 handheld computers.
config SA1100_BADGE4
bool "HP Labs BadgePAD 4"
diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c
index 69141357afe8..db04142f88bc 100644
--- a/arch/arm/mach-uniphier/platsmp.c
+++ b/arch/arm/mach-uniphier/platsmp.c
@@ -120,7 +120,7 @@ static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
if (ret)
return ret;
- uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, sizeof(SZ_4));
+ uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
if (!uniphier_smp_rom_boot_rsv2) {
pr_err("failed to map ROM_BOOT_RSV2 register\n");
return -ENOMEM;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index deac58d5f1f7..c941e93048ad 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (!mask)
return NULL;
- buf = kzalloc(sizeof(*buf), gfp);
+ buf = kzalloc(sizeof(*buf),
+ gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
if (!buf)
return NULL;
diff --git a/arch/arm64/boot/dts/broadcom/vulcan.dtsi b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
index c49b5a85809c..85820e2bca9d 100644
--- a/arch/arm64/boot/dts/broadcom/vulcan.dtsi
+++ b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
@@ -108,12 +108,15 @@
reg = <0x0 0x30000000 0x0 0x10000000>;
reg-names = "PCI ECAM";
- /* IO 0x4000_0000 - 0x4001_0000 */
- ranges = <0x01000000 0 0x40000000 0 0x40000000 0 0x00010000
- /* MEM 0x4800_0000 - 0x5000_0000 */
- 0x02000000 0 0x48000000 0 0x48000000 0 0x08000000
- /* MEM64 pref 0x6_0000_0000 - 0x7_0000_0000 */
- 0x43000000 6 0x00000000 6 0x00000000 1 0x00000000>;
+ /*
+ * PCI ranges:
+ * IO no supported
+ * MEM 0x4000_0000 - 0x6000_0000
+ * MEM64 pref 0x40_0000_0000 - 0x60_0000_0000
+ */
+ ranges =
+ <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000
+ 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map =
/* addr pin ic icaddr icintr */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4150fd8bae01..3f29887995bc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -151,8 +151,7 @@
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
- VTCR_EL2_RES1)
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
#else
/*
@@ -163,8 +162,7 @@
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
- VTCR_EL2_RES1)
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
#endif
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index eb7490d232a0..40a0a24e6c98 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
#endif
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b7e82a795ac9..f5c6bd2541ef 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
static inline void __cpu_init_stage2(void)
{
- kvm_call_hyp(__init_stage2_translation);
+ u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+ WARN_ONCE(parange < 40,
+ "PARange is %d bits, unsupported configuration!", parange);
}
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4203d5f257bc..85da0f599cd6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -588,6 +588,15 @@ set_hcr:
msr vpidr_el2, x0
msr vmpidr_el2, x1
+ /*
+ * When VHE is not in use, early init of EL2 and EL1 needs to be
+ * done here.
+ * When VHE _is_ in use, EL1 will not be used in the host and
+ * requires no configuration, and all non-hyp-specific EL2 setup
+ * will be done via the _EL1 system register aliases in __cpu_setup.
+ */
+ cbnz x2, 1f
+
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
/* Coprocessor traps. */
mov x0, #0x33ff
msr cptr_el2, x0 // Disable copro. traps to EL2
+1:
#ifdef CONFIG_COMPAT
msr hstr_el2, xzr // Disable CP15 traps to EL2
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
.macro update_early_cpu_boot_status status, tmp1, tmp2
mov \tmp2, #\status
- str_l \tmp2, __early_cpu_boot_status, \tmp1
+ adr_l \tmp1, __early_cpu_boot_status
+ str \tmp2, [\tmp1]
dmb sy
dc ivac, \tmp1 // Invalidate potentially stale cache line
.endm
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index aef3605a8c47..18a71bcd26ee 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
static int smp_spin_table_cpu_init(unsigned int cpu)
{
struct device_node *dn;
+ int ret;
dn = of_get_cpu_node(cpu, NULL);
if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
/*
* Determine the address from which the CPU is polling.
*/
- if (of_property_read_u64(dn, "cpu-release-addr",
- &cpu_release_addr[cpu])) {
+ ret = of_property_read_u64(dn, "cpu-release-addr",
+ &cpu_release_addr[cpu]);
+ if (ret)
pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
cpu);
- return -1;
- }
+ of_node_put(dn);
- return 0;
+ return ret;
}
static int smp_spin_table_cpu_prepare(unsigned int cpu)
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index 5a9f3bf542b0..bcbe761a5a3d 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -20,9 +20,10 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
{
u64 val = VTCR_EL2_FLAGS;
+ u64 parange;
u64 tmp;
/*
@@ -30,7 +31,39 @@ void __hyp_text __init_stage2_translation(void)
* bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
* PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
*/
- val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+ parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+ val |= parange << 16;
+
+ /* Compute the actual PARange... */
+ switch (parange) {
+ case 0:
+ parange = 32;
+ break;
+ case 1:
+ parange = 36;
+ break;
+ case 2:
+ parange = 40;
+ break;
+ case 3:
+ parange = 42;
+ break;
+ case 4:
+ parange = 44;
+ break;
+ case 5:
+ default:
+ parange = 48;
+ break;
+ }
+
+ /*
+ * ... and clamp it to 40 bits, unless we have some braindead
+ * HW that implements less than that. In all cases, we'll
+ * return that value for the rest of the kernel to decide what
+ * to do.
+ */
+ val |= 64 - (parange > 40 ? 40 : parange);
/*
* Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
@@ -42,4 +75,6 @@ void __hyp_text __init_stage2_translation(void)
VTCR_EL2_VS_8BIT;
write_sysreg(val, vtcr_el2);
+
+ return parange;
}
diff --git a/arch/m68k/coldfire/gpio.c b/arch/m68k/coldfire/gpio.c
index 8832083e1cb8..b515809be2b9 100644
--- a/arch/m68k/coldfire/gpio.c
+++ b/arch/m68k/coldfire/gpio.c
@@ -158,11 +158,6 @@ static int mcfgpio_to_irq(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
-static struct bus_type mcfgpio_subsys = {
- .name = "gpio",
- .dev_name = "gpio",
-};
-
static struct gpio_chip mcfgpio_chip = {
.label = "mcfgpio",
.request = mcfgpio_request,
@@ -178,8 +173,7 @@ static struct gpio_chip mcfgpio_chip = {
static int __init mcfgpio_sysinit(void)
{
- gpiochip_add_data(&mcfgpio_chip, NULL);
- return subsys_system_register(&mcfgpio_subsys, NULL);
+ return gpiochip_add_data(&mcfgpio_chip, NULL);
}
core_initcall(mcfgpio_sysinit);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index d1fc4796025e..3ee6976f6088 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-amiga"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -64,7 +63,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -359,6 +359,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -452,6 +453,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -468,6 +470,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -549,6 +552,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -557,7 +561,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -565,12 +568,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -594,7 +594,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 9bfe8be3658c..e96787ffcbce 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-apollo"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -411,6 +412,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -427,6 +429,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -508,6 +511,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -516,7 +520,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -524,12 +527,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -553,7 +553,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index ebdcfae55580..083fe6beac14 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-atari"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -350,6 +350,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -432,6 +433,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -448,6 +450,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -529,6 +532,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -537,7 +541,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -545,12 +548,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -574,7 +574,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 8acc65e54995..475130c06dcb 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-bvme6000"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 0c6a3d52b26e..4339658c200f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-hp300"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -413,6 +414,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -429,6 +431,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -510,6 +513,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -518,7 +522,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -526,12 +529,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -555,7 +555,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 12a8a6cb32f4..831cc8c3a2e2 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-mac"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -61,7 +60,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -357,6 +357,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -435,6 +436,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -451,6 +453,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -532,6 +535,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -540,7 +544,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -548,12 +551,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -577,7 +577,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 64ff2dcb34c8..6377afeb522b 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-multi"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -71,7 +70,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -295,7 +293,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -390,6 +390,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -515,6 +516,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -531,6 +533,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -612,6 +615,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -620,7 +624,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -628,12 +631,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -657,7 +657,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 07fc6abcfe0c..4304b3d56262 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-mvme147"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -59,7 +58,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -280,7 +278,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -339,6 +339,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 69903ded88f7..074bda4094ff 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-mvme16x"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index bd8401686dde..07b9fa8d7f2e 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-q40"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -346,6 +346,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -426,6 +427,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -442,6 +444,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -523,6 +526,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -531,7 +535,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -539,12 +542,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -568,7 +568,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 5f9fb3ab9636..36e6fae02d45 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-sun3"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -509,7 +513,6 @@ CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -517,12 +520,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -546,7 +546,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5d1c674530e2..903acf929511 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -1,7 +1,6 @@
CONFIG_LOCALVERSION="-sun3x"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
@@ -510,7 +514,6 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
@@ -518,12 +521,9 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
@@ -547,7 +547,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index bafaff6dcd7b..a857d82ec509 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 377
+#define NR_syscalls 379
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 0ca729665f29..9fe674bf911f 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -382,5 +382,7 @@
#define __NR_membarrier 374
#define __NR_mlock2 375
#define __NR_copy_file_range 376
+#define __NR_preadv2 377
+#define __NR_pwritev2 378
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 8bb94261ff97..d6fd6d9ced24 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -397,3 +397,5 @@ ENTRY(sys_call_table)
.long sys_membarrier
.long sys_mlock2 /* 375 */
.long sys_copy_file_range
+ .long sys_preadv2
+ .long sys_pwritev2
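
The two hunks above complete the m68k wiring for preadv2/pwritev2: the uapi header assigns syscall numbers 377 and 378, NR_syscalls grows to 379, and the call table gains the two entries. Purely as a hedged illustration (not part of the patch), userspace could exercise the new number directly through syscall(2) before libc grows wrappers; the argument layout below, in particular the low/high split of the 64-bit offset on a 32-bit ABI, is an assumption to check against the kernel's actual wiring, and the number 377 is only meaningful on m68k.

/* Hypothetical m68k-only probe of the newly wired syscall number. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/syscall.h>

#define NR_preadv2_m68k 377	/* __NR_preadv2 per this patch */

int main(void)
{
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/etc/hostname", O_RDONLY);
	long n;

	if (fd < 0)
		return 1;
	/* assumed layout: fd, iov, iovcnt, pos_low, pos_high, flags */
	n = syscall(NR_preadv2_m68k, fd, &iov, 1, 0, 0, 0);
	if (n < 0)
		perror("preadv2");
	else
		printf("read %ld bytes at offset 0\n", n);
	close(fd);
	return 0;
}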
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bd3c873951a1..88cfaa8af78e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -4,8 +4,8 @@ config PARISC
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
select HAVE_OPROFILE
- select HAVE_FUNCTION_TRACER if 64BIT
- select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
select ARCH_WANT_FRAME_POINTERS
select RTC_CLASS
select RTC_DRV_GENERIC
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index bc989e522a04..68b7cbd0810a 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -2,9 +2,13 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
depends on DEBUG_KERNEL
+ default y
help
Mark the kernel read-only data as write-protected in the pagetables,
in order to catch accidental (and incorrect) writes to such const
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 965a0999fc4c..75cb451b1f03 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -62,9 +62,7 @@ cflags-y += -mdisable-fpregs
# Without this, "ld -r" results in .text sections that are too big
# (> 0x40000) for branches to reach stubs.
-ifndef CONFIG_FUNCTION_TRACER
- cflags-y += -ffunction-sections
-endif
+cflags-y += -ffunction-sections
# Use long jumps instead of long branches (needed if your linker fails to
# link a too big vmlinux executable). Not enabled for building modules.
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 544ed8ef87eb..24cd81d58d70 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -4,23 +4,7 @@
#ifndef __ASSEMBLY__
extern void mcount(void);
-/*
- * Stack of return addresses for functions of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
- unsigned long ret;
- unsigned long func;
- unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry.S
- */
-extern void return_to_handler(void);
-
+#define MCOUNT_INSN_SIZE 4
extern unsigned long return_address(unsigned int);
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index ff87b4603e3d..69a11183d48d 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -15,11 +15,7 @@ ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_cache.o = -pg
-CFLAGS_REMOVE_irq.o = -pg
-CFLAGS_REMOVE_pacache.o = -pg
CFLAGS_REMOVE_perf.o = -pg
-CFLAGS_REMOVE_traps.o = -pg
-CFLAGS_REMOVE_unaligned.o = -pg
CFLAGS_REMOVE_unwind.o = -pg
endif
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 623496c11756..39127d3e70e5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1970,43 +1970,98 @@ pt_regs_ok:
b intr_restore
copy %r25,%r16
- .import schedule,code
syscall_do_resched:
- BL schedule,%r2
+ load32 syscall_check_resched,%r2 /* if resched, we start over again */
+ load32 schedule,%r19
+ bv %r0(%r19) /* jumps to schedule() */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
#endif
- b syscall_check_resched /* if resched, we start over again */
- nop
ENDPROC(syscall_exit)
#ifdef CONFIG_FUNCTION_TRACER
+
.import ftrace_function_trampoline,code
-ENTRY(_mcount)
- copy %r3, %arg2
+ .align L1_CACHE_BYTES
+ .globl mcount
+ .type mcount, @function
+ENTRY(mcount)
+_mcount:
+ .export _mcount,data
+ .proc
+ .callinfo caller,frame=0
+ .entry
+ /*
+ * The 64bit mcount() function pointer needs 4 dwords, of which the
+ * first two are free. We optimize it here and put 2 instructions for
+ * calling mcount(), and 2 instructions for ftrace_stub(). That way we
+ * have all on one L1 cacheline.
+ */
b ftrace_function_trampoline
+ copy %r3, %arg2 /* caller original %sp */
+ftrace_stub:
+ .globl ftrace_stub
+ .type ftrace_stub, @function
+#ifdef CONFIG_64BIT
+ bve (%rp)
+#else
+ bv %r0(%rp)
+#endif
nop
-ENDPROC(_mcount)
+#ifdef CONFIG_64BIT
+ .dword mcount
+ .dword 0 /* code in head.S puts value of global gp here */
+#endif
+ .exit
+ .procend
+ENDPROC(mcount)
+ .align 8
+ .globl return_to_handler
+ .type return_to_handler, @function
ENTRY(return_to_handler)
- load32 return_trampoline, %rp
- copy %ret0, %arg0
- copy %ret1, %arg1
- b ftrace_return_to_handler
- nop
-return_trampoline:
- copy %ret0, %rp
- copy %r23, %ret0
- copy %r24, %ret1
+ .proc
+ .callinfo caller,frame=FRAME_SIZE
+ .entry
+ .export parisc_return_to_handler,data
+parisc_return_to_handler:
+ copy %r3,%r1
+ STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
+ copy %sp,%r3
+ STREGM %r1,FRAME_SIZE(%sp)
+ STREG %ret0,8(%r3)
+ STREG %ret1,16(%r3)
-.globl ftrace_stub
-ftrace_stub:
+#ifdef CONFIG_64BIT
+ loadgp
+#endif
+
+ /* call ftrace_return_to_handler(0) */
+#ifdef CONFIG_64BIT
+ ldo -16(%sp),%ret1 /* Reference param save area */
+#endif
+ BL ftrace_return_to_handler,%r2
+ ldi 0,%r26
+ copy %ret0,%rp
+
+ /* restore original return values */
+ LDREG 8(%r3),%ret0
+ LDREG 16(%r3),%ret1
+
+ /* return from function */
+#ifdef CONFIG_64BIT
+ bve (%rp)
+#else
bv %r0(%rp)
- nop
+#endif
+ LDREGM -FRAME_SIZE(%sp),%r3
+ .exit
+ .procend
ENDPROC(return_to_handler)
+
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_IRQSTACKS
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 559d400f9385..b13f9ec6f294 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -1,6 +1,6 @@
/*
* Code for tracing calls in Linux kernel.
- * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
*
* based on code for x86 which is:
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
@@ -13,104 +13,21 @@
#include <linux/init.h>
#include <linux/ftrace.h>
+#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
- unsigned long func, int *depth)
-{
- int index;
-
- if (!current->ret_stack)
- return -EBUSY;
-
- /* The return trace stack is full */
- if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
- atomic_inc(&current->trace_overrun);
- return -EBUSY;
- }
-
- index = ++current->curr_ret_stack;
- barrier();
- current->ret_stack[index].ret = ret;
- current->ret_stack[index].func = func;
- current->ret_stack[index].calltime = time;
- *depth = index;
-
- return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
- int index;
-
- index = current->curr_ret_stack;
-
- if (unlikely(index < 0)) {
- ftrace_graph_stop();
- WARN_ON(1);
- /* Might as well panic, otherwise we have no where to go */
- *ret = (unsigned long)
- dereference_function_descriptor(&panic);
- return;
- }
-
- *ret = current->ret_stack[index].ret;
- trace->func = current->ret_stack[index].func;
- trace->calltime = current->ret_stack[index].calltime;
- trace->overrun = atomic_read(&current->trace_overrun);
- trace->depth = index;
- barrier();
- current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long retval0,
- unsigned long retval1)
-{
- struct ftrace_graph_ret trace;
- unsigned long ret;
-
- pop_return_trace(&trace, &ret);
- trace.rettime = local_clock();
- ftrace_graph_return(&trace);
-
- if (unlikely(!ret)) {
- ftrace_graph_stop();
- WARN_ON(1);
- /* Might as well panic. What else to do? */
- ret = (unsigned long)
- dereference_function_descriptor(&panic);
- }
-
- /* HACK: we hand over the old functions' return values
- in %r23 and %r24. Assembly in entry.S will take care
- and move those to their final registers %ret0 and %ret1 */
- asm( "copy %0, %%r23 \n\t"
- "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
-
- return ret;
-}
-
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
- unsigned long long calltime;
struct ftrace_graph_ent trace;
+ extern int parisc_return_to_handler;
if (unlikely(ftrace_graph_is_dead()))
return;
@@ -119,64 +36,47 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
old = *parent;
- *parent = (unsigned long)
- dereference_function_descriptor(&return_to_handler);
- if (unlikely(!__kernel_text_address(old))) {
- ftrace_graph_stop();
- *parent = old;
- WARN_ON(1);
- return;
- }
-
- calltime = local_clock();
+ trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
- if (push_return_trace(old, calltime,
- self_addr, &trace.depth) == -EBUSY) {
- *parent = old;
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace))
return;
- }
- trace.func = self_addr;
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+ 0 ) == -EBUSY)
+ return;
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
- *parent = old;
- }
+ /* activate parisc_return_to_handler() as return point */
+ *parent = (unsigned long) &parisc_return_to_handler;
}
-
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-void ftrace_function_trampoline(unsigned long parent,
+void notrace ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3)
{
- extern ftrace_func_t ftrace_trace_function;
+ extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */
+ extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
if (ftrace_trace_function != ftrace_stub) {
- ftrace_trace_function(parent, self_addr);
+ /* struct ftrace_ops *op, struct pt_regs *regs); */
+ ftrace_trace_function(parent, self_addr, NULL, NULL);
return;
}
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (ftrace_graph_entry && ftrace_graph_return) {
- unsigned long sp;
+ if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+ ftrace_graph_entry != ftrace_graph_entry_stub) {
unsigned long *parent_rp;
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- /* sanity check: is stack pointer which we got from
- assembler function in entry.S in a reasonable
- range compared to current stack pointer? */
- if ((sp - org_sp_gr3) > 0x400)
- return;
-
/* calculate pointer to %rp in stack */
- parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+ parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
/* sanity check: parent_rp should hold parent */
if (*parent_rp != parent)
return;
-
+
prepare_ftrace_return(parent_rp, self_addr);
return;
}
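
The rewritten prepare_ftrace_return() above diverts the caller's saved return address to parisc_return_to_handler after pushing the real address onto the per-task return stack, so every traced function "returns" into the tracer first. Ordinary C cannot patch a stack frame portably, so the sketch below is only a loose, hedged analogy of that diversion using a function pointer as the saved return target; none of these names exist in the kernel.

/* Loose userspace analogy of a graph-tracer return hook: remember the real
 * target, install a handler in its place, and have the handler log the
 * exit event before forwarding to the original. */
#include <stdio.h>

static void (*saved_original)(void);	/* one-deep "return stack" */

static void original_return_target(void)
{
	puts("back in the real caller");
}

static void return_handler(void)
{
	puts("tracer: function is returning");	/* graph-exit event */
	saved_original();			/* resume the real return path */
}

/* analogue of prepare_ftrace_return(): swap the target, keep the original */
static void (*hook_return(void (*parent)(void)))(void)
{
	saved_original = parent;
	return return_handler;
}

int main(void)
{
	void (*ret_path)(void) = original_return_target;

	ret_path = hook_return(ret_path);
	ret_path();		/* handler runs first, then the original target */
	return 0;
}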
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 75aa0db9f69e..bbbe360b458f 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -129,6 +129,15 @@ $pgt_fill_loop:
/* And the stack pointer too */
ldo THREAD_SZ_ALGN(%r6),%sp
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+ .import _mcount,data
+ /* initialize mcount FPTR */
+ /* Get the global data pointer */
+ loadgp
+ load32 PA(_mcount), %r10
+ std %dp,0x18(%r10)
+#endif
+
#ifdef CONFIG_SMP
/* Set the smp rendezvous address into page zero.
** It would be safer to do this in init_smp_config() but
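
The head.S addition stores the kernel's global data pointer into the _mcount slot reserved by the two .dword directives in entry.S: on 64-bit parisc, as on other function-descriptor ABIs, a C function pointer designates a small record holding the entry address and the gp value the callee expects, not a bare code address. A hedged sketch of that shape, with invented field names and addresses, only to make the "patch the descriptor before anyone calls through it" step easier to read:

/* Illustrative only: the shape of a function-descriptor style pointer.
 * Field names are made up; the patch fills in the gp slot for _mcount
 * at boot, before any traced function calls through the descriptor. */
#include <stdio.h>

struct func_descriptor {
	unsigned long entry;	/* address of the first instruction */
	unsigned long gp;	/* global/data pointer the callee expects */
};

static void describe(const struct func_descriptor *fd)
{
	/* An indirect call through such a pointer loads gp first and then
	 * branches to entry, which is why gp must be valid beforehand. */
	printf("entry=%#lx gp=%#lx\n", fd->entry, fd->gp);
}

int main(void)
{
	struct func_descriptor mcount_fd = { .entry = 0x100000, .gp = 0x200000 };

	describe(&mcount_fd);
	return 0;
}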
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 8dde19962a5b..f63c96cd3608 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
0x00000040
+/* Reserved - do not use 0x00000004 */
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
+ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
+ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
/*
- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
- * which is 0 if the kernel doesn't support TM.
+ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ * we don't want to turn on TM here, so we use the *_COMP versions
+ * which are 0 if the kernel doesn't support TM.
*/
- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
+ {CPU_FTR_TM_COMP, 0, 0,
+ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
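
The prom.c hunk inserts a cpu_user_ftrs2 member in the middle of struct ibm_pa_feature, which forces every positional initializer in ibm_pa_features[] to be re-counted by hand; the diff shows exactly that churn. As a hedged side note, not part of the patch: C99 designated initializers avoid this class of error, because entries name their fields and unnamed members default to zero. A minimal sketch with invented names:

/* Sketch with made-up names: adding a member to the struct does not
 * disturb existing designated initializers, unlike the positional form
 * rewritten in the hunk above. */
#include <stdio.h>

struct pa_feature_demo {
	unsigned long cpu_features;
	unsigned long mmu_features;
	unsigned int  cpu_user_ftrs;
	unsigned int  cpu_user_ftrs2;	/* newly added member */
	unsigned char pabyte;
	unsigned char pabit;
	unsigned char invert;
};

static const struct pa_feature_demo table[] = {
	{ .cpu_user_ftrs = 0x40, .pabyte = 0, .pabit = 0 },
	{ .cpu_features = 0x1,   .pabyte = 0, .pabit = 3 },
	/* .cpu_user_ftrs2 simply stays 0 where it is not named */
};

int main(void)
{
	printf("%zu entries, ftrs2 of entry 0 = %u\n",
	       sizeof(table) / sizeof(table[0]), table[0].cpu_user_ftrs2);
	return 0;
}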
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index aad23e3dff2c..bf24ab188921 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -4,6 +4,9 @@ config MMU
config ZONE_DMA
def_bool y
+config CPU_BIG_ENDIAN
+ def_bool y
+
config LOCKDEP_SUPPORT
def_bool y
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b6bfa169a002..535a46d46d28 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -44,7 +44,8 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
-} __packed __aligned(64);
+ u64 pad[2];
+} __packed __aligned(128);
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
index 781a9cf9b002..e10f8337367b 100644
--- a/arch/s390/include/asm/seccomp.h
+++ b/arch/s390/include/asm/seccomp.h
@@ -13,4 +13,6 @@
#define __NR_seccomp_exit_32 __NR_exit
#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+#include <asm-generic/seccomp.h>
+
#endif /* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4549c964589..e5f50a7d2f4e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
if (_raw_compare_and_swap(&lp->lock, 0, cpu))
return;
local_irq_restore(flags);
+ continue;
}
/* Check if the lock owner is running. */
if (first_diag && cpu_is_preempted(~owner)) {
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1baf0ba96242..c9f8bbdb1bf8 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -34,11 +34,6 @@ enum {
DECLARE_PER_CPU(int, cpu_state);
void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(const struct cpumask *mask);
-
-void local_timer_interrupt(void);
-void local_timer_setup(unsigned int cpu);
-void local_timer_stop(unsigned int cpu);
void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index b0a282d65f6a..358e3f516ef6 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -17,7 +17,7 @@
#define mc_capable() (1)
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
extern cpumask_t cpu_core_map[NR_CPUS];
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 4a298808789c..839612c8a0a0 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -73,8 +73,6 @@ static void shx3_prepare_cpus(unsigned int max_cpus)
{
int i;
- local_timer_setup(0);
-
BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++)
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 772caffba22f..c82912a61d74 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpumask_t cpu_core_map[NR_CPUS];
EXPORT_SYMBOL(cpu_core_map);
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+static cpumask_t cpu_coregroup_map(int cpu)
{
/*
* Presently all SH-X3 SMP cores are multi-cores, so just keep it
@@ -30,7 +30,7 @@ static cpumask_t cpu_coregroup_map(unsigned int cpu)
return *cpu_possible_mask;
}
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_core_map[cpu];
}
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6915ff2bd996..8774cb23064f 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -26,7 +26,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -40,6 +40,18 @@ GCOV_PROFILE := n
UBSAN_SANITIZE :=n
LDFLAGS := -m elf_$(UTS_MACHINE)
+ifeq ($(CONFIG_RELOCATABLE),y)
+# If kernel is relocatable, build compressed kernel as PIE.
+ifeq ($(CONFIG_X86_32),y)
+LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+else
+# To build 64-bit compressed kernel as PIE, we disable relocation
+# overflow check to avoid relocation overflow error with a new linker
+# command-line option, -z noreloc-overflow.
+LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+ && echo "-z noreloc-overflow -pie --no-dynamic-linker")
+endif
+endif
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 8ef964ddc18e..0256064da8da 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -31,6 +31,34 @@
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
+/*
+ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
+ * relocation to get the symbol address in PIC. When the compressed x86
+ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
+ * relocations to their fixed symbol addresses. However, when the
+ * compressed x86 kernel is loaded at a different address, it leads
+ * to the following load failure:
+ *
+ * Failed to allocate space for phdrs
+ *
+ * during the decompression stage.
+ *
+ * If the compressed x86 kernel is relocatable at run-time, it should be
+ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
+ * Position Independent Executable (PIE) so that linker won't optimize
+ * R_386_GOT32X relocation to its fixed symbol address. Older
+ * linkers generate R_386_32 relocations against locally defined symbols,
+ * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
+ * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
+ * R_386_32 relocations when relocating the kernel. To generate
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * hidden:
+ */
+ .hidden _bss
+ .hidden _ebss
+ .hidden _got
+ .hidden _egot
+
__HEAD
ENTRY(startup_32)
#ifdef CONFIG_EFI_STUB
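
The comment block added above spells out the motivation for the .hidden markings: hidden symbols are known to resolve within the image, so a PIE link emits R_386_RELATIVE relocations for them, which the kernel's self-relocation code can process, instead of the R_386_32 (or GOT-mediated) forms it cannot. The same attribute is available from C; the sketch below is a hedged illustration with an invented symbol, and the exact relocation produced still depends on the toolchain.

/* Illustrative C equivalent of the .hidden markings: a hidden global is
 * known to be resolved inside this module, so position-independent code
 * can usually address it without a GOT entry. Assumed invocation:
 *   gcc -m32 -fpie -pie demo.c
 * then compare "readelf -r" output with and without the attribute. */
#include <stdio.h>

__attribute__((visibility("hidden"))) unsigned long demo_bss_marker;

unsigned long *demo_addr_of_marker(void)
{
	/* Taking the address is what forces a relocation to be emitted. */
	return &demo_bss_marker;
}

int main(void)
{
	printf("marker lives at %p\n", (void *)demo_addr_of_marker());
	return 0;
}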
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index b0c0d16ef58d..86558a199139 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -33,6 +33,14 @@
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
+/*
+ * Locally defined symbols should be marked hidden:
+ */
+ .hidden _bss
+ .hidden _ebss
+ .hidden _got
+ .hidden _egot
+
__HEAD
.code32
ENTRY(startup_32)
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a8a0224fa0f8..081255cea1ee 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index f8a29d2c97b0..e6a8613fbfb0 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
+#define hugepages_supported() cpu_has_pse
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 0a850100c594..2658e2af74ec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
void mce_gen_pool_process(void)
{
struct llist_node *head;
- struct mce_evt_llist *node;
+ struct mce_evt_llist *node, *tmp;
struct mce *mce;
head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
return;
head = llist_reverse_order(head);
- llist_for_each_entry(node, head, llnode) {
+ llist_for_each_entry_safe(node, tmp, head, llnode) {
mce = &node->mce;
atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
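
The switch to llist_for_each_entry_safe() matters because gen_pool_free() releases the node the cursor points at; the non-_safe variant would then read the freed node's link to advance. A hedged, self-contained userspace sketch of the same rule (fetch the next pointer before freeing the current element) with an invented list type:

/* Generic demonstration of "grab next before freeing current"; the list
 * and field names are invented, but the hazard is the one fixed above. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static void free_all(struct node *head)
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* read the link BEFORE freeing cur */
		printf("processing %d\n", cur->payload);
		free(cur);		/* cur is now invalid */
		cur = next;		/* safe: taken from the saved copy */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->payload = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}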
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 4e7c6933691c..10c11b4da31d 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static unsigned char hv_get_nmi_reason(void)
+{
+ return 0;
+}
+
static void __init ms_hyperv_init_platform(void)
{
/*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
mark_tsc_unstable("running on Hyper-V");
+
+ /*
+ * Generation 2 instances don't support reading the NMI status from
+ * 0x61 port.
+ */
+ if (efi_enabled(EFI_BOOT))
+ x86_platform.get_nmi_reason = hv_get_nmi_reason;
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
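
Here ms_hyperv_init_platform() installs hv_get_nmi_reason() in place of the default when the guest booted via EFI, because Generation 2 instances have no port 0x61 to read. The underlying pattern, a callback slot in an ops structure overridden at init time, can be sketched generically; the names below are invented and are not the real x86_platform definitions.

/* Invented names; the point is only the pattern: a default callback in an
 * ops struct, replaced at init time when the platform cannot support it. */
#include <stdio.h>

struct platform_ops_demo {
	unsigned char (*get_nmi_reason)(void);
};

static unsigned char default_get_nmi_reason(void)
{
	return 0x80;		/* pretend we read an I/O port */
}

static unsigned char stub_get_nmi_reason(void)
{
	return 0;		/* no such port on this platform */
}

static struct platform_ops_demo ops = {
	.get_nmi_reason = default_get_nmi_reason,
};

int main(void)
{
	int booted_via_efi = 1;		/* assumption for the demo */

	if (booted_via_efi)
		ops.get_nmi_reason = stub_get_nmi_reason;

	printf("nmi reason: %#x\n", ops.get_nmi_reason());
	return 0;
}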
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 319b08a5b6ed..2367ae07eb76 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -146,6 +146,31 @@ int default_check_phys_apicid_present(int phys_apicid)
struct boot_params boot_params;
+/*
+ * Machine setup..
+ */
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource bss_resource = {
+ .name = "Kernel bss",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
@@ -924,6 +949,13 @@ void __init setup_arch(char **cmdline_p)
mpx_mm_init(&init_mm);
+ code_resource.start = __pa_symbol(_text);
+ code_resource.end = __pa_symbol(_etext)-1;
+ data_resource.start = __pa_symbol(_etext);
+ data_resource.end = __pa_symbol(_edata)-1;
+ bss_resource.start = __pa_symbol(__bss_start);
+ bss_resource.end = __pa_symbol(__bss_stop)-1;
+
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
@@ -987,6 +1019,11 @@ void __init setup_arch(char **cmdline_p)
x86_init.resources.probe_roms();
+ /* after parse_early_param, so could debug it */
+ insert_resource(&iomem_resource, &code_resource);
+ insert_resource(&iomem_resource, &data_resource);
+ insert_resource(&iomem_resource, &bss_resource);
+
e820_add_kernel_range();
trim_bios_range();
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8efb839948e5..bbbaa802d13e 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -534,6 +534,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
do_cpuid_1_ent(&entry[i], function, idx);
if (idx == 1) {
entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
+ cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
entry[i].ebx = 0;
if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
entry[i].ebx =
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index b70df72e2b33..66b33b96a31b 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -173,10 +173,9 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
int index = (pfec >> 1) +
(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
bool fault = (mmu->permissions[index] >> pte_access) & 1;
+ u32 errcode = PFERR_PRESENT_MASK;
WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
- pfec |= PFERR_PRESENT_MASK;
-
if (unlikely(mmu->pkru_mask)) {
u32 pkru_bits, offset;
@@ -189,15 +188,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
- offset = pfec - 1 +
+ offset = (pfec & ~1) +
((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
pkru_bits &= mmu->pkru_mask >> offset;
- pfec |= -pkru_bits & PFERR_PK_MASK;
+ errcode |= -pkru_bits & PFERR_PK_MASK;
fault |= (pkru_bits != 0);
}
- return -(uint32_t)fault & pfec;
+ return -(u32)fault & errcode;
}
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
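
The new return statement builds the error code branchlessly: fault is a bool, so -(u32)fault is either 0 or 0xffffffff, and ANDing that with errcode yields either 0 (no fault) or the accumulated error bits. A small hedged demonstration of just that arithmetic, outside any KVM context:

/* Demonstrates the -(u32)flag & bits idiom used in the return above. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t select_errcode(bool fault, uint32_t errcode)
{
	/* (uint32_t)true == 1, so its negation is all-ones; false gives 0. */
	return -(uint32_t)fault & errcode;
}

int main(void)
{
	uint32_t errcode = 0x1 | 0x20;	/* e.g. PRESENT plus PK style bits */

	printf("fault=0 -> %#x\n", (unsigned)select_errcode(false, errcode)); /* 0 */
	printf("fault=1 -> %#x\n", (unsigned)select_errcode(true, errcode));  /* 0x21 */
	return 0;
}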
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1d971c7553c3..bc019f70e0b6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -360,7 +360,7 @@ retry_walk:
goto error;
if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
- errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+ errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
goto error;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0a2c70e43bc8..9b7798c7b210 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
return 1;
}
- kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
- kvm_load_guest_xcr0(vcpu);
-
vcpu->mode = IN_GUEST_MODE;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}
+ kvm_load_guest_xcr0(vcpu);
+
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
+ kvm_put_guest_xcr0(vcpu);
+
/* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);
@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
* and assume host would use all available bits.
* Guest xcr0 would be loaded later.
*/
- kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- kvm_put_guest_xcr0(vcpu);
-
if (!vcpu->guest_fpu_loaded) {
vcpu->fpu_counter = 0;
return;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 2c6ae2aed2c4..d7eb77e1e3a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;
out_free_info:
free_part_info(p);
@@ -378,6 +383,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
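
The add_partition() change calls hd_ref_init() before the partition is published and adds an out_remove_file label so that, for a whole-disk partition, the sysfs attribute presumably created earlier in the function (not visible in this hunk) is removed again on failure; as elsewhere in the function, each failure jumps to the label that releases everything acquired so far, newest first. A hedged generic sketch of that unwind shape, with invented helpers:

/* Invented helpers; shows only the reverse-order goto unwind used above. */
#include <stdio.h>
#include <stdlib.h>

static int acquire_a(void) { puts("acquire a"); return 0; }
static int acquire_b(void) { puts("acquire b"); return 0; }
static int acquire_c(void) { puts("acquire c"); return -1; }	/* fails */
static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto out_release_a;
	err = acquire_c();
	if (err)
		goto out_release_b;	/* newest resource torn down first */
	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
out:
	return err;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}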
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 1cea67d43e1d..ead8dc0d084e 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
req_ctx->child_req.src = req->src;
req_ctx->child_req.src_len = req->src_len;
req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size - 1;
+ req_ctx->child_req.dst_len = ctx->key_size ;
- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ req_ctx->out_buf = kmalloc(ctx->key_size,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size - 1, NULL);
+ ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
req_ctx->child_req.src = req->src;
req_ctx->child_req.src_len = req->src_len;
req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size - 1;
+ req_ctx->child_req.dst_len = ctx->key_size;
- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ req_ctx->out_buf = kmalloc(ctx->key_size,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size - 1, NULL);
+ ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index a434b58c5e11..d60f73a63c3c 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
offset);
rc = -ENXIO;
}
- } else
+ } else {
rc = 0;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd);
+ }
out:
ACPI_FREE(out_obj);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
return false;
}
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
struct bcma_device *core)
{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
struct of_phandle_args out_irq;
int ret;
- if (!parent || !parent->dev.of_node)
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
return 0;
ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
{
struct device_node *node;
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return;
+
node = bcma_of_find_child_device(parent, core);
if (node)
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
}
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
- struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
- struct bcma_device *core, int num)
-{
- return 0;
-}
-#endif /* CONFIG_OF */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so offset from the bvec must
+ * be passed to iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c2e52864bb03..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
}
}
- pr_err("invalid dram address 0x%x\n", phyaddr);
+ pr_err("invalid dram address %pa\n", &phyaddr);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
- if (priv->bank[i].end > priv->bank[j].base ||
+ if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ca9c40309757..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
#define RNG_CTRL 0x00
#define RNG_EN (1 << 0)
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecafdeaea..c407c47a3232 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
32, clocksource_mmio_readl_up);
- if (!ret) {
+ if (ret) {
pr_err("%s: registration failed\n", np->full_name);
return;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b591b3..e93405f0eac4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
unsigned int new_freq;
+ if (cpufreq_suspended)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8b5a415ee14a..30fe323c4551 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1130,6 +1130,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio);
+ } else {
+ sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ if (sample_ratio < int_tofp(1))
+ core_busy = 0;
}
cpu->sample.busy_scaled = core_busy;
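The intel_pstate hunk adds a clamp for the near-idle case: when mperf is under 1 % of tsc across the sample, the 100 * mperf / tsc ratio drops below one and the busy estimate is zeroed rather than trusted. A toy integer version of that check (the driver does the same thing in fixed point via div_fp):

/* Toy version of the idle clamp: discard the busy estimate when the core
 * actually ran for less than 1% of the sampling window. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mperf = 5000, tsc = 1000000;	/* core active ~0.5% of the time */
	unsigned int core_busy = 80;		/* stale estimate from aperf/mperf */

	if (100 * mperf / tsc < 1)
		core_busy = 0;

	printf("core_busy = %u\n", core_busy);	/* prints 0 */
	return 0;
}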
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;
/* Disable interrupts */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct edma_desc *edesc;
dma_addr_t src_addr, dst_addr;
enum dma_slave_buswidth dev_width;
+ bool use_intermediate = false;
u32 burst;
int i, ret, nslots;
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* but the synchronization is difficult to achieve with Cyclic and
* cannot be guaranteed, so we error out early.
*/
- if (nslots > MAX_NR_SG)
- return NULL;
+ if (nslots > MAX_NR_SG) {
+ /*
+ * If the burst and period sizes are the same, we can put
+ * the full buffer into a single period and activate
+ * intermediate interrupts. This will produce interrupts
+ * after each burst, which is also after each desired period.
+ */
+ if (burst == period_len) {
+ period_len = buf_len;
+ nslots = 2;
+ use_intermediate = true;
+ } else {
+ return NULL;
+ }
+ }
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
/*
* Enable period interrupt only if it is requested
*/
- if (tx_flags & DMA_PREP_INTERRUPT)
+ if (tx_flags & DMA_PREP_INTERRUPT) {
edesc->pset[i].param.opt |= TCINTEN;
+
+ /* Also enable intermediate interrupts if necessary */
+ if (use_intermediate)
+ edesc->pset[i].param.opt |= ITCINTEN;
+ }
}
/* Place the cyclic channel to highest priority queue */
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
- struct platform_device *tc_pdev;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !tc)
- return;
-
- tc_pdev = of_find_device_by_node(tc->node);
- if (!tc_pdev) {
- pr_err("%s: TPTC device is not found\n", __func__);
- return;
- }
- if (!pm_runtime_enabled(&tc_pdev->dev))
- pm_runtime_enable(&tc_pdev->dev);
-
- if (enable)
- ret = pm_runtime_get_sync(&tc_pdev->dev);
- else
- ret = pm_runtime_put_sync(&tc_pdev->dev);
-
- if (ret < 0)
- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
- enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
echan->hw_triggered ? "HW" : "SW");
- edma_tc_set_pm_state(echan->tc, true);
-
return 0;
err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- edma_tc_set_pm_state(echan->tc, false);
echan->tc = NULL;
echan->hw_triggered = false;
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
int i;
for (i = 0; i < ecc->num_channels; i++) {
- if (echan[i].alloced) {
+ if (echan[i].alloced)
edma_setup_interrupt(&echan[i], false);
- edma_tc_set_pm_state(echan[i].tc, false);
- }
}
return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
/* Set up channel -> slot mapping for the entry slot */
edma_set_chmap(&echan[i], echan[i].slot[0]);
-
- edma_tc_set_pm_state(echan[i].tc, true);
}
}
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
static int edma_tptc_probe(struct platform_device *pdev)
{
- return 0;
+ pm_runtime_enable(&pdev->dev);
+ return pm_runtime_get_sync(&pdev->dev);
}
static struct platform_driver edma_tptc_driver = {
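The cyclic-prepare hunk earlier in this edma.c patch spells out the fallback: when every period is exactly one burst, the whole buffer can be programmed as a single large period and the per-burst intermediate interrupt (ITCINTEN) still fires once per requested period. A small arithmetic sketch of that decision (the MAX_NR_SG value is assumed for illustration):

/* Sketch of the slot-count fallback used by edma_prep_dma_cyclic() above. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_SG 20	/* assumed limit for this sketch */

int main(void)
{
	unsigned int buf_len = 65536, period_len = 64, burst = 64;
	unsigned int nslots = buf_len / period_len;	/* 1024, far over the limit */
	bool use_intermediate = false;

	if (nslots > MAX_NR_SG && burst == period_len) {
		period_len = buf_len;	/* one big period ... */
		nslots = 2;
		use_intermediate = true;/* ... with an interrupt per burst */
	}

	printf("nslots=%u period_len=%u intermediate=%d\n",
	       nslots, period_len, use_intermediate);
	return 0;
}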
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
- mtsr = config->dst_addr_width;
+ mtsr = config->src_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
- mtsr = config->src_addr_width;
+ mtsr = config->dst_addr_width;
}
hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = desc->length;
+ size_t bytes = 0;
int i;
- i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ i = HSU_DMA_CHAN_NR_DESC - 1;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5aee7ffe..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
unsigned dma_sig;
bool cyclic;
bool paused;
+ bool running;
int dma_ch;
struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+ c->running = true;
}
static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
+
+ c->running = false;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
- uint32_t ccr;
unsigned long flags;
- ccr = omap_dma_chan_read(c, CCR);
- /* The channel is no longer active, handle the completion right away */
- if (!(ccr & CCR_ENABLE))
- omap_dma_callback(c->dma_ch, 0, c);
-
ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (!c->paused && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * The channel is no longer active, set the return value
+ * accordingly
+ */
+ if (!(ccr & CCR_ENABLE))
+ ret = DMA_COMPLETE;
+ }
+
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE;
- if (tx_flags & DMA_PREP_INTERRUPT)
- d->cicr |= CICR_FRAME_IE;
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
d->csdp = data_type;
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321868d3..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 93f0d4120289..468447aff8eb 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -362,6 +362,7 @@ struct sbridge_pvt {
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
+ bool is_chan_hash;
/* Fifo double buffers */
struct mce mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
return (pkg >> 2) & 0x1;
}
+static int haswell_chan_hash(int idx, u64 addr)
+{
+ int i;
+
+ /*
+ * XOR even bits from 12:26 to bit0 of idx,
+ * odd bits from 13:27 to bit1
+ */
+ for (i = 12; i < 28; i += 2)
+ idx ^= (addr >> i) & 3;
+
+ return idx;
+}
+
/****************************************************************************
Memory check routines
****************************************************************************/
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
KNL_MAX_CHANNELS : NUM_CHANNELS;
u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+ pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+ }
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
pvt->info.type == KNIGHTS_LANDING)
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = 1 << TAD_SOCK(reg);
+ sck_way = TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
- else
+ else {
idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+ if (pvt->is_chan_hash)
+ idx = haswell_chan_hash(idx, addr);
+ }
idx = idx % ch_way;
/*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
switch(ch_way) {
case 2:
case 4:
- sck_xch = 1 << sck_way * (ch_way >> 1);
+ sck_xch = (1 << sck_way) * (ch_way >> 1);
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
ch_addr = addr - offset;
ch_addr >>= (6 + shiftup);
- ch_addr /= ch_way * sck_way;
+ ch_addr /= sck_xch;
ch_addr <<= (6 + shiftup);
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
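haswell_chan_hash(), added above, folds physical-address bits into the two-bit channel index: each loop step XORs in one pair of bits, so even bits 12..26 toggle bit 0 and odd bits 13..27 toggle bit 1. A standalone copy of the same folding for experimenting outside the driver:

/* Standalone copy of the channel-hash folding used by sb_edac above. */
#include <stdio.h>
#include <stdint.h>

static int chan_hash(int idx, uint64_t addr)
{
	int i;

	/* XOR even bits 12..26 into bit 0 of idx, odd bits 13..27 into bit 1. */
	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}

int main(void)
{
	printf("%d\n", chan_hash(0, 1ULL << 12));	/* bit 12 flips bit 0: 1 */
	printf("%d\n", chan_hash(0, 1ULL << 13));	/* bit 13 flips bit 1: 2 */
	printf("%d\n", chan_hash(0, (1ULL << 12) | (1ULL << 27)));	/* 1 ^ 2 = 3 */
	return 0;
}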
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 841a4b586395..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING |
- IRQF_ONESHOT |
- IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_vbus",
palmas_usb);
if (status < 0) {
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index aa1f743152a2..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -203,7 +203,19 @@ void __init efi_init(void)
reserve_regions();
early_memunmap(memmap.map, params.mmap_size);
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
+
+ if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * ARM currently does not allow ioremap_cache() to be called on
+ * memory regions that are covered by struct page. So remove the
+ * UEFI memory map from the linear mapping.
+ */
+ memblock_mark_nomap(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ } else {
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ }
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 62a778012fe0..1bcbade479dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ unsigned fw_version;
void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
@@ -2034,6 +2035,7 @@ struct amdgpu_device {
/* tracking pinned memory */
u64 vram_pin_size;
+ u64 invisible_pin_size;
u64 gart_pin_size;
/* amdkfd interface */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index d6b0bff510aa..b7b583c42ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
struct acp_pm_domain *apd;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
/* SMU block will power on ACP irrespective of ACP runtime status.
* Power off explicitly based on genpd ACP runtime status so that ACP
* hw and ACP-genpd status are in sync.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 598eb0cd5aab..b04337de65d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
fw_info.feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
- fw_info.ver = 0;
+ fw_info.ver = adev->uvd.fw_version;
fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
@@ -384,7 +384,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
vram_gtt.vram_size = adev->mc.real_vram_size;
vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+ vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mc.gtt_size;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
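The amdgpu_info_ioctl fix subtracts only the CPU-visible share of pinned VRAM from the visible pool: pins created with NO_CPU_ACCESS sit outside the CPU aperture (now tracked in invisible_pin_size), so counting them against it under-reports what userspace can actually map. A quick arithmetic sketch with made-up sizes:

/* Made-up numbers showing the corrected CPU-visible VRAM accounting. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t visible_vram     = 256ULL << 20;	/* CPU-visible aperture */
	uint64_t vram_pin_size    = 300ULL << 20;	/* all pinned VRAM */
	uint64_t invisible_pinned = 200ULL << 20;	/* pinned with NO_CPU_ACCESS */

	/* Old code: visible_vram - vram_pin_size would wrap below zero here. */
	uint64_t avail = visible_vram - (vram_pin_size - invisible_pinned);

	printf("CPU-visible, unpinned VRAM: %llu MiB\n",
	       (unsigned long long)(avail >> 20));	/* 256 - 100 = 156 */
	return 0;
}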
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8d432e6901af..81bd964d3dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
- struct amdgpu_crtc *crtcs[6];
- struct amdgpu_afmt *afmt[7];
+ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5b6639faa731..e557fc1f17c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size += amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6f3369de232f..11af4492b4be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+ if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 338da80006b6..871018c634e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_UVD_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 4bec0c108cea..481a64fa9b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
- amdgpu_irq_add_domain(adev);
+ amdgpu_irq_remove_domain(adev);
return 0;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..e17fbdaf874b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- if (!mstb)
+ if (!mstb) {
+ drm_dp_put_port(port);
return -EINVAL;
+ }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
+ drm_dp_put_port(port);
return ret;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 414d7f61aa05..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
+ 704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
- u8 *est = ((u8 *)timing) + 5;
+ u8 *est = ((u8 *)timing) + 6;
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
- depends on !VIDEO_SAMSUNG_S5P_G2D
+ depends on VIDEO_SAMSUNG_S5P_G2D=n
select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c522b2..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
- exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+ exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
return 0;
err:
- list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->close)
subdrv->close(dev, subdrv->dev, file);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
-}
-
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c44f1d..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
#ifndef _EXYNOS_DRM_FBDEV_H_
#define _EXYNOS_DRM_FBDEV_H_
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484ae9f49..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
* clock. On these SoCs the bootloader may enable it but any
* power domain off/on will reset it to disable state.
*/
- if (ctx->driver_data != &exynos5_fimd_driver_data ||
+ if (ctx->driver_data != &exynos5_fimd_driver_data &&
ctx->driver_data != &exynos5420_fimd_driver_data)
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
} else
val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
- regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+ ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
DRM_ERROR("mic: Failed to read system register\n");
}
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
DRM_ERROR("mic: Failed to get system register.\n");
+ ret = PTR_ERR(mic->sysreg);
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
}
static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
{
struct drm_plane_state *state = &exynos_state->base;
- struct drm_crtc *crtc = exynos_state->base.crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e82008b8b6..30798cbc6fc0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
-
intel_dp_mst_resume(dev);
+ intel_display_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
- IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+ IS_SKL_GT3(dev) || \
+ IS_SKL_GT4(dev))
+
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 18ba8139e922..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
- down_read(&mm->mmap_sem);
- while (pinned < npages) {
- ret = get_user_pages_remote(work->task, mm,
- obj->userptr.ptr + pinned * PAGE_SIZE,
- npages - pinned,
- !obj->userptr.read_only, 0,
- pvec + pinned, NULL);
- if (ret < 0)
- break;
-
- pinned += ret;
+ ret = -EFAULT;
+ if (atomic_inc_not_zero(&mm->mm_users)) {
+ down_read(&mm->mmap_sem);
+ while (pinned < npages) {
+ ret = get_user_pages_remote
+ (work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ npages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
}
- up_read(&mm->mmap_sem);
}
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5ab3f..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698fe2f7..937e77228466 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
- intel_connector->unregister(intel_connector);
-
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
+ /* We're using qword write, seqno should be aligned to 8 bytes. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
/* w/a for post sync ops following a GPGPU operation we
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ /* We're thrashing one dword of HWS. */
+ intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403a8f4f..cd9fe609aefb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev)) {
- drm_modeset_lock_all(dev);
+ if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
- }
dev_priv->modeset_restore = MODESET_DONE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t width = 0, height = 0;
+
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(width, height);
/* for planar format */
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
+ return width * height *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
- return (intel_crtc->config->pipe_src_w/2) *
- (intel_crtc->config->pipe_src_h/2) *
+ return (width / 2) * (height / 2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
/* for packed formats */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}
/*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
- if (fb == NULL)
+ if (!to_intel_plane_state(plane->state)->visible)
continue;
+
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (pstate->fb == NULL)
+ if (!to_intel_plane_state(pstate)->visible)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
+ struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(plane->state);
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
uint8_t cpp;
+ uint32_t width = 0, height = 0;
- if (latency == 0 || !cstate->base.active || !fb)
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
return false;
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(plane->state->rotation))
+ swap(width, height);
+
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
cpp, latency);
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- cstate->pipe_src_w,
- cpp, fb->modifier[0],
+ width,
+ cpp,
+ fb->modifier[0],
latency);
- plane_bytes_per_line = cstate->pipe_src_w * cpp;
+ plane_bytes_per_line = width * cpp;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+ if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+ /* This is tied to WaForceContextSaveRestoreNonCoherent */
+ if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/*
*Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return -ENOMEM;
}
} else {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+ flags | PIN_MAPPABLE);
if (ret)
return ret;
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ if (IS_HASWELL(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ae96ebc490fb..e81aefe5ffa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
default:
if (disp->dithering_mode) {
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->
dithering_mode);
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
}
if (disp->dithering_depth) {
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->
dithering_depth);
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
}
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c56a886229f1..b2de290da16f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
gf100_gr_mmio(gr, gr->func->mmio);
+ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f503d1c5..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
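The qxl cursor change keeps the on-screen tip position stable when a new cursor image arrives with a different hotspot: the stored cur_x/cur_y are shifted by the hotspot delta, and the device is always programmed with cur + hot_spot. A small worked example of that compensation (values are arbitrary):

/* Worked example of the hotspot compensation in qxl_crtc_cursor_set2(). */
#include <stdio.h>

int main(void)
{
	int cur_x = 100, hot_spot_x = 0;	/* cursor tip currently at x = 100 */
	int new_hot_x = 8;			/* next image's hotspot is 8px further in */

	cur_x += hot_spot_x - new_hot_x;	/* 100 -> 92 */
	hot_spot_x = new_hot_x;

	printf("device x = %d\n", cur_x + hot_spot_x);	/* still 100 */
	return 0;
}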
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
#define NI_DP_MSE_SAT2 0x7398
#define NI_DP_MSE_SAT_UPDATE 0x739c
+# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
+# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
#define NI_DIG_BE_CNTL 0x7140
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fd8c4d317e60..95f4fea89302 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
-bool radeon_has_atpx_dgpu_power_cntl(void) {
- return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* radeon_atpx_call - call an ATPX method
*
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfcc099c537d..81a63d7f5cd9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4fd1a961012d..d0826fb0434c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
}
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
- radeon_family_name[rdev->family], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device);
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+ if (rdev->flags & RADEON_IS_PX)
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 43cffb526b0c..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
do {
+ unsigned value1, value2;
+ udelay(10);
temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
- } while ((temp & 0x1) && retries++ < 10000);
+
+ value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+ value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+ if (!value1 && !value2)
+ break;
+ } while (retries++ < 50);
if (retries == 10000)
DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
return 0;
}
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
{
struct drm_device *dev = mst->base.dev;
struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
uint32_t val, temp;
uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
int retries = 0;
+ uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+ uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
do {
temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ udelay(10);
} while ((temp & 0x1) && (retries++ < 10000));
if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
- DRM_DEBUG_KMS("\n");
- return 0;
-}
-
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = radeon_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
int ret, slots;
-
+ s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
if (!ASIC_IS_DCE5(rdev)) {
DRM_ERROR("got mst dpms on non-DCE5\n");
return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
mst_enc->enc_active = true;
radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
- radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ fixed_pbn = drm_int2fixp(mst_enc->pbn);
+ fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+ avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
mst_enc->fe);
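
The new VCP size path above turns a 32.32 fixed-point avg_time_slots_per_mtp into the integer NI_DP_MSE_RATE_X field and a fractional NI_DP_MSE_RATE_Y field (26 fractional bits). A minimal standalone sketch of the intended split, assuming the same 32.32 layout that drm_int2fixp()/drm_fixp_div() use; the pbn/pbn_div sample values are invented for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pbn = 2560, pbn_div = 60;          /* illustrative values only */

    /* drm_int2fixp() + drm_fixp_div(): quotient in 32.32 fixed point */
    uint64_t avg = ((uint64_t)pbn << 32) / pbn_div;

    uint32_t x = (uint32_t)(avg >> 32);         /* drm_fixp2int(): integer part */
    uint64_t frac = avg & 0xffffffffULL;        /* fractional part, 0.32        */
    uint32_t y = (uint32_t)((frac + 63) >> 6);  /* ceil(frac * 2^26 / 2^32)     */

    printf("X=%u Y=0x%07x (~%.4f time slots per MTP)\n",
           x, y, avg / 4294967296.0);
    return 0;
}
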
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7dddfdce85e6..90f739478a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ if (radeon_ttm_tt_has_userptr(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index af4df81c4e0c..e6abc09b67e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bdb8cc89cacc..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5c0e43ed5c53..c6eaff5f8845 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -676,6 +676,7 @@
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
#define USB_DEVICE_ID_MS_NE4K 0x00db
#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -683,6 +684,8 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_NE7K 0x071d
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
unsigned char byte2, unsigned char byte3)
{
int ret;
- unsigned char buf[] = {0x18, byte2, byte3};
+ unsigned char *buf;
+
+ buf = kzalloc(3, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = 0x18;
+ buf[1] = byte2;
+ buf[2] = byte3;
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
- ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
break;
case USB_DEVICE_ID_LENOVO_CBTKBD:
- ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+ ret = hid_hw_output_report(hdev, buf, 3);
break;
default:
ret = -EINVAL;
break;
}
+ kfree(buf);
+
return ret < 0 ? ret : 0; /* BT returns 0, USB returns the count of bytes written */
}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 75cd3bc59c54..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
.driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 25d3c4330bf6..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
MT_TOOL_FINGER,
false);
}
+ input_mt_sync_frame(input_dev);
input_sync(input_dev);
}
}
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
* -----+------------------------------+-----+-----+
* The single bits Yaw, Roll, Pitch in the lower right corner specify
* whether the wiimote is rotating fast (0) or slow (1). Speed for slow
- * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
- * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
- * and 9 for slow.
+ * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+ * units / deg/s. To get a linear scale for fast rotation we multiply
+ * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+ * previous scale reported by this driver.
+ * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
* If the wiimote is not rotating the sensor reports 2^13 = 8192.
* Ext specifies whether an extension is connected to the motionp.
* which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
z -= 8192;
if (!(ext[3] & 0x02))
- x *= 18;
+ x = (x * 2000 * 9) / 440;
else
x *= 9;
if (!(ext[4] & 0x02))
- y *= 18;
+ y = (y * 2000 * 9) / 440;
else
y *= 9;
if (!(ext[3] & 0x01))
- z *= 18;
+ z = (z * 2000 * 9) / 440;
else
z *= 9;
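
To check that the new fast-rotation scaling lands on the same ~167.56 units per deg/s scale as the slow path, here is a small standalone sketch; the 100 deg/s input is an invented example, not a value from the driver:

#include <stdio.h>

int main(void)
{
    int deg_per_s = 100;                       /* assumed true angular rate      */

    /* raw sensor counts (deviation from the 8192 "not rotating" reading)        */
    int raw_slow = deg_per_s * 8192 / 440;     /* slow mode: 8192/440 per deg/s  */
    int raw_fast = deg_per_s * 8192 / 2000;    /* fast mode: 8192/2000 per deg/s */

    /* scaling applied by the driver above */
    int out_slow = raw_slow * 9;
    int out_fast = raw_fast * 2000 * 9 / 440;

    /* both end up on the same ~8192*9/440 (~167.56) units per deg/s scale */
    printf("slow=%d fast=%d target=%d\n",
           out_slow, out_fast, deg_per_s * 8192 * 9 / 440);
    return 0;
}
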
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 68a560957871..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
hid_data->inputmode = field->report->id;
hid_data->inputmode_index = usage->usage_index;
break;
+
+ case HID_UP_DIGITIZER:
+ if (field->report->id == 0x0B &&
+ (field->application == WACOM_G9_DIGITIZER ||
+ field->application == WACOM_G11_DIGITIZER)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
+
+ case WACOM_G9_PAGE:
+ case WACOM_G11_PAGE:
+ if (field->report->id == 0x03 &&
+ (field->application == WACOM_G9_TOUCHSCREEN ||
+ field->application == WACOM_G11_TOUCHSCREEN)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
}
}
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
return 0;
}
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
- int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+ struct wacom_wac *wacom_wac)
{
- unsigned char *rep_data;
+ u8 *rep_data;
+ struct hid_report *r;
+ struct hid_report_enum *re;
+ int length;
int error = -ENOMEM, limit = 0;
- rep_data = kzalloc(length, GFP_KERNEL);
+ if (wacom_wac->mode_report < 0)
+ return 0;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[wacom_wac->mode_report];
+ if (!r)
+ return -EINVAL;
+
+ rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
if (!rep_data)
- return error;
+ return -ENOMEM;
+
+ length = hid_report_len(r);
do {
- rep_data[0] = report_id;
- rep_data[1] = mode;
+ rep_data[0] = wacom_wac->mode_report;
+ rep_data[1] = wacom_wac->mode_value;
error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
length, 1);
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
rep_data, length, 1);
- } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+ } while (error >= 0 &&
+ rep_data[1] != wacom_wac->mode_report &&
+ limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
static int wacom_query_tablet_data(struct hid_device *hdev,
struct wacom_features *features)
{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
- if (features->type == HID_GENERIC)
- return wacom_hid_set_device_mode(hdev);
-
- if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
- if (features->type > TABLETPC) {
- /* MT Tablet PC touch */
- return wacom_set_device_mode(hdev, 3, 4, 4);
- }
- else if (features->type == WACOM_24HDT) {
- return wacom_set_device_mode(hdev, 18, 3, 2);
- }
- else if (features->type == WACOM_27QHDT) {
- return wacom_set_device_mode(hdev, 131, 3, 2);
- }
- else if (features->type == BAMBOO_PAD) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
- }
- } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
- if (features->type <= BAMBOO_PT) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
+ if (features->type != HID_GENERIC) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+ if (features->type > TABLETPC) {
+ /* MT Tablet PC touch */
+ wacom_wac->mode_report = 3;
+ wacom_wac->mode_value = 4;
+ } else if (features->type == WACOM_24HDT) {
+ wacom_wac->mode_report = 18;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == WACOM_27QHDT) {
+ wacom_wac->mode_report = 131;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == BAMBOO_PAD) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
+ } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+ if (features->type <= BAMBOO_PT) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
}
}
+ wacom_set_device_mode(hdev, wacom_wac);
+
+ if (features->type == HID_GENERIC)
+ return wacom_hid_set_device_mode(hdev);
+
return 0;
}
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
goto fail_type;
}
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bd198bbd4df0..02c4efea241c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2426,6 +2426,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface like most Bamboos and even
+ * sends ghost PAD data on it. However, later, we must disable this
+ * ghost interface, and we cannot detect it unless we set it here
+ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
+#define WACOM_G9_PAGE 0xff090000
+#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE 0xff110000
+#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
int ps_connected;
u8 bt_features;
u8 bt_high_speed;
+ int mode_report;
+ int mode_value;
struct hid_data hid_data;
};
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d12b7ff..1142a93dd90b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@ static const struct xpad_device {
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a745ffa..982936334537 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
input_set_drvdata(haptics->input_dev, haptics);
haptics->input_dev->name = "arizona:haptics";
- haptics->input_dev->dev.parent = pdev->dev.parent;
haptics->input_dev->close = arizona_haptics_close;
__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
kpd_delay = 15625;
- if (kpd_delay > 62500 || kpd_delay == 0) {
+ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- delay = (kpd_delay << 10) / USEC_PER_SEC;
- delay = 1 + ilog2(delay);
+ delay = (kpd_delay << 6) / USEC_PER_SEC;
+ delay = ilog2(delay);
err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
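
With the corrected conversion, the register value becomes ilog2(kpd_delay * 64 / USEC_PER_SEC), so the valid 1/64 s to 2 s range maps onto register values 0-7. A small sketch of that mapping, assuming (as the formula suggests) that the field encodes a delay of 2^reg / 64 seconds; the sample inputs are arbitrary:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

/* integer log2, mirroring the kernel's ilog2() for the values used here */
static unsigned int ilog2u(unsigned long v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    /* debounce values in microseconds: 1/64 s (the default), 0.5 s, 2 s */
    unsigned long samples[] = { 15625, 500000, 2000000 };
    unsigned int i;

    for (i = 0; i < 3; i++) {
        unsigned long us = samples[i];
        unsigned long delay = (us << 6) / USEC_PER_SEC;
        unsigned int reg = ilog2u(delay);

        printf("%lu us -> reg %u (~%lu/64 s)\n", us, reg, 1UL << reg);
    }
    return 0;
}
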
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d462f1..caa5a62c42fb 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl4030:vibrator";
info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl4030_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad48de6..53e33fab3f7a 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,7 +45,6 @@
struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
- struct workqueue_struct *workqueue;
struct work_struct play_work;
struct mutex mutex;
int irq;
@@ -213,11 +212,7 @@ static int vibra_play(struct input_dev *input, void *data,
info->strong_speed = effect->u.rumble.strong_magnitude;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
- ret = queue_work(info->workqueue, &info->play_work);
- if (!ret) {
- dev_info(&input->dev, "work is already on queue\n");
- return ret;
- }
+ schedule_work(&info->play_work);
return 0;
}
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl6040:vibrator";
info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl6040_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_buf;
}
+ /* Sanity check that a device has an endpoint */
+ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ dev_err(&usbinterface->dev,
+ "Invalid number of endpoints\n");
+ error = -EINVAL;
+ goto err_free_urb;
+ }
+
/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
* HID report descriptor
*/
if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
- HID_DEVICE_TYPE, &hid_desc) != 0){
+ HID_DEVICE_TYPE, &hid_desc) != 0) {
dev_err(&usbinterface->dev,
"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
error = -EIO;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129219ef..5efadad4615b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -92,6 +92,7 @@ struct iommu_dev_data {
struct list_head dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
+ u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static inline u16 get_device_id(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
return dev_data;
}
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ *(u16 *)data = alias;
+ return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid, ivrs_alias, pci_alias;
+
+ devid = get_device_id(dev);
+ ivrs_alias = amd_iommu_alias_table[devid];
+ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+ if (ivrs_alias == pci_alias)
+ return ivrs_alias;
+
+ /*
+ * DMA alias showdown
+ *
+ * The IVRS is fairly reliable in telling us about aliases, but it
+ * can't know about every screwy device. If we don't have an IVRS
+ * reported alias, use the PCI reported alias. In that case we may
+ * still need to initialize the rlookup and dev_table entries if the
+ * alias is to a non-existent device.
+ */
+ if (ivrs_alias == devid) {
+ if (!amd_iommu_rlookup_table[pci_alias]) {
+ amd_iommu_rlookup_table[pci_alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[pci_alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[pci_alias].data));
+ }
+
+ return pci_alias;
+ }
+
+ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+ "for device %s[%04x:%04x], kernel reported alias "
+ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+ PCI_FUNC(pci_alias));
+
+ /*
+ * If we don't have a PCI DMA alias and the IVRS alias is on the same
+ * bus, then the IVRS table may know about a quirk that we don't.
+ */
+ if (pci_alias == devid &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ pdev->dma_alias_devfn = ivrs_alias & 0xff;
+ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+ dev_name(dev));
+ }
+
+ return ivrs_alias;
+}
+
static struct iommu_dev_data *find_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
-static inline u16 get_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
static struct iommu_dev_data *get_dev_data(struct device *dev)
{
return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
+ dev_data->alias = get_alias(dev);
+
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
u16 devid, alias;
devid = get_device_id(dev);
- alias = amd_iommu_alias_table[devid];
+ alias = get_alias(dev);
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ret = iommu_flush_dte(iommu, dev_data->devid);
if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
return;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
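
Condensing the get_alias() decision above into a standalone sketch may help: when IVRS and the PCI core agree, that alias wins; when IVRS has nothing (alias equals the devid), the PCI-reported alias is used; otherwise the IVRS alias is trusted (and the code above may additionally register it as a PCI DMA alias quirk). The requester IDs in main() are invented examples:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

/* condensed restatement of the get_alias() decision above (sketch only) */
static u16 resolve_alias(u16 devid, u16 ivrs_alias, u16 pci_alias)
{
    if (ivrs_alias == pci_alias)
        return ivrs_alias;   /* IVRS and PCI agree */
    if (ivrs_alias == devid)
        return pci_alias;    /* only the PCI layer reports a quirk */
    return ivrs_alias;       /* trust IVRS; a PCI DMA alias quirk may be added too */
}

int main(void)
{
    /* made-up requester IDs: device 01:02.0 aliased to 01:00.0 */
    printf("0x%04x\n", resolve_alias(0x0110, 0x0110, 0x0100)); /* -> 0x0100 */
    printf("0x%04x\n", resolve_alias(0x0110, 0x0100, 0x0110)); /* -> 0x0100 */
    return 0;
}
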
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2409e3bd3df2..7c39ac4b9c53 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
+ /* We're bypassing these SIDs, so don't allocate an actual context */
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
return;
/*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
- /* Devices in an IOMMU group may already be configured */
- ret = arm_smmu_master_configure_smrs(smmu, cfg);
- if (ret)
- return ret == -EEXIST ? 0 : ret;
-
/*
* FIXME: This won't be needed once we have IOMMU-backed DMA ops
- * for all devices behind the SMMU.
+ * for all devices behind the SMMU. Note that we need to take
+ * care configuring SMRs for devices that are both a platform_device
+ * and a PCI device (i.e. a PCI host controller)
*/
if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
return 0;
+ /* Devices in an IOMMU group may already be configured */
+ ret = arm_smmu_master_configure_smrs(smmu, cfg);
+ if (ret)
+ return ret == -EEXIST ? 0 : ret;
+
for (i = 0; i < cfg->num_streamids; ++i) {
u32 idx, s2cr;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 94a30da0cfac..4dffccf532a2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
/* Update the pcpu_masks */
- for (i = 0; i < gic_vpes; i++)
+ for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, vpe);
- for (i = 0; i < gic_vpes; i++)
+ for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_mISDN))
+ return -EINVAL;
+
lock_sock(sk);
if (_pms(sk)->dev) {
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
* Actually now I think of it, it's possible that Ron *is* half the Plan 9
* userbase. Oh well.
*/
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
{
/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*
* This routine indicates if a particular trap number could be delivered
* directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective. See Mastery for
+ * how we could do this anyway...
*/
static bool direct_trap(unsigned int num)
{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
bool send_notify_to_eventfd(struct lg_cpu *cpu);
void init_clockdev(struct lg_cpu *cpu);
bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
int init_interrupts(void);
void free_interrupts(void);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
return;
break;
case 32 ... 255:
+ /* This might be a syscall. */
+ if (could_be_syscall(cpu->regs->trapnum))
+ break;
+
/*
- * These values mean a real interrupt occurred, in which case
+ * Other values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again.
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index dc11bbf27274..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
- int ret;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
return -EINVAL;
}
- tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
- if (!tdev->signal)
- return -ENOMEM;
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
- ret = copy_from_user(tdev->signal, userbuf, count);
- if (ret) {
+ if (copy_from_user(tdev->signal, userbuf, count)) {
kfree(tdev->signal);
+ tdev->signal = NULL;
return -EFAULT;
}
- return ret < 0 ? ret : count;
+ return count;
}
static const struct file_operations mbox_test_signal_ops = {
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index bd07f39f0692..dd2afbca51c9 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
int i;
ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ if (!ctx)
+ return -ENOMEM;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
if (!of_get_property(np, "mbox-names", NULL)) {
dev_err(cl->dev,
"%s() requires an \"mbox-names\" property\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef300f8b..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+ down_write(&cmd->root_lock);
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ up_write(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define WRITE_LOCK_VOID(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return; \
- }
+#define WRITE_LOCK(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return -EINVAL; \
+ } while(0)
+
+#define WRITE_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return; \
+ } while(0)
#define WRITE_UNLOCK(cmd) \
- up_write(&cmd->root_lock)
+ up_write(&(cmd)->root_lock)
-#define READ_LOCK(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+ up_read(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define READ_LOCK_VOID(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return; \
- }
+#define READ_LOCK(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return -EINVAL; \
+ } while(0)
+
+#define READ_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return; \
+ } while(0)
#define READ_UNLOCK(cmd) \
- up_read(&cmd->root_lock)
+ up_read(&(cmd)->root_lock)
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
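
The rework above replaces open-coded multi-statement lock macros with small helpers wrapped in do { } while(0), the usual idiom for macros that must behave like a single statement. A minimal illustration of the pitfall the idiom avoids, using toy macros rather than the dm-cache ones:

#include <stdio.h>

/* unsafe: expands to two separate statements */
#define BUMP_BAD(x)    printf("bump\n"); (x)++

/* safe: acts as a single statement, so if/else bodies stay intact */
#define BUMP_OK(x)     do { printf("bump\n"); (x)++; } while (0)

int main(void)
{
    int n = 0;

    if (n > 0)
        BUMP_BAD(n);    /* only the printf is guarded; (n)++ runs anyway */

    printf("after BUMP_BAD: n = %d\n", n);  /* prints 1, not 0 */

    n = 0;
    if (n > 0)
        BUMP_OK(n);     /* nothing runs, as intended */

    printf("after BUMP_OK:  n = %d\n", n);  /* prints 0 */
    /* note: BUMP_BAD followed by an else would not even compile */
    return 0;
}
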
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be4905769a45..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
tio = alloc_tio(ci, ti, target_bio_nr);
tio->len_ptr = len;
r = clone_bio(tio, bio, sector, *len);
- if (r < 0)
+ if (r < 0) {
+ free_tio(ci->md, tio);
break;
+ }
__map_bio(tio);
}
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b8fbb0..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
break;
val = kmalloc(len, GFP_KERNEL);
- if (!val)
+ if (!val) {
+ kfree(base);
break;
+ }
*val = 0x12345678;
base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
}
case CT_READ_BUDDY_AFTER_FREE: {
unsigned long p = __get_free_page(GFP_KERNEL);
- int saw, *val = kmalloc(1024, GFP_KERNEL);
+ int saw, *val;
int *base;
if (!p)
break;
- if (!val)
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ free_page(p);
break;
+ }
base = (int *)p;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3bdbe50a363f..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
/* TODO: Replace these with struct ida */
static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
/*
* There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
unsigned int usage;
unsigned int read_only;
unsigned int part_type;
- unsigned int name_idx;
unsigned int reset_done;
#define MMC_BLK_READ BIT(0)
#define MMC_BLK_WRITE BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
goto out;
}
- /*
- * !subname implies we are creating main mmc_blk_data that will be
- * associated with mmc_card with dev_set_drvdata. Due to device
- * partitions, devidx will not coincide with a per-physical card
- * index anymore so we keep track of a name index.
- */
- if (!subname) {
- md->name_idx = find_first_zero_bit(name_use, max_devices);
- __set_bit(md->name_idx, name_use);
- } else
- md->name_idx = ((struct mmc_blk_data *)
- dev_to_disk(parent)->private_data)->name_idx;
-
md->area_type = area_type;
/*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%u%s", md->name_idx, subname ? subname : "");
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
if (mmc_card_mmc(card))
blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
- __clear_bit(md->name_idx, name_use);
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f8c4762bb48d..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
};
-static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
- .pdata = &sdhci_tegra114_pdata,
- .nvquirks = NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_DDR50 |
- NVQUIRK_ENABLE_SDR104 |
- NVQUIRK_HAS_PADCALIB,
-};
-
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
static const struct of_device_id sdhci_tegra_dt_match[] = {
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
- { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b6facac54fc0..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
- * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- /* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
- pr_crit("%s called with NULL mtd->owner!\n", __func__);
- BUG();
- }
-
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..a24c18eee598 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -195,6 +195,7 @@ config GENEVE
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+ select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be86570..a2904029cccc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u16 fid;
int i, err;
mutex_lock(&ps->smi_mutex);
- /* Get or create the bridge FID and assign it to the port */
- for (i = 0; i < ps->num_ports; ++i)
- if (ps->ports[i].bridge_dev == bridge)
- break;
-
- if (i < ps->num_ports)
- err = _mv88e6xxx_port_fid_get(ds, i, &fid);
- else
- err = _mv88e6xxx_fid_new(ds, &fid);
- if (err)
- goto unlock;
-
- err = _mv88e6xxx_port_fid_set(ds, port, fid);
- if (err)
- goto unlock;
-
/* Assign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = bridge;
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
}
}
-unlock:
mutex_unlock(&ps->smi_mutex);
return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
- u16 fid;
int i;
mutex_lock(&ps->smi_mutex);
- /* Give the port a fresh Filtering Information Database */
- if (_mv88e6xxx_fid_new(ds, &fid) ||
- _mv88e6xxx_port_fid_set(ds, port, fid))
- netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
/* Unassign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = NULL;
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* the other bits clear.
*/
reg = 1 << port;
- /* Disable learning for DSA and CPU ports */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+ /* Disable learning for CPU port */
+ if (dsa_is_cpu_port(ds, port))
+ reg = 0;
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (ret)
goto abort;
- /* Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+ ret = _mv88e6xxx_port_fid_set(ds, port, 0);
if (ret)
goto abort;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+ /* This (reset &) enable is not present in the specs or reference driver, but
+ * Broadcom does it in arch PCI code when enabling the fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d148ca..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
}
}
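
The extra width check matters on 64-bit kernels, where an unsigned long statistic occupies 8 bytes: reading it through a u32 pointer silently truncates the value, and on a big-endian machine it would pick up the wrong half entirely. A tiny standalone illustration with a made-up counter value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned long counter = 0x100000001UL;  /* exceeds 32 bits on LP64 systems */
    char *p = (char *)&counter;
    uint32_t as_u32;
    unsigned long as_ulong;

    /* the old, width-blind read */
    as_u32 = *(uint32_t *)p;
    /* the new, size-aware read */
    as_ulong = *(unsigned long *)p;

    printf("u32 read: 0x%x, unsigned long read: 0x%lx\n", as_u32, as_ulong);
    return 0;
}
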
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
lmac++;
- if (lmac == MAX_LMAC_PER_BGX)
+ if (lmac == MAX_LMAC_PER_BGX) {
+ of_node_put(node);
break;
+ }
}
- of_node_put(node);
return 0;
defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
void t4_free_sge_resources(struct adapter *adap)
{
int i;
- struct sge_eth_rxq *eq = adap->sge.ethrxq;
- struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_eth_rxq *eq;
+ struct sge_eth_txq *etq;
+
+ /* stop all Rx queues in order to start them draining */
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
+ if (eq->rspq.desc)
+ t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+ FW_IQ_TYPE_FL_INT_CAP,
+ eq->rspq.cntxt_id,
+ eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+ 0xffff);
+ }
/* clean up Ethernet Tx/Rx queues */
- for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+
+ etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736bece0f..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
}
#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_SIZE 0x800
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* We have two VPD data structures stored in the adapter VPD area.
+ * By default, Linux calculates the size of the VPD area by traversing
+ * the first VPD area at offset 0x0, so we need to tell the OS what
+ * our real VPD size is.
+ */
+ ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+ if (ret < 0)
+ goto out;
+
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
@@ -6940,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_iq_stop - stop an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Stops an ingress queue and its associated FLs, if any. This causes
+ * any current or future data/messages destined for these queues to be
+ * tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
* t4_iq_free - free an ingress queue and its FLs
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* verify upper 16 bits are zero */
- if (vid >> 16)
- return FM10K_ERR_PARAM;
-
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and allowing them
+ * could let a rogue VF receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
- vid = err;
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
/* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
- int gso_size, nr_frags, sum;
-
- /* check to see if TSO is enabled, if so we may get a repreive */
- gso_size = skb_shinfo(skb)->gso_size;
- if (unlikely(!gso_size))
- return true;
+ int nr_frags, sum;
- /* no need to check if number of frags is less than 8 */
+ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < I40E_MAX_BUFFER_TXD)
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the first or last 6 since the first
- * 6 cannot inherit any data from a descriptor before them, and the
- * last 6 cannot inherit any data from a descriptor after them.
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
*/
- sum = 1 - gso_size;
+ sum = 1 - skb_shinfo(skb)->gso_size;
- /* Add size of frags 1 through 5 to create our initial sum */
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
- sum += skb_frag_size(++frag);
+ sum += skb_frag_size(frag++);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!--nr_frags)
break;
- sum -= skb_frag_size(++stale);
+ sum -= skb_frag_size(stale++);
}
return false;
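
The i40e hunk above reworks __i40e_chk_linearize() so that, for TSO, the header and the first segment payload are counted as two separate DMA buffers, leaving only 7 of the 8 hardware buffers for fragments; it then walks the fragment list checking that every window of 6 consecutive fragments carries enough payload. The sketch below is a simplified, standalone restatement of that sliding-window test, not the driver's code: names and types are illustrative, and the driver's incremental walk initializes its running sum to 1 - gso_size, so its threshold differs from this by one byte.

/* Hypothetical sketch of the 6-fragment sliding-window test: with at most
 * 8 DMA buffers per packet and 2 already consumed by the TSO header and
 * first-segment payload, any 6 consecutive fragments must together cover
 * (roughly) gso_size bytes, or the skb has to be linearized.
 */
#include <stdbool.h>
#include <stddef.h>

#define MAX_BUFFER_TXD	8	/* assumed HW limit, mirrors I40E_MAX_BUFFER_TXD */

static bool needs_linearize(const size_t *frag_len, int nr_frags,
			    size_t gso_size)
{
	int i, window = MAX_BUFFER_TXD - 2;	/* 6 fragments per window */

	if (nr_frags < MAX_BUFFER_TXD - 1)	/* fewer than 7 frags: always OK */
		return false;

	/* slide the window; the trailing window is skipped, as in the
	 * driver comment above, since the last 6 fragments cannot inherit
	 * data from a descriptor after them */
	for (i = 0; i + window < nr_frags; i++) {
		size_t sum = 0;
		int j;

		for (j = 0; j < window; j++)
			sum += frag_len[i + j];
		if (sum < gso_size)
			return true;	/* window starves: linearize */
	}
	return false;
}

The matching i40e_txrx.h hunk below then only calls this walk for GSO skbs; a non-GSO send simply has to fit in 8 buffers.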
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
- /* we can only support up to 8 data buffers for a single send */
- if (likely(count <= I40E_MAX_BUFFER_TXD))
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
return false;
- return __i40e_chk_linearize(skb);
+ if (skb_is_gso(skb))
+ return __i40e_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
}
/**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
bool __i40evf_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
- int gso_size, nr_frags, sum;
-
- /* check to see if TSO is enabled, if so we may get a repreive */
- gso_size = skb_shinfo(skb)->gso_size;
- if (unlikely(!gso_size))
- return true;
+ int nr_frags, sum;
- /* no need to check if number of frags is less than 8 */
+ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < I40E_MAX_BUFFER_TXD)
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the first or last 6 since the first
- * 6 cannot inherit any data from a descriptor before them, and the
- * last 6 cannot inherit any data from a descriptor after them.
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
*/
- sum = 1 - gso_size;
+ sum = 1 - skb_shinfo(skb)->gso_size;
- /* Add size of frags 1 through 5 to create our initial sum */
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
- sum += skb_frag_size(++frag);
+ sum += skb_frag_size(frag++);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!--nr_frags)
break;
- sum -= skb_frag_size(++stale);
+ sum -= skb_frag_size(stale++);
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
- /* we can only support up to 8 data buffers for a single send */
- if (likely(count <= I40E_MAX_BUFFER_TXD))
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
return false;
- return __i40evf_chk_linearize(skb);
+ if (skb_is_gso(skb))
+ return __i40evf_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
(priv->tx_ring_num * 2) +
- (priv->rx_ring_num * 2);
+ (priv->rx_ring_num * 3);
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
+ data[index++] = priv->rx_ring[i]->dropped;
}
spin_unlock_bh(&priv->stats_lock);
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_dropped", i);
}
break;
case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
+ unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes;
+ sw_rx_dropped += priv->rx_ring[i]->dropped;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
stats->collisions = 0;
- stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+ sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
gfp_t gfp = _gfp;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
page = alloc_pages(gfp, order);
if (likely(page))
break;
@@ -126,7 +126,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
- set_page_count(page, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc[i].page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
}
}
@@ -176,7 +178,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc->dma,
page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
- set_page_count(page, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc->page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
page_alloc->page = NULL;
}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
- priv->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
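
The mlx4 hunks above move the rx-drop accounting from the global netdev counter into a per-ring `dropped` field, which the ethtool hunks export as an rx%d_dropped string and which DUMP_ETH_STATS sums back into stats->rx_dropped on top of the firmware RDROP count. A small illustrative sketch of that aggregation, with stand-in names rather than the driver's structures:

/* Sum per-ring software drop counters into the device-wide rx_dropped
 * statistic alongside the hardware-reported drop count, mirroring the
 * en_port.c hunk above.
 */
#include <stdint.h>

struct rx_ring_stats {
	unsigned long packets;
	unsigned long bytes;
	unsigned long dropped;		/* software drops, e.g. skb alloc failure */
};

static uint64_t total_rx_dropped(const struct rx_ring_stats *rings,
				 int nrings, uint32_t hw_rdrop)
{
	uint64_t sw_dropped = 0;
	int i;

	for (i = 0; i < nrings; i++)
		sw_dropped += rings[i].dropped;

	/* expose HW drops plus SW drops as one counter */
	return (uint64_t)hw_rdrop + sw_dropped;
}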
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
return 0;
}
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
- err = pci_enable_device(pdev);
+ err = mlx4_pci_enable_device(&priv->dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
pci_release_regions(pdev);
err_disable_pdev:
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(&priv->dev);
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
priv->pci_dev_data = id->driver_data;
mutex_init(&dev->persist->device_state_mutex);
mutex_init(&dev->persist->interface_state_mutex);
+ mutex_init(&dev->persist->pci_status_mutex);
ret = devlink_register(devlink, &pdev->dev);
if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
}
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(dev);
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(persist->dev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret;
- int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
- int total_vfs;
+ int err;
mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
- ret = pci_enable_device(pdev);
- if (ret) {
- mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+ err = mlx4_pci_enable_device(dev);
+ if (err) {
+ mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
+ int err;
+
+ mlx4_err(dev, "%s was called\n", __func__);
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
- ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+ err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
priv, 1);
- if (ret) {
- mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
- __func__, ret);
+ if (err) {
+ mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+ __func__, err);
goto end;
}
- ret = restore_current_port_types(dev, dev->persist->
+ err = restore_current_port_types(dev, dev->persist->
curr_port_type, dev->persist->
curr_port_poss_type);
- if (ret)
- mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+ if (err)
+ mlx4_err(dev, "could not restore original port types (%d)\n", err);
}
end:
mutex_unlock(&persist->interface_state_mutex);
- return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
+ .resume = mlx4_pci_resume,
};
static struct pci_driver mlx4_driver = {
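
mlx4_pci_enable_device() and mlx4_pci_disable_device() above wrap pci_enable_device()/pci_disable_device() in a mutex plus a status flag, so the AER error path, slot reset, resume and remove paths can each call them without double-enabling or double-disabling the function. The same idempotent-toggle pattern in isolation, as a hedged userspace sketch with hypothetical names (device_power_on/off stand in for the real PCI calls):

#include <pthread.h>

enum pwr_status { PWR_DISABLED, PWR_ENABLED };

struct dev_persist {
	pthread_mutex_t status_lock;
	enum pwr_status status;
};

static int device_power_on(void)   { return 0; }	/* placeholder */
static void device_power_off(void) { }			/* placeholder */

static int dev_enable(struct dev_persist *p)
{
	int err = 0;

	pthread_mutex_lock(&p->status_lock);
	if (p->status == PWR_DISABLED) {
		err = device_power_on();
		if (!err)
			p->status = PWR_ENABLED;
	}
	pthread_mutex_unlock(&p->status_lock);
	return err;
}

static void dev_disable(struct dev_persist *p)
{
	pthread_mutex_lock(&p->status_lock);
	if (p->status == PWR_ENABLED) {
		device_power_off();
		p->status = PWR_DISABLED;
	}
	pthread_mutex_unlock(&p->status_lock);
}

Calling dev_disable() twice in a row is now harmless, which is exactly what the error-recovery and remove paths need.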
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
+ u8 pptx;
+ u8 pprx;
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
struct mlx4_resource_tracker res_tracker;
struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
}
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ /* Slave cannot change Global Pause configuration */
+ if (slave != mlx4_master_func_num(dev) &&
+ ((gen_context->pptx != master->pptx) ||
+ (gen_context->pprx != master->pprx))) {
+ gen_context->pptx = master->pptx;
+ gen_context->pprx = master->pprx;
+ mlx4_warn(dev,
+ "denying Global Pause change for slave:%d\n",
+ slave);
+ } else {
+ master->pptx = gen_context->pptx;
+ master->pprx = gen_context->pprx;
+ }
break;
case MLX4_SET_PORT_GID_TABLE:
/* change to MULTIPLE entries: number of guest's gids
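
The port.c hunk above lets the PF cache the port's global pause settings and silently overrides any attempt by a VF to change them. The policy reduces to a small decision that can be sketched on its own (field names here are illustrative):

struct pause_cfg { unsigned char pptx, pprx; };

/* A non-master function may not alter global pause; its request is forced
 * back to the master's cached configuration.  Only the master's own
 * requests update the cache.
 */
static void apply_pause_policy(int slave, int master_func,
			       struct pause_cfg *req,		/* from the function */
			       struct pause_cfg *master)	/* cached by the PF */
{
	if (slave != master_func &&
	    (req->pptx != master->pptx || req->pprx != master->pprx)) {
		req->pptx = master->pptx;	/* deny the change */
		req->pprx = master->pprx;
	} else {
		master->pptx = req->pptx;	/* record new configuration */
		master->pprx = req->pprx;
	}
}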
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
return false;
}
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
/* This function reuses the buffer(from an offset) from
* consumer index to producer index in the bd ring
*/
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
curr_cons->data = NULL;
}
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(edev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
curr_cons->page_offset += rxq->rx_buf_seg_size;
if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ /* Since we failed to allocate new buffer
+ * current buffer can be used again.
+ */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
return -ENOMEM;
+ }
dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
len_on_bd);
if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ /* Incr page ref count to reuse on allocation failure
+ * so that it doesn't get freed while freeing SKB.
+ */
+ atomic_inc(&current_bd->data->_count);
goto out;
}
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
return 0;
out:
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
return -ENOMEM;
}
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
tpa_info->skb = netdev_alloc_skb(edev->ndev,
le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
+ DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
- return;
+ goto cons_buf;
}
skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
/* This is needed in order to enable forwarding support */
qede_set_gro_params(edev, tpa_info->skb, cqe);
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (likely(cqe->ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th;
- skb_set_network_header(skb, 0);
skb_set_transport_header(skb, sizeof(struct iphdr));
th = tcp_hdr(skb);
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th;
- skb_set_network_header(skb, 0);
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
th = tcp_hdr(skb);
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
struct sk_buff *skb,
u16 vlan_tag)
{
+ /* FW can send a single MTU sized packet from gro flow
+ * due to aggregation timeout/last segment etc. which
+ * is not expected to be a gro packet. If a skb has zero
+ * frags then simply push it in the stack as non gso skb.
+ */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
}
}
#endif
+
+send_skb:
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, vlan_tag);
}
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
rxq->rx_hw_errors++;
- qede_reuse_page(edev, rxq, sw_rx_data);
- goto next_rx;
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+ goto next_cqe;
}
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
"Build_skb failed, dropping incoming packet\n");
- qede_reuse_page(edev, rxq, sw_rx_data);
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
- goto next_rx;
+ goto next_cqe;
}
/* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(qede_realloc_rx_buffer(edev, rxq,
sw_rx_data))) {
DP_ERR(edev, "Failed to allocate rx buffer\n");
+ /* Incr page ref count to reuse on allocation
+ * failure so that it doesn't get freed while
+ * freeing SKB.
+ */
+
+ atomic_inc(&sw_rx_data->data->_count);
rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, edev,
+ fp_cqe->bd_num);
+ dev_kfree_skb_any(skb);
goto next_cqe;
}
}
+ qede_rx_bd_ring_consume(rxq);
+
if (fp_cqe->bd_num != 1) {
u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
num_frags--) {
u16 cur_size = pkt_len > rxq->rx_buf_size ?
rxq->rx_buf_size : pkt_len;
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
+ goto next_cqe;
+ }
- WARN_ONCE(!cur_size,
- "Still got %d BDs for mapping jumbo, but length became 0\n",
- num_frags);
-
- if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
goto next_cqe;
+ }
- rxq->sw_rx_cons++;
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- qed_chain_consume(&rxq->rx_bd_ring);
+ qede_rx_bd_ring_consume(rxq);
+
dma_unmap_page(&edev->pdev->dev,
sw_rx_data->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
pkt_len -= cur_size;
}
- if (pkt_len)
+ if (unlikely(pkt_len))
DP_ERR(edev,
"Mapped all BDs of jumbo, but still have %d bytes\n",
pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
- qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
- rxq->sw_rx_cons++;
next_rx_only:
rx_pkt++;
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
- if (replace_buf) {
+ if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
dma_unmap_addr(replace_buf, mapping),
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
static int qede_alloc_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
- int i, rc, size, num_allocated;
+ int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
if (!rxq->sw_rx_ring) {
DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ rc = -ENOMEM;
goto err;
}
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
/* Allocate buffers for the Rx ring */
for (i = 0; i < rxq->num_rx_buffers; i++) {
rc = qede_alloc_rx_buffer(edev, rxq);
- if (rc)
- break;
- }
- num_allocated = i;
- if (!num_allocated) {
- DP_ERR(edev, "Rx buffers allocation failed\n");
- goto err;
- } else if (num_allocated < rxq->num_rx_buffers) {
- DP_NOTICE(edev,
- "Allocated less buffers than desired (%d allocated)\n",
- num_allocated);
+ if (rc) {
+ DP_ERR(edev,
+ "Rx buffers allocation failed at index %d\n", i);
+ goto err;
+ }
}
- qede_alloc_sge_mem(edev, rxq);
-
- return 0;
-
+ rc = qede_alloc_sge_mem(edev, rxq);
err:
- qede_free_mem_rxq(edev, rxq);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
}
return 0;
-
err:
- qede_free_mem_fp(edev, fp);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
struct qede_fastpath *fp = &edev->fp_array[rss_id];
rc = qede_alloc_mem_fp(edev, fp);
- if (rc)
- break;
- }
-
- if (rss_id != QEDE_RSS_CNT(edev)) {
- /* Failed allocating memory for all the queues */
- if (!rss_id) {
+ if (rc) {
DP_ERR(edev,
- "Failed to allocate memory for the leading queue\n");
- rc = -ENOMEM;
- } else {
- DP_NOTICE(edev,
- "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
- QEDE_RSS_CNT(edev), rss_id);
+ "Failed to allocate memory for fastpath - rss id = %d\n",
+ rss_id);
+ qede_free_mem_load(edev);
+ return rc;
}
- edev->num_rss = rss_id;
}
return 0;
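
The qede changes above introduce qede_recycle_rx_bd_ring(): whenever completion processing fails after buffers were already consumed, the same pages are handed straight back to the producer side of the BD ring (and the page refcount is bumped before dev_kfree_skb_any() where the page is also attached to the skb), so the RX ring never shrinks on error. A hedged sketch of that consume-and-repost loop with a stand-in ring structure, not the qede one:

#define RING_SIZE	256
#define RING_MASK	(RING_SIZE - 1)

struct rx_buf { void *page; };

struct rx_ring {
	struct rx_buf	sw_ring[RING_SIZE];
	unsigned int	sw_cons;	/* next buffer to consume */
	unsigned int	sw_prod;	/* next free producer slot */
};

static void ring_repost(struct rx_ring *rxq, const struct rx_buf *buf)
{
	rxq->sw_ring[rxq->sw_prod & RING_MASK] = *buf;	/* reuse same page */
	rxq->sw_prod++;
}

/* equivalent, in spirit, to qede_recycle_rx_bd_ring(rxq, edev, count) */
static void ring_recycle(struct rx_ring *rxq, unsigned int count)
{
	while (count--) {
		struct rx_buf *cur = &rxq->sw_ring[rxq->sw_cons & RING_MASK];

		ring_repost(rxq, cur);
		rxq->sw_cons++;
	}
}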
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a3fba7..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
rate = clk_get_rate(clk);
clk_put(clk);
+ if (!rate)
+ return -EINVAL;
+
inc = 1000000000ULL << 20;
do_div(inc, rate);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, false);
+ ret = sh_eth_dev_init(ndev, true);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
return ret;
}
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
netif_device_attach(ndev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..44022b1845ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
#define EMAC_SPLITTER_CTRL_REG 0x0
#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
int phymode = dwmac->interface;
u32 reg_offset = dwmac->reg_offset;
u32 reg_shift = dwmac->reg_shift;
- u32 ctrl, val;
+ u32 ctrl, val, module;
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk)
+ if (dwmac->f2h_ptp_ref_clk) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
- else
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ &module);
+ module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ module);
+ } else {
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+ }
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
return 0;
}
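
The socfpga hunk above sets the EMAC bit in the system manager's FPGA-group module register when the PTP reference clock comes from the FPGA, using a regmap read-modify-write. A minimal sketch of that sequence, reusing the register offset and bit value #defined in the hunk; the helper name and the error handling are illustrative (the driver itself does not check the regmap_read() return value):

#include <linux/regmap.h>

#define FPGAGRP_MODULE_REG	0x00000028
#define FPGAGRP_MODULE_EMAC	0x00000004

static int fpgagrp_enable_emac(struct regmap *sysmgr, int reg_shift)
{
	unsigned int module;
	int ret;

	ret = regmap_read(sysmgr, FPGAGRP_MODULE_REG, &module);
	if (ret)
		return ret;

	module |= FPGAGRP_MODULE_EMAC << (reg_shift / 2);
	return regmap_write(sysmgr, FPGAGRP_MODULE_REG, module);
}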
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..bbb77cd8ad67 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
+ pm_runtime_get_sync(&priv->pdev->dev);
+
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- pm_runtime_get_sync(&priv->pdev->dev);
-
reg = priv->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..58d58f002559 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->hw_ram_addr = auxdata->hw_ram_addr;
}
- pdev->dev.platform_data = pdata;
-
return pdata;
}
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
return -ENOMEM;
mutex_init(&ks->lock);
- ks->spi = spi_dev_get(spi);
+ ks->spi = spi;
ks->chip = &ks8995_chip[variant];
if (ks->spi->dev.of_node) {
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Huawei E3372 fails unless NDP comes after the IP packets */
- { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+ /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+ * (12d1:157d), are known to fail unless the NDP is placed
+ * after the IP packets. Applying the quirk to all Huawei
+ * devices is broader than necessary, but harmless.
+ */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* default entry */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
- /* typical case: TCP/UDP over IP and both csums are correct */
- if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
- VMXNET3_RCD_CSUM_OK) {
+ if (gdesc->rcd.v4 &&
+ (le32_to_cpu(gdesc->dword[3]) &
+ VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+ BUG_ON(gdesc->rcd.frg);
+ } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+ (1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
- BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
BUG_ON(gdesc->rcd.frg);
} else {
if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
- /* TO-DO: return max ethernet size? */
- return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
- /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
- return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
- .family = AF_INET,
- .local_out = vrf_ip_local_out,
- .check = vrf_ip_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
* for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
- .family = AF_INET6,
- .local_out = ip6_local_out,
- .check = vrf_ip6_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
- vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
- sizeof(struct rt6_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops6.kmem_cachep)
- return -ENOMEM;
-
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
- kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
- skb->dev->stats.rx_errors++;
- kfree_skb(skb);
- return 0;
-}
-
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
- dst_destroy(&vrf->rt6->dst);
- free_percpu(vrf->rt6->rt6i_pcpu);
+ dst_release(&vrf->rt6->dst);
vrf->rt6 = NULL;
}
static int vrf_rt6_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct dst_entry *dst;
+ struct net *net = dev_net(dev);
struct rt6_info *rt6;
- int cpu;
int rc = -ENOMEM;
- rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rt6 = ip6_dst_alloc(net, dev,
+ DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
if (!rt6)
goto out;
- dst = &rt6->dst;
-
- rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
- if (!rt6->rt6i_pcpu) {
- dst_destroy(dst);
- goto out;
- }
- for_each_possible_cpu(cpu) {
- struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
- *p = NULL;
- }
-
- memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
- INIT_LIST_HEAD(&rt6->rt6i_siblings);
- INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
- rt6->dst.input = vrf_input6;
rt6->dst.output = vrf_output6;
-
- rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
- atomic_set(&rt6->dst.__refcnt, 2);
-
+ rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+ dst_hold(&rt6->dst);
vrf->rt6 = rt6;
rc = 0;
out:
return rc;
}
#else
-static int init_dst_ops6_kmem_cachep(void)
-{
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
}
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
{
struct dst_entry *dst = (struct dst_entry *)vrf->rth;
- dst_destroy(dst);
+ dst_release(dst);
vrf->rth = NULL;
}
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
struct net_vrf *vrf = netdev_priv(dev);
struct rtable *rth;
- rth = dst_alloc(&vrf_dst_ops, dev, 2,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
if (rth) {
rth->dst.output = vrf_output;
- rth->rt_genid = rt_genid_ipv4(dev_net(dev));
- rth->rt_flags = 0;
- rth->rt_type = RTN_UNICAST;
- rth->rt_is_input = 0;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
rth->rt_table_id = vrf->tb_id;
- INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_uncached_list = NULL;
}
return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
struct net_device *port_dev;
struct list_head *iter;
- vrf_rtable_destroy(vrf);
- vrf_rt6_destroy(vrf);
+ vrf_rtable_release(vrf);
+ vrf_rt6_release(vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
return 0;
out_rth:
- vrf_rtable_destroy(vrf);
+ vrf_rtable_release(vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rth = vrf->rth;
- atomic_inc(&rth->dst.__refcnt);
+ dst_hold(&rth->dst);
}
return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rt = vrf->rt6;
- atomic_inc(&rt->dst.__refcnt);
+ dst_hold(&rt->dst);
}
return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
{
int rc;
- vrf_dst_ops.kmem_cachep =
- kmem_cache_create("vrf_ip_dst_cache",
- sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops.kmem_cachep)
- return -ENOMEM;
-
- rc = init_dst_ops6_kmem_cachep();
- if (rc != 0)
- goto error2;
-
register_netdevice_notifier(&vrf_notifier_block);
rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
error:
unregister_netdevice_notifier(&vrf_notifier_block);
- free_dst_ops6_kmem_cachep();
-error2:
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
return rc;
}
-static void __exit vrf_cleanup_module(void)
-{
- rtnl_link_unregister(&vrf_link_ops);
- unregister_netdevice_notifier(&vrf_notifier_block);
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
- free_dst_ops6_kmem_cachep();
-}
-
module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
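
The vrf.c rewrite above drops the driver's private dst_ops, kmem caches and per-CPU route caches and instead allocates its device dsts through the stack's own rt_dst_alloc()/ip6_dst_alloc(), taking a plain reference with dst_hold() and dropping it with dst_release() so the generic dst machinery performs the final free. A toy illustration of that hold/release discipline, outside the kernel and with made-up names:

#include <stdatomic.h>
#include <stdlib.h>

struct toy_dst {
	atomic_int refcnt;
};

static struct toy_dst *toy_dst_alloc(void)
{
	struct toy_dst *d = calloc(1, sizeof(*d));

	if (d)
		atomic_store(&d->refcnt, 1);	/* allocation reference */
	return d;
}

static void toy_dst_hold(struct toy_dst *d)
{
	atomic_fetch_add(&d->refcnt, 1);
}

static void toy_dst_release(struct toy_dst *d)
{
	/* owner "releases" rather than force-destroys; the last holder frees */
	if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
		free(d);
}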
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
-bcma_out:
return err;
bcma_err_wireless_exit:
ieee80211_free_hw(wl->hw);
+bcma_out:
+ kfree(dev);
return err;
}
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_rng_exit(wl);
b43_leds_unregister(wl);
-
ieee80211_free_hw(wl->hw);
+ kfree(wldev->dev);
}
static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_leds_unregister(wl);
b43_wireless_exit(dev, wl);
+ kfree(dev);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ iwl_free_fw_paging(mvm);
+
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
- iwl_free_fw_paging(mvm);
-
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
- IWL_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
+ IWL_DEBUG_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,

+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 53c11621d5b1..7c8a3bf07884 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -88,4 +88,17 @@ config NVDIMM_PFN
Select Y if unsure
+config NVDIMM_DAX
+ bool "NVDIMM DAX: Raw access to persistent memory"
+ default LIBNVDIMM
+ depends on NVDIMM_PFN
+ help
+ Support raw device dax access to a persistent memory
+ namespace. For environments that want to hard partition
+	  persistent memory, this capability provides a mechanism to
+ sub-divide a namespace into character devices that can only be
+ accessed via DAX (mmap(2)).
+
+ Select Y if unsure
+
endif
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index ea84d3c4e8e5..909554c3f955 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -23,3 +23,4 @@ libnvdimm-y += label.o
libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
libnvdimm-$(CONFIG_BTT) += btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
+libnvdimm-$(CONFIG_NVDIMM_DAX) += dax_devs.o
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index e9ff9229d942..495e06d9f7e7 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -21,19 +21,19 @@
#include <linux/sizes.h>
#include "nd.h"
-struct nd_blk_device {
- struct request_queue *queue;
- struct gendisk *disk;
- struct nd_namespace_blk *nsblk;
- struct nd_blk_region *ndbr;
- size_t disk_size;
- u32 sector_size;
- u32 internal_lbasize;
-};
+static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
+{
+ return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
+}
+
+static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
+{
+ return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
+}
-static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
+static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
{
- return blk_dev->nsblk->lbasize - blk_dev->sector_size;
+ return nsblk->lbasize - nsblk_meta_size(nsblk);
}
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
@@ -57,20 +57,29 @@ static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
return SIZE_MAX;
}
+static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
+{
+ struct nd_region *nd_region;
+ struct device *parent;
+
+ parent = nsblk->common.dev.parent;
+ nd_region = container_of(parent, struct nd_region, dev);
+ return container_of(nd_region, struct nd_blk_region, nd_region);
+}
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
- struct bio_integrity_payload *bip, u64 lba,
- int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+ struct bio_integrity_payload *bip, u64 lba, int rw)
{
- unsigned int len = nd_blk_meta_size(blk_dev);
+ struct nd_blk_region *ndbr = to_ndbr(nsblk);
+ unsigned int len = nsblk_meta_size(nsblk);
resource_size_t dev_offset, ns_offset;
- struct nd_namespace_blk *nsblk;
- struct nd_blk_region *ndbr;
+ u32 internal_lbasize, sector_size;
int err = 0;
- nsblk = blk_dev->nsblk;
- ndbr = blk_dev->ndbr;
- ns_offset = lba * blk_dev->internal_lbasize + blk_dev->sector_size;
+ internal_lbasize = nsblk_internal_lbasize(nsblk);
+ sector_size = nsblk_sector_size(nsblk);
+ ns_offset = lba * internal_lbasize + sector_size;
dev_offset = to_dev_offset(nsblk, ns_offset, len);
if (dev_offset == SIZE_MAX)
return -EIO;
@@ -104,25 +113,26 @@ static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
- struct bio_integrity_payload *bip, u64 lba,
- int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+ struct bio_integrity_payload *bip, u64 lba, int rw)
{
return 0;
}
#endif
-static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
- struct bio_integrity_payload *bip, struct page *page,
- unsigned int len, unsigned int off, int rw,
- sector_t sector)
+static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
+ struct bio_integrity_payload *bip, struct page *page,
+ unsigned int len, unsigned int off, int rw, sector_t sector)
{
- struct nd_blk_region *ndbr = blk_dev->ndbr;
+ struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset, ns_offset;
+ u32 internal_lbasize, sector_size;
int err = 0;
void *iobuf;
u64 lba;
+ internal_lbasize = nsblk_internal_lbasize(nsblk);
+ sector_size = nsblk_sector_size(nsblk);
while (len) {
unsigned int cur_len;
@@ -132,11 +142,11 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
* Block Window setup/move steps. the do_io routine is capable
* of handling len <= PAGE_SIZE.
*/
- cur_len = bip ? min(len, blk_dev->sector_size) : len;
+ cur_len = bip ? min(len, sector_size) : len;
- lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);
- ns_offset = lba * blk_dev->internal_lbasize;
- dev_offset = to_dev_offset(blk_dev->nsblk, ns_offset, cur_len);
+ lba = div_u64(sector << SECTOR_SHIFT, sector_size);
+ ns_offset = lba * internal_lbasize;
+ dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
if (dev_offset == SIZE_MAX)
return -EIO;
@@ -147,13 +157,13 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
return err;
if (bip) {
- err = nd_blk_rw_integrity(blk_dev, bip, lba, rw);
+ err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
if (err)
return err;
}
len -= cur_len;
off += cur_len;
- sector += blk_dev->sector_size >> SECTOR_SHIFT;
+ sector += sector_size >> SECTOR_SHIFT;
}
return err;
@@ -161,10 +171,8 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
{
- struct block_device *bdev = bio->bi_bdev;
- struct gendisk *disk = bdev->bd_disk;
struct bio_integrity_payload *bip;
- struct nd_blk_device *blk_dev;
+ struct nd_namespace_blk *nsblk;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@@ -183,17 +191,17 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
}
bip = bio_integrity(bio);
- blk_dev = disk->private_data;
+ nsblk = q->queuedata;
rw = bio_data_dir(bio);
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
BUG_ON(len > PAGE_SIZE);
- err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len,
- bvec.bv_offset, rw, iter.bi_sector);
+ err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
+ bvec.bv_offset, rw, iter.bi_sector);
if (err) {
- dev_info(&blk_dev->nsblk->common.dev,
+ dev_dbg(&nsblk->common.dev,
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
@@ -209,17 +217,16 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
-static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
+static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
resource_size_t offset, void *iobuf, size_t n, int rw)
{
- struct nd_blk_device *blk_dev = dev_get_drvdata(ndns->claim);
- struct nd_namespace_blk *nsblk = blk_dev->nsblk;
- struct nd_blk_region *ndbr = blk_dev->ndbr;
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
+ struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset;
dev_offset = to_dev_offset(nsblk, offset, n);
- if (unlikely(offset + n > blk_dev->disk_size)) {
+ if (unlikely(offset + n > nsblk->size)) {
dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
return -EFAULT;
}
@@ -235,51 +242,65 @@ static const struct block_device_operations nd_blk_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
-static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
- struct nd_blk_device *blk_dev)
+static void nd_blk_release_queue(void *q)
+{
+ blk_cleanup_queue(q);
+}
+
+static void nd_blk_release_disk(void *disk)
+{
+ del_gendisk(disk);
+ put_disk(disk);
+}
+
+static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
{
+ struct device *dev = &nsblk->common.dev;
resource_size_t available_disk_size;
+ struct request_queue *q;
struct gendisk *disk;
u64 internal_nlba;
- internal_nlba = div_u64(blk_dev->disk_size, blk_dev->internal_lbasize);
- available_disk_size = internal_nlba * blk_dev->sector_size;
+ internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
+ available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
- blk_dev->queue = blk_alloc_queue(GFP_KERNEL);
- if (!blk_dev->queue)
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
return -ENOMEM;
+ if (devm_add_action(dev, nd_blk_release_queue, q)) {
+ blk_cleanup_queue(q);
+ return -ENOMEM;
+ }
- blk_queue_make_request(blk_dev->queue, nd_blk_make_request);
- blk_queue_max_hw_sectors(blk_dev->queue, UINT_MAX);
- blk_queue_bounce_limit(blk_dev->queue, BLK_BOUNCE_ANY);
- blk_queue_logical_block_size(blk_dev->queue, blk_dev->sector_size);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, blk_dev->queue);
+ blk_queue_make_request(q, nd_blk_make_request);
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+ blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ q->queuedata = nsblk;
- disk = blk_dev->disk = alloc_disk(0);
- if (!disk) {
- blk_cleanup_queue(blk_dev->queue);
+ disk = alloc_disk(0);
+ if (!disk)
+ return -ENOMEM;
+ if (devm_add_action(dev, nd_blk_release_disk, disk)) {
+ put_disk(disk);
return -ENOMEM;
}
- disk->driverfs_dev = &ndns->dev;
+ disk->driverfs_dev = dev;
disk->first_minor = 0;
disk->fops = &nd_blk_fops;
- disk->private_data = blk_dev;
- disk->queue = blk_dev->queue;
+ disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
- nvdimm_namespace_disk_name(ndns, disk->disk_name);
+ nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
set_capacity(disk, 0);
add_disk(disk);
- if (nd_blk_meta_size(blk_dev)) {
- int rc = nd_integrity_init(disk, nd_blk_meta_size(blk_dev));
+ if (nsblk_meta_size(nsblk)) {
+ int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
- if (rc) {
- del_gendisk(disk);
- put_disk(disk);
- blk_cleanup_queue(blk_dev->queue);
+ if (rc)
return rc;
- }
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
@@ -291,56 +312,29 @@ static int nd_blk_probe(struct device *dev)
{
struct nd_namespace_common *ndns;
struct nd_namespace_blk *nsblk;
- struct nd_blk_device *blk_dev;
- int rc;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return PTR_ERR(ndns);
- blk_dev = kzalloc(sizeof(*blk_dev), GFP_KERNEL);
- if (!blk_dev)
- return -ENOMEM;
-
nsblk = to_nd_namespace_blk(&ndns->dev);
- blk_dev->disk_size = nvdimm_namespace_capacity(ndns);
- blk_dev->ndbr = to_nd_blk_region(dev->parent);
- blk_dev->nsblk = to_nd_namespace_blk(&ndns->dev);
- blk_dev->internal_lbasize = roundup(nsblk->lbasize,
- INT_LBASIZE_ALIGNMENT);
- blk_dev->sector_size = ((nsblk->lbasize >= 4096) ? 4096 : 512);
- dev_set_drvdata(dev, blk_dev);
-
- ndns->rw_bytes = nd_blk_rw_bytes;
+ nsblk->size = nvdimm_namespace_capacity(ndns);
+ dev_set_drvdata(dev, nsblk);
+
+ ndns->rw_bytes = nsblk_rw_bytes;
if (is_nd_btt(dev))
- rc = nvdimm_namespace_attach_btt(ndns);
- else if (nd_btt_probe(ndns, blk_dev) == 0) {
+ return nvdimm_namespace_attach_btt(ndns);
+ else if (nd_btt_probe(dev, ndns) == 0) {
/* we'll come back as btt-blk */
- rc = -ENXIO;
+ return -ENXIO;
} else
- rc = nd_blk_attach_disk(ndns, blk_dev);
- if (rc)
- kfree(blk_dev);
- return rc;
-}
-
-static void nd_blk_detach_disk(struct nd_blk_device *blk_dev)
-{
- del_gendisk(blk_dev->disk);
- put_disk(blk_dev->disk);
- blk_cleanup_queue(blk_dev->queue);
+ return nsblk_attach_disk(nsblk);
}
static int nd_blk_remove(struct device *dev)
{
- struct nd_blk_device *blk_dev = dev_get_drvdata(dev);
-
if (is_nd_btt(dev))
- nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
- else
- nd_blk_detach_disk(blk_dev);
- kfree(blk_dev);
-
+ nvdimm_namespace_detach_btt(to_nd_btt(dev));
return 0;
}
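
The nvdimm blk.c rewrite above replaces hand-rolled error unwinding (del_gendisk/put_disk/blk_cleanup_queue repeated in every failure path) with devm_add_action(): a release callback is registered against the device so the driver core tears the resource down automatically when the device is unbound. A compact sketch of the pattern; my_probe()/my_release() are hypothetical, devm_add_action() is the real driver-core helper:

#include <linux/device.h>
#include <linux/slab.h>

static void my_release(void *data)
{
	kfree(data);			/* runs automatically on driver detach */
}

static int my_probe(struct device *dev)
{
	void *res = kzalloc(64, GFP_KERNEL);

	if (!res)
		return -ENOMEM;

	if (devm_add_action(dev, my_release, res)) {
		/* registration itself failed: clean up immediately */
		kfree(res);
		return -ENOMEM;
	}

	/* no explicit unwinding needed past this point */
	return 0;
}

This is exactly the shape of the nd_blk_release_queue()/nd_blk_release_disk() registrations in the hunk above, including the immediate cleanup when devm_add_action() itself fails.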
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index af09d6c26709..68a7c3c1eed9 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1306,7 +1306,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
struct btt *btt;
struct device *dev = &nd_btt->dev;
- btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
+ btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
if (!btt)
return NULL;
@@ -1321,13 +1321,13 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
ret = discover_arenas(btt);
if (ret) {
dev_err(dev, "init: error in arena_discover: %d\n", ret);
- goto out_free;
+ return NULL;
}
if (btt->init_state != INIT_READY && nd_region->ro) {
dev_info(dev, "%s is read-only, unable to init btt metadata\n",
dev_name(&nd_region->dev));
- goto out_free;
+ return NULL;
} else if (btt->init_state != INIT_READY) {
btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
@@ -1337,29 +1337,25 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
ret = create_arenas(btt);
if (ret) {
dev_info(dev, "init: create_arenas: %d\n", ret);
- goto out_free;
+ return NULL;
}
ret = btt_meta_init(btt);
if (ret) {
dev_err(dev, "init: error in meta_init: %d\n", ret);
- goto out_free;
+ return NULL;
}
}
ret = btt_blk_init(btt);
if (ret) {
dev_err(dev, "init: error in blk_init: %d\n", ret);
- goto out_free;
+ return NULL;
}
btt_debugfs_init(btt);
return btt;
-
- out_free:
- kfree(btt);
- return NULL;
}
/**
@@ -1377,7 +1373,6 @@ static void btt_fini(struct btt *btt)
btt_blk_cleanup(btt);
free_arenas(btt);
debugfs_remove_recursive(btt->debugfs_dir);
- kfree(btt);
}
}
@@ -1410,9 +1405,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
- struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
struct btt *btt = nd_btt->btt;
btt_fini(btt);
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index cb477518dd0e..816d0dae6398 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -273,10 +273,10 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
return 0;
}
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
- struct device *dev;
+ struct device *btt_dev;
struct btt_sb *btt_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
@@ -284,21 +284,19 @@ int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
return -ENODEV;
nvdimm_bus_lock(&ndns->dev);
- dev = __nd_btt_create(nd_region, 0, NULL, ndns);
+ btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nvdimm_bus_unlock(&ndns->dev);
- if (!dev)
+ if (!btt_dev)
return -ENOMEM;
- dev_set_drvdata(dev, drvdata);
- btt_sb = kzalloc(sizeof(*btt_sb), GFP_KERNEL);
- rc = __nd_btt_probe(to_nd_btt(dev), ndns, btt_sb);
- kfree(btt_sb);
- dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
- rc == 0 ? dev_name(dev) : "<none>");
+ btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
+ rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
+ dev_dbg(dev, "%s: btt: %s\n", __func__,
+ rc == 0 ? dev_name(btt_dev) : "<none>");
if (rc < 0) {
- struct nd_btt *nd_btt = to_nd_btt(dev);
+ struct nd_btt *nd_btt = to_nd_btt(btt_dev);
- __nd_detach_ndns(dev, &nd_btt->ndns);
- put_device(dev);
+ __nd_detach_ndns(btt_dev, &nd_btt->ndns);
+ put_device(btt_dev);
}
return rc;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8111b1299515..dcaefe229887 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -40,6 +40,8 @@ static int to_nd_device_type(struct device *dev)
return ND_DEVICE_REGION_PMEM;
else if (is_nd_blk(dev))
return ND_DEVICE_REGION_BLK;
+ else if (is_nd_dax(dev))
+ return ND_DEVICE_DAX_PMEM;
else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
return nd_region_to_nstype(to_nd_region(dev->parent));
@@ -246,6 +248,8 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
void __nd_device_register(struct device *dev)
{
+ if (!dev)
+ return;
dev->bus = &nvdimm_bus_type;
get_device(dev);
async_schedule_domain(nd_async_device_register, dev,
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index e8f03b0e95e4..5f53db59a058 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,6 +12,7 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
+#include <linux/pmem.h>
#include "nd-core.h"
#include "pfn.h"
#include "btt.h"
@@ -84,6 +85,8 @@ static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
seed = nd_region->btt_seed;
else if (is_nd_pfn(dev))
seed = nd_region->pfn_seed;
+ else if (is_nd_dax(dev))
+ seed = nd_region->dax_seed;
if (seed == dev || ndns || dev->driver)
return false;
@@ -199,3 +202,63 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
+
+static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size, int rw)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+ if (unlikely(offset + size > nsio->size)) {
+ dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
+ return -EFAULT;
+ }
+
+ if (rw == READ) {
+ unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+ if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+ return -EIO;
+ return memcpy_from_pmem(buf, nsio->addr + offset, size);
+ } else {
+ memcpy_to_pmem(nsio->addr + offset, buf, size);
+ wmb_pmem();
+ }
+
+ return 0;
+}
+
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+{
+ struct resource *res = &nsio->res;
+ struct nd_namespace_common *ndns = &nsio->common;
+
+ nsio->size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", res);
+ return -EBUSY;
+ }
+
+ ndns->rw_bytes = nsio_rw_bytes;
+ if (devm_init_badblocks(dev, &nsio->bb))
+ return -ENOMEM;
+ nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
+ &nsio->res);
+
+ nsio->addr = devm_memremap(dev, res->start, resource_size(res),
+ ARCH_MEMREMAP_PMEM);
+ if (IS_ERR(nsio->addr))
+ return PTR_ERR(nsio->addr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_nsio_enable);
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+ struct resource *res = &nsio->res;
+
+ devm_memunmap(dev, nsio->addr);
+ devm_exit_badblocks(dev, &nsio->bb);
+ devm_release_mem_region(dev, res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(devm_nsio_disable);
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
new file mode 100644
index 000000000000..f90f7549e7f4
--- /dev/null
+++ b/drivers/nvdimm/dax_devs.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "nd.h"
+
+static void nd_dax_release(struct device *dev)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+ struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+
+ dev_dbg(dev, "%s\n", __func__);
+ nd_detach_ndns(dev, &nd_pfn->ndns);
+ ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
+ kfree(nd_pfn->uuid);
+ kfree(nd_dax);
+}
+
+static struct device_type nd_dax_device_type = {
+ .name = "nd_dax",
+ .release = nd_dax_release,
+};
+
+bool is_nd_dax(struct device *dev)
+{
+ return dev ? dev->type == &nd_dax_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_dax);
+
+struct nd_dax *to_nd_dax(struct device *dev)
+{
+ struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
+
+ WARN_ON(!is_nd_dax(dev));
+ return nd_dax;
+}
+EXPORT_SYMBOL(to_nd_dax);
+
+static const struct attribute_group *nd_dax_attribute_groups[] = {
+ &nd_pfn_attribute_group,
+ &nd_device_attribute_group,
+ &nd_numa_attribute_group,
+ NULL,
+};
+
+static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
+{
+ struct nd_pfn *nd_pfn;
+ struct nd_dax *nd_dax;
+ struct device *dev;
+
+ nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
+ if (!nd_dax)
+ return NULL;
+
+ nd_pfn = &nd_dax->nd_pfn;
+ nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
+ if (nd_pfn->id < 0) {
+ kfree(nd_dax);
+ return NULL;
+ }
+
+ dev = &nd_pfn->dev;
+ dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
+ dev->groups = nd_dax_attribute_groups;
+ dev->type = &nd_dax_device_type;
+ dev->parent = &nd_region->dev;
+
+ return nd_dax;
+}
+
+struct device *nd_dax_create(struct nd_region *nd_region)
+{
+ struct device *dev = NULL;
+ struct nd_dax *nd_dax;
+
+ if (!is_nd_pmem(&nd_region->dev))
+ return NULL;
+
+ nd_dax = nd_dax_alloc(nd_region);
+ if (nd_dax)
+ dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
+ __nd_device_register(dev);
+ return dev;
+}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index f5cb88601359..c5e3196c45b0 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1288,6 +1288,8 @@ static ssize_t mode_show(struct device *dev,
mode = "safe";
else if (claim && is_nd_pfn(claim))
mode = "memory";
+ else if (claim && is_nd_dax(claim))
+ mode = "dax";
else if (!claim && pmem_should_map_pages(dev))
mode = "memory";
else
@@ -1379,21 +1381,19 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
- struct nd_namespace_common *ndns;
+ struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
+ struct nd_namespace_common *ndns = NULL;
resource_size_t size;
- if (nd_btt || nd_pfn) {
- struct device *host = NULL;
-
- if (nd_btt) {
- host = &nd_btt->dev;
+ if (nd_btt || nd_pfn || nd_dax) {
+ if (nd_btt)
ndns = nd_btt->ndns;
- } else if (nd_pfn) {
- host = &nd_pfn->dev;
+ else if (nd_pfn)
ndns = nd_pfn->ndns;
- }
+ else if (nd_dax)
+ ndns = nd_dax->nd_pfn.ndns;
- if (!ndns || !host)
+ if (!ndns)
return ERR_PTR(-ENODEV);
/*
@@ -1404,12 +1404,12 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
- dev_name(host));
+ dev_name(dev));
return ERR_PTR(-EBUSY);
}
- if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
+ if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
"host (%s) vs claim (%s) mismatch\n",
- dev_name(host),
+ dev_name(dev),
dev_name(ndns->claim)))
return ERR_PTR(-ENXIO);
} else {
@@ -1784,6 +1784,18 @@ void nd_region_create_blk_seed(struct nd_region *nd_region)
nd_device_register(nd_region->ns_seed);
}
+void nd_region_create_dax_seed(struct nd_region *nd_region)
+{
+ WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+ nd_region->dax_seed = nd_dax_create(nd_region);
+ /*
+ * Seed creation failures are not fatal, provisioning is simply
+ * disabled until memory becomes available
+ */
+ if (!nd_region->dax_seed)
+ dev_err(&nd_region->dev, "failed to create dax namespace\n");
+}
+
void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 1d1500f3d8b5..cb65308c0329 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -54,6 +54,7 @@ struct nd_region;
void nd_region_create_blk_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
+void nd_region_create_dax_seed(struct nd_region *nd_region);
void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 875c524fafb0..46910b8f32b1 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -13,6 +13,7 @@
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -100,10 +101,12 @@ struct nd_region {
struct ida ns_ida;
struct ida btt_ida;
struct ida pfn_ida;
+ struct ida dax_ida;
unsigned long flags;
struct device *ns_seed;
struct device *btt_seed;
struct device *pfn_seed;
+ struct device *dax_seed;
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
@@ -160,6 +163,10 @@ struct nd_pfn {
struct nd_namespace_common *ndns;
};
+struct nd_dax {
+ struct nd_pfn nd_pfn;
+};
+
enum nd_async_mode {
ND_SYNC,
ND_ASYNC,
@@ -197,11 +204,12 @@ struct nd_gen_sb {
u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
-static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_btt_probe(struct device *dev,
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -219,12 +227,16 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
+ struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn);
+extern struct attribute_group nd_pfn_attribute_group;
#else
-static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_pfn_probe(struct device *dev,
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -245,6 +257,22 @@ static inline int nd_pfn_validate(struct nd_pfn *nd_pfn)
}
#endif
+struct nd_dax *to_nd_dax(struct device *dev);
+#if IS_ENABLED(CONFIG_NVDIMM_DAX)
+bool is_nd_dax(struct device *dev);
+struct device *nd_dax_create(struct nd_region *nd_region);
+#else
+static inline bool is_nd_dax(struct device *dev)
+{
+ return false;
+}
+
+static inline struct device *nd_dax_create(struct nd_region *nd_region)
+{
+ return NULL;
+}
+#endif
+
struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
@@ -263,11 +291,32 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap);
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap)
+{
+ return ERR_PTR(-ENXIO);
+}
+static inline int devm_nsio_enable(struct device *dev,
+ struct nd_namespace_io *nsio)
+{
+ return -ENXIO;
+}
+static inline void devm_nsio_disable(struct device *dev,
+ struct nd_namespace_io *nsio)
+{
+}
+#endif
int nd_blk_region_init(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
@@ -281,6 +330,19 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
return true;
}
void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
+ unsigned int len)
+{
+ if (bb->count) {
+ sector_t first_bad;
+ int num_bad;
+
+ return !!badblocks_check(bb, sector, len / 512, &first_bad,
+ &num_bad);
+ }
+
+ return false;
+}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index 8e343a3ca873..9d2704c83fa7 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -33,7 +33,9 @@ struct nd_pfn_sb {
/* minor-version-1 additions for section alignment */
__le32 start_pad;
__le32 end_trunc;
- u8 padding[4004];
+ /* minor-version-2 record the base alignment of the mapping */
+ __le32 align;
+ u8 padding[4000];
__le64 checksum;
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index e071e214feba..2248056d29e7 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -10,6 +10,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
@@ -53,10 +54,29 @@ struct nd_pfn *to_nd_pfn(struct device *dev)
}
EXPORT_SYMBOL(to_nd_pfn);
+static struct nd_pfn *to_nd_pfn_safe(struct device *dev)
+{
+ /*
+ * pfn device attributes are re-used by dax device instances, so we
+ * need to be careful to correct device-to-nd_pfn conversion.
+ */
+ if (is_nd_pfn(dev))
+ return to_nd_pfn(dev);
+
+ if (is_nd_dax(dev)) {
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+
+ return &nd_dax->nd_pfn;
+ }
+
+ WARN_ON(1);
+ return NULL;
+}
+
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
switch (nd_pfn->mode) {
case PFN_MODE_RAM:
@@ -71,7 +91,7 @@ static ssize_t mode_show(struct device *dev,
static ssize_t mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
device_lock(dev);
@@ -105,7 +125,7 @@ static DEVICE_ATTR_RW(mode);
static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
return sprintf(buf, "%lx\n", nd_pfn->align);
}
@@ -133,7 +153,7 @@ static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
static ssize_t align_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -151,7 +171,7 @@ static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
if (nd_pfn->uuid)
return sprintf(buf, "%pUb\n", nd_pfn->uuid);
@@ -161,7 +181,7 @@ static ssize_t uuid_show(struct device *dev,
static ssize_t uuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -177,7 +197,7 @@ static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
@@ -190,7 +210,7 @@ static ssize_t namespace_show(struct device *dev,
static ssize_t namespace_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -208,7 +228,7 @@ static DEVICE_ATTR_RW(namespace);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -234,7 +254,7 @@ static DEVICE_ATTR_RO(resource);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -269,7 +289,7 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
-static struct attribute_group nd_pfn_attribute_group = {
+struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
};
@@ -280,16 +300,32 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
NULL,
};
-static struct device *__nd_pfn_create(struct nd_region *nd_region,
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
- struct nd_pfn *nd_pfn;
- struct device *dev;
+ struct device *dev = &nd_pfn->dev;
- /* we can only create pages for contiguous ranged of pmem */
- if (!is_nd_pmem(&nd_region->dev))
+ if (!nd_pfn)
return NULL;
+ nd_pfn->mode = PFN_MODE_NONE;
+ nd_pfn->align = HPAGE_SIZE;
+ dev = &nd_pfn->dev;
+ device_initialize(&nd_pfn->dev);
+ if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+ dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+ __func__, dev_name(ndns->claim));
+ put_device(dev);
+ return NULL;
+ }
+ return dev;
+}
+
+static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
+{
+ struct nd_pfn *nd_pfn;
+ struct device *dev;
+
nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
if (!nd_pfn)
return NULL;
@@ -300,29 +336,27 @@ static struct device *__nd_pfn_create(struct nd_region *nd_region,
return NULL;
}
- nd_pfn->mode = PFN_MODE_NONE;
- nd_pfn->align = HPAGE_SIZE;
dev = &nd_pfn->dev;
dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
- dev->parent = &nd_region->dev;
- dev->type = &nd_pfn_device_type;
dev->groups = nd_pfn_attribute_groups;
- device_initialize(&nd_pfn->dev);
- if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
- dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
- __func__, dev_name(ndns->claim));
- put_device(dev);
- return NULL;
- }
- return dev;
+ dev->type = &nd_pfn_device_type;
+ dev->parent = &nd_region->dev;
+
+ return nd_pfn;
}
struct device *nd_pfn_create(struct nd_region *nd_region)
{
- struct device *dev = __nd_pfn_create(nd_region, NULL);
+ struct nd_pfn *nd_pfn;
+ struct device *dev;
+
+ if (!is_nd_pmem(&nd_region->dev))
+ return NULL;
- if (dev)
- __nd_device_register(dev);
+ nd_pfn = nd_pfn_alloc(nd_region);
+ dev = nd_pfn_devinit(nd_pfn, NULL);
+
+ __nd_device_register(dev);
return dev;
}
@@ -360,6 +394,9 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
pfn_sb->end_trunc = 0;
}
+ if (__le16_to_cpu(pfn_sb->version_minor) < 2)
+ pfn_sb->align = 0;
+
switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM:
case PFN_MODE_PMEM:
@@ -399,7 +436,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
return -EBUSY;
}
- nd_pfn->align = 1UL << ilog2(offset);
+ nd_pfn->align = le32_to_cpu(pfn_sb->align);
if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
offset);
@@ -410,11 +447,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
}
EXPORT_SYMBOL(nd_pfn_validate);
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
- struct device *dev;
struct nd_pfn *nd_pfn;
+ struct device *pfn_dev;
struct nd_pfn_sb *pfn_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
@@ -422,25 +459,213 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
return -ENODEV;
nvdimm_bus_lock(&ndns->dev);
- dev = __nd_pfn_create(nd_region, ndns);
+ nd_pfn = nd_pfn_alloc(nd_region);
+ pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
nvdimm_bus_unlock(&ndns->dev);
- if (!dev)
+ if (!pfn_dev)
return -ENOMEM;
- dev_set_drvdata(dev, drvdata);
- pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
- nd_pfn = to_nd_pfn(dev);
+ pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn);
- nd_pfn->pfn_sb = NULL;
- kfree(pfn_sb);
- dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
- rc == 0 ? dev_name(dev) : "<none>");
+ dev_dbg(dev, "%s: pfn: %s\n", __func__,
+ rc == 0 ? dev_name(pfn_dev) : "<none>");
if (rc < 0) {
- __nd_detach_ndns(dev, &nd_pfn->ndns);
- put_device(dev);
+ __nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+ put_device(pfn_dev);
} else
- __nd_device_register(&nd_pfn->dev);
+ __nd_device_register(pfn_dev);
return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+ unsigned long base_pfn = PHYS_PFN(base);
+
+ return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+ unsigned long reserve = PHYS_PFN(SZ_8K);
+ unsigned long base_pfn = PHYS_PFN(base);
+
+ reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+ return reserve;
+}
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap)
+{
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ u64 offset = le64_to_cpu(pfn_sb->dataoff);
+ u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+ u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ resource_size_t base = nsio->res.start + start_pad;
+ struct vmem_altmap __altmap = {
+ .base_pfn = init_altmap_base(base),
+ .reserve = init_altmap_reserve(base),
+ };
+
+ memcpy(res, &nsio->res, sizeof(*res));
+ res->start += start_pad;
+ res->end -= end_trunc;
+
+ nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+ if (nd_pfn->mode == PFN_MODE_RAM) {
+ if (offset < SZ_8K)
+ return ERR_PTR(-EINVAL);
+ nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+ altmap = NULL;
+ } else if (nd_pfn->mode == PFN_MODE_PMEM) {
+ nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+ if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+ dev_info(&nd_pfn->dev,
+ "number of pfns truncated from %lld to %ld\n",
+ le64_to_cpu(nd_pfn->pfn_sb->npfns),
+ nd_pfn->npfns);
+ memcpy(altmap, &__altmap, sizeof(*altmap));
+ altmap->free = PHYS_PFN(offset - SZ_8K);
+ altmap->alloc = 0;
+ } else
+ return ERR_PTR(-ENXIO);
+
+ return altmap;
+}
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+ u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ u32 start_pad = 0, end_trunc = 0;
+ resource_size_t start, size;
+ struct nd_namespace_io *nsio;
+ struct nd_region *nd_region;
+ struct nd_pfn_sb *pfn_sb;
+ unsigned long npfns;
+ phys_addr_t offset;
+ u64 checksum;
+ int rc;
+
+ pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ if (!pfn_sb)
+ return -ENOMEM;
+
+ nd_pfn->pfn_sb = pfn_sb;
+ rc = nd_pfn_validate(nd_pfn);
+ if (rc != -ENODEV)
+ return rc;
+
+ /* no info block, do init */;
+ nd_region = to_nd_region(nd_pfn->dev.parent);
+ if (nd_region->ro) {
+ dev_info(&nd_pfn->dev,
+ "%s is read-only, unable to init metadata\n",
+ dev_name(&nd_region->dev));
+ return -ENXIO;
+ }
+
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+ /*
+ * Check if pmem collides with 'System RAM' when section aligned and
+ * trim it accordingly
+ */
+ nsio = to_nd_namespace_io(&ndns->dev);
+ start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+ size = resource_size(&nsio->res);
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED) {
+ start = nsio->res.start;
+ start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+ }
+
+ start = nsio->res.start;
+ size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED) {
+ size = resource_size(&nsio->res);
+ end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+ }
+
+ if (start_pad + end_trunc)
+ dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+ dev_name(&ndns->dev), start_pad + end_trunc);
+
+ /*
+ * Note, we use 64 here for the standard size of struct page,
+ * debugging options may cause it to be larger in which case the
+ * implementation will limit the pfns advertised through
+ * ->direct_access() to those that are included in the memmap.
+ */
+ start += start_pad;
+ size = resource_size(&nsio->res);
+ npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+ if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long memmap_size;
+
+ /*
+ * vmemmap_populate_hugepages() allocates the memmap array in
+ * HPAGE_SIZE chunks.
+ */
+ memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+ offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
+ nd_pfn->align) - start;
+ } else if (nd_pfn->mode == PFN_MODE_RAM)
+ offset = ALIGN(start + SZ_8K + dax_label_reserve,
+ nd_pfn->align) - start;
+ else
+ return -ENXIO;
+
+ if (offset + start_pad + end_trunc >= size) {
+ dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+ dev_name(&ndns->dev));
+ return -ENXIO;
+ }
+
+ npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+ pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+ pfn_sb->dataoff = cpu_to_le64(offset);
+ pfn_sb->npfns = cpu_to_le64(npfns);
+ memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+ memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+ memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+ pfn_sb->version_major = cpu_to_le16(1);
+ pfn_sb->version_minor = cpu_to_le16(2);
+ pfn_sb->start_pad = cpu_to_le32(start_pad);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ pfn_sb->align = cpu_to_le32(nd_pfn->align);
+ checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+ pfn_sb->checksum = cpu_to_le64(checksum);
+
+ return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap)
+{
+ int rc;
+
+ if (!nd_pfn->uuid || !nd_pfn->ndns)
+ return ERR_PTR(-ENODEV);
+
+ rc = nd_pfn_init(nd_pfn);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* we need a valid pfn_sb before we can init a vmem_altmap */
+ return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8e09c544d892..d9a0dbc2d023 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -33,10 +33,6 @@
#include "nd.h"
struct pmem_device {
- struct request_queue *pmem_queue;
- struct gendisk *pmem_disk;
- struct nd_namespace_common *ndns;
-
/* One contiguous memory region per device */
phys_addr_t phys_addr;
/* when non-zero this device is hosting a 'pfn' instance */
@@ -50,23 +46,10 @@ struct pmem_device {
struct badblocks bb;
};
-static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
-{
- if (bb->count) {
- sector_t first_bad;
- int num_bad;
-
- return !!badblocks_check(bb, sector, len / 512, &first_bad,
- &num_bad);
- }
-
- return false;
-}
-
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
- struct device *dev = disk_to_dev(pmem->pmem_disk);
+ struct device *dev = pmem->bb.dev;
sector_t sector;
long cleared;
@@ -103,6 +86,20 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
flush_dcache_page(page);
}
} else {
+ /*
+ * Note that we write the data both before and after
+ * clearing poison. The write before clear poison
+ * handles situations where the latest written data is
+ * preserved and the clear poison operation simply marks
+ * the address range as valid without changing the data.
+ * In this case application software can assume that an
+ * interrupted write will either return the new good
+ * data or an error.
+ *
+ * However, if pmem_clear_poison() leaves the data in an
+ * indeterminate state we need to perform the write
+ * after clear poison.
+ */
flush_dcache_page(page);
memcpy_to_pmem(pmem_addr, mem + off, len);
if (unlikely(bad_pmem)) {
@@ -122,8 +119,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
- struct block_device *bdev = bio->bi_bdev;
- struct pmem_device *pmem = bdev->bd_disk->private_data;
+ struct pmem_device *pmem = q->queuedata;
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
@@ -148,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
- struct pmem_device *pmem = bdev->bd_disk->private_data;
+ struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
@@ -170,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
void __pmem **kaddr, pfn_t *pfn)
{
- struct pmem_device *pmem = bdev->bd_disk->private_data;
+ struct pmem_device *pmem = bdev->bd_queue->queuedata;
resource_size_t offset = sector * 512 + pmem->data_offset;
*kaddr = pmem->virt_addr + offset;
@@ -186,104 +182,119 @@ static const struct block_device_operations pmem_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
-static struct pmem_device *pmem_alloc(struct device *dev,
- struct resource *res, int id)
+static void pmem_release_queue(void *q)
{
+ blk_cleanup_queue(q);
+}
+
+static void pmem_release_disk(void *disk)
+{
+ del_gendisk(disk);
+ put_disk(disk);
+}
+
+static int pmem_attach_disk(struct device *dev,
+ struct nd_namespace_common *ndns)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct vmem_altmap __altmap, *altmap = NULL;
+ struct resource *res = &nsio->res;
+ struct nd_pfn *nd_pfn = NULL;
+ int nid = dev_to_node(dev);
+ struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
+ struct resource pfn_res;
struct request_queue *q;
+ struct gendisk *disk;
+ void *addr;
+
+ /* while nsio_rw_bytes is active, parse a pfn info block if present */
+ if (is_nd_pfn(dev)) {
+ nd_pfn = to_nd_pfn(dev);
+ altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
+ if (IS_ERR(altmap))
+ return PTR_ERR(altmap);
+ }
+
+ /* we're attaching a block device, disable raw namespace access */
+ devm_nsio_disable(dev, nsio);
pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
+ dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
if (!arch_has_wmb_pmem())
dev_warn(dev, "unable to guarantee persistence of writes\n");
- if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
- dev_name(dev))) {
- dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
- &pmem->phys_addr, pmem->size);
- return ERR_PTR(-EBUSY);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", res);
+ return -EBUSY;
}
q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
if (!q)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
pmem->pfn_flags = PFN_DEV;
- if (pmem_should_map_pages(dev)) {
- pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
+ if (is_nd_pfn(dev)) {
+ addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
+ altmap);
+ pfn_sb = nd_pfn->pfn_sb;
+ pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+ pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+ pmem->pfn_flags |= PFN_MAP;
+ res = &pfn_res; /* for badblocks populate */
+ res->start += pmem->data_offset;
+ } else if (pmem_should_map_pages(dev)) {
+ addr = devm_memremap_pages(dev, &nsio->res,
&q->q_usage_counter, NULL);
pmem->pfn_flags |= PFN_MAP;
} else
- pmem->virt_addr = (void __pmem *) devm_memremap(dev,
- pmem->phys_addr, pmem->size,
- ARCH_MEMREMAP_PMEM);
+ addr = devm_memremap(dev, pmem->phys_addr,
+ pmem->size, ARCH_MEMREMAP_PMEM);
- if (IS_ERR(pmem->virt_addr)) {
+ /*
+ * At release time the queue must be dead before
+ * devm_memremap_pages is unwound
+ */
+ if (devm_add_action(dev, pmem_release_queue, q)) {
blk_cleanup_queue(q);
- return (void __force *) pmem->virt_addr;
+ return -ENOMEM;
}
- pmem->pmem_queue = q;
- return pmem;
-}
-
-static void pmem_detach_disk(struct pmem_device *pmem)
-{
- if (!pmem->pmem_disk)
- return;
-
- del_gendisk(pmem->pmem_disk);
- put_disk(pmem->pmem_disk);
- blk_cleanup_queue(pmem->pmem_queue);
-}
-
-static int pmem_attach_disk(struct device *dev,
- struct nd_namespace_common *ndns, struct pmem_device *pmem)
-{
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- int nid = dev_to_node(dev);
- struct resource bb_res;
- struct gendisk *disk;
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ pmem->virt_addr = (void __pmem *) addr;
- blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
- blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
- blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
- blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
+ blk_queue_make_request(q, pmem_make_request);
+ blk_queue_physical_block_size(q, PAGE_SIZE);
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
- if (!disk) {
- blk_cleanup_queue(pmem->pmem_queue);
+ if (!disk)
+ return -ENOMEM;
+ if (devm_add_action(dev, pmem_release_disk, disk)) {
+ put_disk(disk);
return -ENOMEM;
}
disk->fops = &pmem_fops;
- disk->private_data = pmem;
- disk->queue = pmem->pmem_queue;
+ disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
disk->driverfs_dev = dev;
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
- pmem->pmem_disk = disk;
- devm_exit_badblocks(dev, &pmem->bb);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- bb_res.start = nsio->res.start + pmem->data_offset;
- bb_res.end = nsio->res.end;
- if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-
- bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
- bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
- }
- nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
- &bb_res);
+ nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
disk->bb = &pmem->bb;
add_disk(disk);
revalidate_disk(disk);
@@ -291,339 +302,67 @@ static int pmem_attach_disk(struct device *dev,
return 0;
}
-static int pmem_rw_bytes(struct nd_namespace_common *ndns,
- resource_size_t offset, void *buf, size_t size, int rw)
-{
- struct pmem_device *pmem = dev_get_drvdata(ndns->claim);
-
- if (unlikely(offset + size > pmem->size)) {
- dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
- return -EFAULT;
- }
-
- if (rw == READ) {
- unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
-
- if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
- return -EIO;
- return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
- } else {
- memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
- wmb_pmem();
- }
-
- return 0;
-}
-
-static int nd_pfn_init(struct nd_pfn *nd_pfn)
-{
- struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
- struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- u32 start_pad = 0, end_trunc = 0;
- resource_size_t start, size;
- struct nd_namespace_io *nsio;
- struct nd_region *nd_region;
- unsigned long npfns;
- phys_addr_t offset;
- u64 checksum;
- int rc;
-
- if (!pfn_sb)
- return -ENOMEM;
-
- nd_pfn->pfn_sb = pfn_sb;
- rc = nd_pfn_validate(nd_pfn);
- if (rc == -ENODEV)
- /* no info block, do init */;
- else
- return rc;
-
- nd_region = to_nd_region(nd_pfn->dev.parent);
- if (nd_region->ro) {
- dev_info(&nd_pfn->dev,
- "%s is read-only, unable to init metadata\n",
- dev_name(&nd_region->dev));
- goto err;
- }
-
- memset(pfn_sb, 0, sizeof(*pfn_sb));
-
- /*
- * Check if pmem collides with 'System RAM' when section aligned and
- * trim it accordingly
- */
- nsio = to_nd_namespace_io(&ndns->dev);
- start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
- size = resource_size(&nsio->res);
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
-
- start = nsio->res.start;
- start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
- }
-
- start = nsio->res.start;
- size = PHYS_SECTION_ALIGN_UP(start + size) - start;
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
- size = resource_size(&nsio->res);
- end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
- }
-
- if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
- dev_name(&ndns->dev), start_pad + end_trunc);
-
- /*
- * Note, we use 64 here for the standard size of struct page,
- * debugging options may cause it to be larger in which case the
- * implementation will limit the pfns advertised through
- * ->direct_access() to those that are included in the memmap.
- */
- start += start_pad;
- npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
- if (nd_pfn->mode == PFN_MODE_PMEM)
- offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
- - start;
- else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
- else
- goto err;
-
- if (offset + start_pad + end_trunc >= pmem->size) {
- dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
- dev_name(&ndns->dev));
- goto err;
- }
-
- npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
- pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
- pfn_sb->dataoff = cpu_to_le64(offset);
- pfn_sb->npfns = cpu_to_le64(npfns);
- memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
- memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
- memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
- pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(1);
- pfn_sb->start_pad = cpu_to_le32(start_pad);
- pfn_sb->end_trunc = cpu_to_le32(end_trunc);
- checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
- pfn_sb->checksum = cpu_to_le64(checksum);
-
- rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
- if (rc)
- goto err;
-
- return 0;
- err:
- nd_pfn->pfn_sb = NULL;
- kfree(pfn_sb);
- return -ENXIO;
-}
-
-static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
-{
- struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
- struct pmem_device *pmem;
-
- /* free pmem disk */
- pmem = dev_get_drvdata(&nd_pfn->dev);
- pmem_detach_disk(pmem);
-
- /* release nd_pfn resources */
- kfree(nd_pfn->pfn_sb);
- nd_pfn->pfn_sb = NULL;
-
- return 0;
-}
-
-/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
- */
-static unsigned long init_altmap_base(resource_size_t base)
-{
- unsigned long base_pfn = PHYS_PFN(base);
-
- return PFN_SECTION_ALIGN_DOWN(base_pfn);
-}
-
-static unsigned long init_altmap_reserve(resource_size_t base)
-{
- unsigned long reserve = PHYS_PFN(SZ_8K);
- unsigned long base_pfn = PHYS_PFN(base);
-
- reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
- return reserve;
-}
-
-static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
-{
- int rc;
- struct resource res;
- struct request_queue *q;
- struct pmem_device *pmem;
- struct vmem_altmap *altmap;
- struct device *dev = &nd_pfn->dev;
- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
- u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- resource_size_t base = nsio->res.start + start_pad;
- struct vmem_altmap __altmap = {
- .base_pfn = init_altmap_base(base),
- .reserve = init_altmap_reserve(base),
- };
-
- pmem = dev_get_drvdata(dev);
- pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
- pmem->pfn_pad = start_pad + end_trunc;
- nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
- if (nd_pfn->mode == PFN_MODE_RAM) {
- if (pmem->data_offset < SZ_8K)
- return -EINVAL;
- nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
- altmap = NULL;
- } else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
- / PAGE_SIZE;
- if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
- dev_info(&nd_pfn->dev,
- "number of pfns truncated from %lld to %ld\n",
- le64_to_cpu(nd_pfn->pfn_sb->npfns),
- nd_pfn->npfns);
- altmap = & __altmap;
- altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
- altmap->alloc = 0;
- } else {
- rc = -ENXIO;
- goto err;
- }
-
- /* establish pfn range for lookup, and switch to direct map */
- q = pmem->pmem_queue;
- memcpy(&res, &nsio->res, sizeof(res));
- res.start += start_pad;
- res.end -= end_trunc;
- devm_memunmap(dev, (void __force *) pmem->virt_addr);
- pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
- &q->q_usage_counter, altmap);
- pmem->pfn_flags |= PFN_MAP;
- if (IS_ERR(pmem->virt_addr)) {
- rc = PTR_ERR(pmem->virt_addr);
- goto err;
- }
-
- /* attach pmem disk in "pfn-mode" */
- rc = pmem_attach_disk(dev, ndns, pmem);
- if (rc)
- goto err;
-
- return rc;
- err:
- nvdimm_namespace_detach_pfn(ndns);
- return rc;
-
-}
-
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
-{
- struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
- int rc;
-
- if (!nd_pfn->uuid || !nd_pfn->ndns)
- return -ENODEV;
-
- rc = nd_pfn_init(nd_pfn);
- if (rc)
- return rc;
- /* we need a valid pfn_sb before we can init a vmem_altmap */
- return __nvdimm_namespace_attach_pfn(nd_pfn);
-}
-
static int nd_pmem_probe(struct device *dev)
{
- struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_namespace_common *ndns;
- struct nd_namespace_io *nsio;
- struct pmem_device *pmem;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return PTR_ERR(ndns);
- nsio = to_nd_namespace_io(&ndns->dev);
- pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
- if (IS_ERR(pmem))
- return PTR_ERR(pmem);
-
- pmem->ndns = ndns;
- dev_set_drvdata(dev, pmem);
- ndns->rw_bytes = pmem_rw_bytes;
- if (devm_init_badblocks(dev, &pmem->bb))
- return -ENOMEM;
- nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
+ if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
+ return -ENXIO;
- if (is_nd_btt(dev)) {
- /* btt allocates its own request_queue */
- blk_cleanup_queue(pmem->pmem_queue);
- pmem->pmem_queue = NULL;
+ if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
- }
if (is_nd_pfn(dev))
- return nvdimm_namespace_attach_pfn(ndns);
+ return pmem_attach_disk(dev, ndns);
- if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
- /*
- * We'll come back as either btt-pmem, or pfn-pmem, so
- * drop the queue allocation for now.
- */
- blk_cleanup_queue(pmem->pmem_queue);
+ /* if we find a valid info-block we'll come back as that personality */
+ if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0)
return -ENXIO;
- }
- return pmem_attach_disk(dev, ndns, pmem);
+ /* ...otherwise we're just a raw pmem device */
+ return pmem_attach_disk(dev, ndns);
}
static int nd_pmem_remove(struct device *dev)
{
- struct pmem_device *pmem = dev_get_drvdata(dev);
-
if (is_nd_btt(dev))
- nvdimm_namespace_detach_btt(pmem->ndns);
- else if (is_nd_pfn(dev))
- nvdimm_namespace_detach_pfn(pmem->ndns);
- else
- pmem_detach_disk(pmem);
-
+ nvdimm_namespace_detach_btt(to_nd_btt(dev));
return 0;
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
- struct pmem_device *pmem = dev_get_drvdata(dev);
- struct nd_namespace_common *ndns = pmem->ndns;
struct nd_region *nd_region = to_nd_region(dev->parent);
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct resource res = {
- .start = nsio->res.start + pmem->data_offset,
- .end = nsio->res.end,
- };
+ struct pmem_device *pmem = dev_get_drvdata(dev);
+ resource_size_t offset = 0, end_trunc = 0;
+ struct nd_namespace_common *ndns;
+ struct nd_namespace_io *nsio;
+ struct resource res;
if (event != NVDIMM_REVALIDATE_POISON)
return;
- if (is_nd_pfn(dev)) {
+ if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ ndns = nd_btt->ndns;
+ } else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
- res.start += __le32_to_cpu(pfn_sb->start_pad);
- res.end -= __le32_to_cpu(pfn_sb->end_trunc);
- }
+ ndns = nd_pfn->ndns;
+ offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
+ end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ } else
+ ndns = to_ndns(dev);
+ nsio = to_nd_namespace_io(&ndns->dev);
+ res.start = nsio->res.start + offset;
+ res.end = nsio->res.end - end_trunc;
nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 4b7715e29cff..05a912359939 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -54,6 +54,7 @@ static int nd_region_probe(struct device *dev)
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
+ nd_region->dax_seed = nd_dax_create(nd_region);
if (err == 0)
return 0;
@@ -86,6 +87,7 @@ static int nd_region_remove(struct device *dev)
nd_region->ns_seed = NULL;
nd_region->btt_seed = NULL;
nd_region->pfn_seed = NULL;
+ nd_region->dax_seed = NULL;
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 139bf71ca549..9e1b054e0e61 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -306,6 +306,23 @@ static ssize_t pfn_seed_show(struct device *dev,
}
static DEVICE_ATTR_RO(pfn_seed);
+static ssize_t dax_seed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ ssize_t rc;
+
+ nvdimm_bus_lock(dev);
+ if (nd_region->dax_seed)
+ rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
+ else
+ rc = sprintf(buf, "\n");
+ nvdimm_bus_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(dax_seed);
+
static ssize_t read_only_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -335,6 +352,7 @@ static struct attribute *nd_region_attributes[] = {
&dev_attr_mappings.attr,
&dev_attr_btt_seed.attr,
&dev_attr_pfn_seed.attr,
+ &dev_attr_dax_seed.attr,
&dev_attr_read_only.attr,
&dev_attr_set_cookie.attr,
&dev_attr_available_size.attr,
@@ -353,6 +371,9 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
return 0;
+ if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
+ return 0;
+
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
@@ -441,6 +462,13 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
nd_region_create_pfn_seed(nd_region);
nvdimm_bus_unlock(dev);
}
+ if (is_nd_dax(dev) && probe) {
+ nd_region = to_nd_region(dev->parent);
+ nvdimm_bus_lock(dev);
+ if (nd_region->dax_seed == dev)
+ nd_region_create_dax_seed(nd_region);
+ nvdimm_bus_unlock(dev);
+ }
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
@@ -718,6 +746,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
ida_init(&nd_region->ns_ida);
ida_init(&nd_region->btt_ida);
ida_init(&nd_region->pfn_ida);
+ ida_init(&nd_region->dax_ida);
dev = &nd_region->dev;
dev_set_name(dev, "region%d", nd_region->id);
dev->parent = &nvdimm_bus->dev;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 24ccda303efb..4fd733ff72b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result > 0) {
dev_err(dev->ctrl.device,
"Could not set queue count (%d)\n", result);
- nr_io_queues = 0;
- result = 0;
+ return 0;
}
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* If we enable msix early due to not intx, disable it again before
* setting up the full range we need.
*/
- if (!pdev->irq)
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ else if (pdev->msix_enabled)
pci_disable_msix(pdev);
for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (pci_enable_device_mem(pdev))
return result;
- dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
}
/*
- * Some devices don't advertse INTx interrupts, pre-enable a single
- * MSIX vec for setup. We'll adjust this later.
+ * Some devices and/or platforms don't advertise or work with INTx
+ * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+ * adjust this later.
*/
- if (!pdev->irq) {
- result = pci_enable_msix(pdev, dev->entry, 1);
- if (result < 0)
- goto disable;
+ if (pci_enable_msix(pdev, dev->entry, 1)) {
+ pci_enable_msi(pdev);
+ dev->entry[0].vector = pdev->irq;
+ }
+
+ if (!dev->entry[0].vector) {
+ result = -ENODEV;
+ goto disable;
}
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ goto out;
+
set_bit(NVME_CTRL_RESETTING, &dev->flags);
result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- del_timer_sync(&dev->watchdog_timer);
-
set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
flush_work(&dev->async_work);
+ flush_work(&dev->reset_work);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a00abc..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
}
EXPORT_SYMBOL(pci_write_vpd);
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev: pci device struct
+ * @len: size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
/**
@@ -498,9 +511,23 @@ out:
return ret ? ret : count;
}
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (len == 0 || len > PCI_VPD_MAX_SIZE)
+ return -EIO;
+
+ vpd->valid = 1;
+ vpd->len = len;
+
+ return 0;
+}
+
static const struct pci_vpd_ops pci_vpd_ops = {
.read = pci_vpd_read,
.write = pci_vpd_write,
+ .set_size = pci_vpd_set_size,
};
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
return ret;
}
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ int ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_set_vpd_size(tdev, len);
+ pci_dev_put(tdev);
+ return ret;
+}
+
static const struct pci_vpd_ops pci_vpd_f0_ops = {
.read = pci_vpd_f0_read,
.write = pci_vpd_f0_write,
+ .set_size = pci_vpd_f0_set_size,
};
int pci_vpd_init(struct pci_dev *dev)
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,7 +32,7 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
struct imx6_pcie {
- struct gpio_desc *reset_gpio;
+ int reset_gpio;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpio) {
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
}
return 0;
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010f0ed1..342b6918bbde 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+ if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
size = dev->cfg_size;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb93481573..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5a8a11..f70090897fdf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
- /* Restore and enable the counter */
- armpmu_start(event, PERF_EF_RELOAD);
+ /*
+ * Restore and enable the counter.
+ * armpmu_start() indirectly calls
+ *
+ * perf_event_update_userpage()
+ *
+ * which requires RCU read locking to be functional.
+ * Wrap the call in RCU_NONIDLE() so the RCU subsystem
+ * knows this CPU is not idle, from an RCU perspective,
+ * for the duration of the armpmu_start() call.
+ */
+ RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
break;
default:
break;
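For context, a short hedged sketch of what the RCU_NONIDLE() wrapper buys: any RCU read-side work done from a CPU-PM (idle) notifier must momentarily mark the CPU as non-idle to RCU. The helper name below is illustrative only.

#include <linux/perf_event.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: touch an event's userpage from an idle-exit path. */
static void pm_exit_touch_event(struct perf_event *event)
{
	/* RCU_NONIDLE() tells RCU this CPU is temporarily non-idle, so the
	 * RCU read-side section inside perf_event_update_userpage() is legal.
	 */
	RCU_NONIDLE(perf_event_update_userpage(event));
}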
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 77e2d02e6bee..793ecb6d87bc 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
+ if (!dev->parent || !dev->parent->of_node)
+ return -ENODEV;
+
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (IS_ERR(dp))
return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
return ret;
}
- dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ dp->grf = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(dp->grf)) {
- dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+ dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
return PTR_ERR(dp->grf);
}
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 887b4c27195f..6ebcf3e41c46 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
struct regmap *grf;
unsigned int reg_offset;
- grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (!dev->parent || !dev->parent->of_node)
+ return -ENODEV;
+
+ grf = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(grf)) {
dev_err(dev, "Missing rockchip,grf property\n");
return PTR_ERR(grf);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index debe1219d76d..fc8cbf611723 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,6 +2,7 @@ config PINCTRL_IMX
bool
select PINMUX
select PINCONF
+ select REGMAP
config PINCTRL_IMX1_CORE
bool
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 2bbe6f7964a7..6ab8c3ccdeea 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
int eint_num, virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+ 128000, 256000};
const struct mtk_desc_pin *pin;
struct irq_data *d;
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
if (!mtk_eint_can_en_debounce(pctl, eint_num))
return -ENOSYS;
- dbnc = ARRAY_SIZE(dbnc_arr);
- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
- if (debounce <= dbnc_arr[i]) {
+ dbnc = ARRAY_SIZE(debounce_time);
+ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+ if (debounce <= debounce_time[i]) {
dbnc = i;
break;
}
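The table now expresses the supported debounce periods in microseconds, so the caller's request is matched against real time values rather than raw register codes. A minimal sketch of the lookup, under the assumption that the requested debounce is given in microseconds:

#include <linux/kernel.h>

static const unsigned int debounce_time_us[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000,
};

/* Return the index of the smallest supported period >= the request, or
 * ARRAY_SIZE(debounce_time_us) if the request is out of range.
 * E.g. a 20000 us request selects index 3 (32000 us).
 */
static unsigned int pick_debounce_index(unsigned int requested_us)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debounce_time_us); i++)
		if (requested_us <= debounce_time_us[i])
			return i;

	return ARRAY_SIZE(debounce_time_us);
}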
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb126d56ad40..cf9bafa10acf 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
/* Parse pins in each row from LSB */
while (mask) {
- bit_pos = ffs(mask);
+ bit_pos = __ffs(mask);
pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
- mask_pos = ((pcs->fmask) << (bit_pos - 1));
+ mask_pos = ((pcs->fmask) << bit_pos);
val_pos = val & mask_pos;
submask = mask & mask_pos;
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "pinctrl-single,function-mask",
&pcs->fmask);
if (!ret) {
- pcs->fshift = ffs(pcs->fmask) - 1;
+ pcs->fshift = __ffs(pcs->fmask);
pcs->fmax = pcs->fmask >> pcs->fshift;
} else {
/* If mask property doesn't exist, function mux is invalid. */
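The switch from ffs() to __ffs() matters because the two differ by one: ffs() returns a 1-based bit index (and 0 for an all-zero word), while __ffs() returns a 0-based index and is undefined for zero. The old code compensated with "- 1"; the new code must not. A small sketch of deriving a field shift from a mask:

#include <linux/bitops.h>
#include <linux/types.h>

/* For mask = 0x0000ff00: ffs(mask) == 9, __ffs(mask) == 8, so the shift of
 * the field described by the mask is simply __ffs(mask).
 */
static unsigned int field_shift(u32 mask)
{
	return mask ? __ffs(mask) : 0;
}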
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
arg0.integer.value = reg;
status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
*ret = lret;
- return (status != AE_OK) ? -EINVAL : 0;
+ return 0;
}
/**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
DEFINE_CONV(normal, 1, 2, 3);
DEFINE_CONV(y_inverted, 1, -2, 3);
DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
DEFINE_CONV(z_inverted, 1, 2, -3);
DEFINE_CONV(xy_swap, 2, 1, 3);
DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8c1424..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
}
static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+ .freeze = intel_hid_pl_suspend_handler,
+ .restore = intel_hid_pl_resume_handler,
.suspend = intel_hid_pl_suspend_handler,
.resume = intel_hid_pl_resume_handler,
};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85c70a8..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- /* This is index 0 to cover BIOS data register */
punit_res = punit_res_array;
+ /* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
*punit_res = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
+ /* This is index 1 to cover BIOS interface register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_IFACE_INDEX);
if (!res) {
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- /* This is index 1 to cover BIOS interface register */
*++punit_res = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
+ /* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
- /* This is index 2 to cover ISP data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
+ /* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
- /* This is index 3 to cover ISP interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
+ /* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
- /* This is index 4 to cover GTD data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
+ /* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
- /* This is index 5 to cover GTD interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
+ /*
+ * The following resources are required
+ * - BIOS_IPC BASE_DATA
+ * - BIOS_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
+ /*
+ * The following resources are optional
+ * - ISPDRIVER_IPC BASE_DATA
+ * - ISPDRIVER_IPC BASE_IFACE
+ * - GTDRIVER_IPC BASE_DATA
+ * - GTDRIVER_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ }
return 0;
}
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f83e82..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
{
u32 telem_ctrl = 0;
- int ret;
+ int ret = 0;
mutex_lock(&(telm_conf->telem_lock));
if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab541a22..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
fan_update_desired_level(s);
mutex_unlock(&fan_mutex);
+ if (rc)
+ return rc;
if (status)
*status = s;
- return rc;
+ return 0;
}
static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6b3df5..fad968eb75f6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
.max_register = FTM_PWMLOAD,
.volatile_reg = fsl_pwm_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee5bae1..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@ out:
* A user-initiated temperature conversion is not started by this function,
* so the temperature is updated once every 64 seconds.
*/
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- s16 temp;
+ s32 temp;
ret = ds3231_hwmon_read_temp(dev, &temp);
if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
return PTR_ERR(ds1307->rtc);
}
- if (ds1307_can_wakeup_device) {
+ if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
/* Disable request for an IRQ */
want_irq = false;
dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf51b1e..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
- device_unregister(&dev_info->dev);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
segment_unload(entry->segment_name);
- put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
+ device_unregister(&dev_info->dev);
+ put_device(&dev_info->dev);
+
rc = count;
out_buf:
kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
if (req->cmd_type != REQ_TYPE_FS) {
blk_start_request(req);
blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, -EIO);
continue;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c37eedc35a24..3c3dc4a3d52c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -376,6 +376,8 @@ config MTK_THERMAL
tristate "Temperature sensor driver for mediatek SoCs"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
+ depends on NVMEM || NVMEM=n
+ depends on RESET_CONTROLLER
default y
help
Enable this option if you want to have support for thermal management
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 3d93b1c07cee..507632b9648e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -27,7 +27,6 @@
#include <linux/thermal.h>
#include <linux/reset.h>
#include <linux/types.h>
-#include <linux/nvmem-consumer.h>
/* AUXADC Registers */
#define AUXADC_CON0_V 0x000
@@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
module_platform_driver(mtk_thermal_driver);
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
MODULE_DESCRIPTION("Mediatek thermal driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 49ac23d3e776..d8ec44b194d6 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
* otherwise, it returns a corresponding ERR_PTR(). Caller must
* check the return value with help of IS_ERR() helper.
*/
-static struct __thermal_zone *
-thermal_of_build_thermal_zone(struct device_node *np)
+static struct __thermal_zone
+__init *thermal_of_build_thermal_zone(struct device_node *np)
{
struct device_node *child = NULL, *gchild;
struct __thermal_zone *tz;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..2f1a863a8e15 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
capped_extra_power = 0;
extra_power = 0;
for (i = 0; i < num_actors; i++) {
- u64 req_range = req_power[i] * power_range;
+ u64 req_range = (u64)req_power[i] * power_range;
granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);
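The added cast is an integer-promotion fix: req_power[i] and power_range are both 32-bit, so their product used to be computed in 32 bits and could silently wrap before the widening assignment to the 64-bit req_range. A minimal illustration with made-up numbers:

#include <linux/types.h>

static u64 scale_power(u32 req_power, u32 power_range)
{
	/* Without the (u64) cast, e.g. 3000000 * 5000 (= 1.5e10) wraps in
	 * 32-bit arithmetic before the result is ever stored in a u64.
	 */
	return (u64)req_power * power_range;
}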
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4b54653ecf8..f1db49625555 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
- unsigned long temperature;
+ int temperature;
if (!tz->ops->set_trip_temp)
return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
return -EINVAL;
- if (kstrtoul(buf, 10, &temperature))
+ if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int ret = 0;
- unsigned long temperature;
+ int temperature;
- if (kstrtoul(buf, 10, &temperature))
+ if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
if (!tz->ops->set_emul_temp) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b507ef..0058d9fbf931 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
/* this is called once with whichever end is closed last */
static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- struct inode *ptmx_inode;
+ struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
- ptmx_inode = tty->driver_data;
+ fsi = tty->driver_data;
else
- ptmx_inode = tty->link->driver_data;
- devpts_kill_index(ptmx_inode, tty->index);
- devpts_del_ref(ptmx_inode);
+ fsi = tty->link->driver_data;
+ devpts_kill_index(fsi, tty->index);
+ devpts_put_ref(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -720,6 +720,7 @@ static const struct tty_operations pty_unix98_ops = {
static int ptmx_open(struct inode *inode, struct file *filp)
{
+ struct pts_fs_info *fsi;
struct tty_struct *tty;
struct inode *slave_inode;
int retval;
@@ -734,47 +735,41 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
+ fsi = devpts_get_ref(inode, filp);
+ retval = -ENODEV;
+ if (!fsi)
+ goto out_free_file;
+
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
- index = devpts_new_index(inode);
- if (index < 0) {
- retval = index;
- mutex_unlock(&devpts_mutex);
- goto err_file;
- }
-
+ index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
- mutex_lock(&tty_mutex);
- tty = tty_init_dev(ptm_driver, index);
+ retval = index;
+ if (index < 0)
+ goto out_put_ref;
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto out;
- }
+ mutex_lock(&tty_mutex);
+ tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- tty->driver_data = inode;
+ retval = PTR_ERR(tty);
+ if (IS_ERR(tty))
+ goto out;
/*
- * In the case where all references to ptmx inode are dropped and we
- * still have /dev/tty opened pointing to the master/slave pair (ptmx
- * is closed/released before /dev/tty), we must make sure that the inode
- * is still valid when we call the final pty_unix98_shutdown, thus we
- * hold an additional reference to the ptmx inode. For the same /dev/tty
- * last close case, we also need to make sure the super_block isn't
- * destroyed (devpts instance unmounted), before /dev/tty is closed and
- * on its release devpts_kill_index is called.
+ * From here on out, the tty is "live", and the index and
+ * fsi will be killed/put by the tty_release()
*/
- devpts_add_ref(inode);
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = fsi;
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(inode,
+ slave_inode = devpts_pty_new(fsi,
MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
tty->link);
if (IS_ERR(slave_inode)) {
@@ -793,12 +788,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return 0;
err_release:
tty_unlock(tty);
+ // This will also put-ref the fsi
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
- devpts_kill_index(inode, index);
-err_file:
+ devpts_kill_index(fsi, index);
+out_put_ref:
+ devpts_put_ref(fsi);
+out_free_file:
tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e213da01a3d7..00ad2637b08c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
/*
* Empty the RX FIFO, we are not interested in anything
* received during the half-duplex transmission.
+ * Enable previously disabled RX interrupts.
*/
- if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
serial8250_clear_fifos(p);
+
+ serial8250_rpm_get(p);
+
+ p->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_port_out(&p->port, UART_IER, p->ier);
+
+ serial8250_rpm_put(p);
+ }
}
static void serial8250_em485_handle_stop_tx(unsigned long arg)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 64742a086ae3..4d7cb9c04fce 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
config SERIAL_8250_RT288X
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
depends on SERIAL_8250
- depends on MIPS || COMPILE_TEST
default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
help
Selecting this option will add support for the alternate register
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9fdfc8bf47f..d08baa668d5d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
iowrite32be(val, addr);
}
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
.in = uartlite_inbe32,
.out = uartlite_outbe32,
};
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
iowrite32(val, addr);
}
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
.in = uartlite_inle32,
.out = uartlite_outle32,
};
static inline u32 uart_in32(u32 offset, struct uart_port *port)
{
- const struct uartlite_reg_ops *reg_ops = port->private_data;
+ struct uartlite_reg_ops *reg_ops = port->private_data;
return reg_ops->in(port->membase + offset);
}
static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
- const struct uartlite_reg_ops *reg_ops = port->private_data;
+ struct uartlite_reg_ops *reg_ops = port->private_data;
reg_ops->out(val, port->membase + offset);
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 83fd30b0577c..a6c4a1b895bd 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
int err;
unsigned long flags;
+ if (!cur) /* nothing to do */
+ return;
+
acm->putbuffer = NULL;
err = usb_autopm_get_interface_async(acm->control);
spin_lock_irqsave(&acm->write_lock, flags);
if (err < 0) {
cur->use = 0;
+ acm->putbuffer = cur;
goto out;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f9d42cf23e55..7859d738df41 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
+
+ /*
+ * Companion device should be either a UHCI, OHCI or EHCI host
+ * controller, otherwise skip.
+ */
+ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+ companion->class != CL_EHCI)
+ continue;
+
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fa20f5a99d12..34277ced26bd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1150,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
+ WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -1163,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err_usb2phy_power;
+
usb_phy_init(dwc->usb3_phy);
usb_phy_init(dwc->usb2_phy);
ret = phy_init(dwc->usb2_generic_phy);
if (ret < 0)
- return ret;
+ goto err_usb3phy_power;
ret = phy_init(dwc->usb3_generic_phy);
if (ret < 0)
@@ -1200,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
err_usb2phy_init:
phy_exit(dwc->usb2_generic_phy);
+err_usb3phy_power:
+ phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+ phy_power_off(dwc->usb2_generic_phy);
+
return ret;
}
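The new error labels follow the usual acquire-in-order, release-in-reverse unwind idiom: if powering on the second PHY (or a later init step) fails, everything powered on before it is switched off again. A stripped-down sketch of that pattern, with illustrative names:

#include <linux/phy/phy.h>

static int resume_phys_sketch(struct phy *usb2, struct phy *usb3)
{
	int ret;

	ret = phy_power_on(usb2);
	if (ret < 0)
		return ret;

	ret = phy_power_on(usb3);
	if (ret < 0)
		goto err_usb2;

	return 0;

err_usb2:
	phy_power_off(usb2);
	return ret;
}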
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe1b6a7..cebf9e38b60a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc, &dwc3_mode_fops);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
}
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc, &dwc3_testmode_fops);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_link_state_fops);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
}
return 0;
+err2:
+ kfree(dwc->regset);
+
err1:
debugfs_remove_recursive(root);
@@ -686,5 +689,5 @@ err0:
void dwc3_debugfs_exit(struct dwc3 *dwc)
{
debugfs_remove_recursive(dwc->root);
- dwc->root = NULL;
+ kfree(dwc->regset);
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 22e9606d8e08..55da2c7f727f 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "get_sync failed with err %d\n", ret);
- goto err0;
+ goto err1;
}
dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
ret = dwc3_omap_extcon_register(omap);
if (ret < 0)
- goto err2;
+ goto err1;
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
- goto err3;
+ goto err2;
}
dwc3_omap_enable_irqs(omap);
return 0;
-err3:
+err2:
extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
-err2:
- dwc3_omap_disable_irqs(omap);
err1:
pm_runtime_put_sync(dev);
-
-err0:
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index d54a028cdfeb..8e4a1b195e9b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2936,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
+ if (!dwc->gadget_driver)
+ return 0;
+
if (dwc->pullups_connected) {
dwc3_gadget_disable_irq(dwc);
dwc3_gadget_run_stop(dwc, true, true);
@@ -2954,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
struct dwc3_ep *dep;
int ret;
+ if (!dwc->gadget_driver)
+ return 0;
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index de9ffd60fcfa..524e233d48de 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -651,6 +651,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+ ssp_cap->bReserved = 0;
+ ssp_cap->wReserved = 0;
/* SSAC = 1 (2 attributes) */
ssp_cap->bmAttributes = cpu_to_le32(1);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e21ca2bd6839..15b648cbc75c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
work);
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
+ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
- if (io_data->ffs->ffs_eventfd &&
- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
usb_ep_free_request(io_data->ep, io_data->req);
- io_data->kiocb->private = NULL;
if (io_data->read)
kfree(io_data->to_free);
kfree(io_data->buf);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 80c1de239e9a..bad0d1f9a41d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,6 +1861,12 @@ no_bw:
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ xhci->usb2_ports = NULL;
+ xhci->usb3_ports = NULL;
+ xhci->port_array = NULL;
+ xhci->rh_bw = NULL;
+ xhci->ext_caps = NULL;
+
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7a1c42..48672fac7ff3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
static const char hcd_name[] = "xhci_hcd";
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5c15e9bc5f7a..474b5fa14900 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+
+ /*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs cannot actually use 64-bit memory
+ * addresses, so clear the AC64 bit of xhci->hcc_params here so that
+ * xhci_gen_setup() falls back to
+ * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
+ */
+ if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+ xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
/* called during probe() after chip reset completes */
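The quirk only takes effect later, in xhci_gen_setup(): with AC64 masked off, the core keeps a 32-bit coherent DMA mask instead of switching to 64-bit. A hedged sketch of the fallback that ends up being used:

#include <linux/dma-mapping.h>

/* Illustrative only: constrain a controller to 32-bit coherent DMA. */
static int force_32bit_dma(struct device *dev)
{
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}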
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3936c4..529c3c40f901 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
#include "xhci.h" /* for hcd_to_xhci() */
enum xhci_plat_type {
- XHCI_PLAT_TYPE_MARVELL_ARMADA,
+ XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7cf66212ceae..99b4ff42f7a0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
- if (xhci->xhc_state) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d51ee0c3cf9f..9e71c96ad74a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+ /* clear state flags. Including dying, halted or removing */
+ xhci->xhc_state = 0;
return ret;
}
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Resume root hubs only when have pending events. */
status = readl(&xhci->op_regs->status);
if (status & STS_EINT) {
- usb_hcd_resume_root_hub(hcd);
usb_hcd_resume_root_hub(xhci->shared_hcd);
+ usb_hcd_resume_root_hub(hcd);
}
}
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return retval;
}
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying or halted */
+ if (xhci->xhc_state) /* dying, removing or halted */
goto out;
if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Reset complete\n");
+ /*
+ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+ * of HCCPARAMS1 is set to 1. However, those xHCs cannot actually use
+ * 64-bit memory addresses, so clear the AC64 bit of xhci->hcc_params
+ * here so that this xhci_gen_setup() falls back to
+ * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
+ */
+ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+ xhci->hcc_params &= ~BIT(0);
+
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e293e0974f48..6c629c97f8ad 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+#define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
#define XHCI_PME_STUCK_QUIRK (1 << 20)
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
+#define XHCI_NO_64BIT_SUPPORT (1 << 23)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 13e4cc31bc79..16bc679dc2fc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+static int uas_target_alloc(struct scsi_target *starget)
+{
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)
+ dev_to_shost(starget->dev.parent)->hostdata;
+
+ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+ starget->no_report_luns = 1;
+
+ return 0;
+}
+
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
+ .target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
+ /*
+ * 1 tag is reserved for untagged commands, plus
+ * 1 tag to avoid off-by-one errors in some bridge firmware
+ */
+ shost->can_queue = devinfo->qdepth - 2;
+
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ "Seagate",
+ "Expansion Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_LUNS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
+ case 'j':
+ f |= US_FL_NO_REPORT_LUNS;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5851c7..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
fb->off_ienb = CLCD_PL111_IENB;
fb->off_cntl = CLCD_PL111_CNTL;
} else {
-#ifdef CONFIG_ARCH_VERSATILE
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
-#else
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+ if (of_machine_is_compatible("arm,versatile-ab") ||
+ of_machine_is_compatible("arm,versatile-pb")) {
+ fb->off_ienb = CLCD_PL111_IENB;
+ fb->off_cntl = CLCD_PL111_CNTL;
+ } else {
+ fb->off_ienb = CLCD_PL110_IENB;
+ fb->off_cntl = CLCD_PL110_CNTL;
+ }
}
fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
char *desc, struct gpio_desc **gpiod)
{
- struct gpio_desc *gd;
int r;
- *gpiod = NULL;
-
r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r)
+ if (r) {
+ *gpiod = NULL;
return r == -ENOENT ? 0 : r;
+ }
- gd = gpio_to_desc(gpio);
- if (IS_ERR(gd))
- return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+ *gpiod = gpio_to_desc(gpio);
- *gpiod = gd;
return 0;
}
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 7f5804537d30..2fc8c43ce531 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,6 +26,7 @@
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
+#include <linux/namei.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>
@@ -81,13 +82,14 @@ EXPORT_SYMBOL(fscrypt_release_ctx);
/**
* fscrypt_get_ctx() - Gets an encryption context
* @inode: The inode for which we are doing the crypto
+ * @gfp_flags: The gfp flag for memory allocation
*
* Allocates and initializes an encryption context.
*
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
{
struct fscrypt_ctx *ctx = NULL;
struct fscrypt_info *ci = inode->i_crypt_info;
@@ -113,7 +115,7 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
if (!ctx) {
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
@@ -147,7 +149,8 @@ typedef enum {
static int do_page_crypto(struct inode *inode,
fscrypt_direction_t rw, pgoff_t index,
- struct page *src_page, struct page *dest_page)
+ struct page *src_page, struct page *dest_page,
+ gfp_t gfp_flags)
{
u8 xts_tweak[FS_XTS_TWEAK_SIZE];
struct skcipher_request *req = NULL;
@@ -157,7 +160,7 @@ static int do_page_crypto(struct inode *inode,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
- req = skcipher_request_alloc(tfm, GFP_NOFS);
+ req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
@@ -199,10 +202,9 @@ static int do_page_crypto(struct inode *inode,
return 0;
}
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
- ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
- GFP_NOWAIT);
+ ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
ctx->flags |= FS_WRITE_PATH_FL;
@@ -213,6 +215,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
* fscrypt_encrypt_page() - Encrypts a page
* @inode: The inode for which the encryption should take place
* @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags: The gfp flag for memory allocation
*
* Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
* encryption context.
@@ -225,7 +228,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
* error value or NULL.
*/
struct page *fscrypt_encrypt_page(struct inode *inode,
- struct page *plaintext_page)
+ struct page *plaintext_page, gfp_t gfp_flags)
{
struct fscrypt_ctx *ctx;
struct page *ciphertext_page = NULL;
@@ -233,18 +236,19 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
BUG_ON(!PageLocked(plaintext_page));
- ctx = fscrypt_get_ctx(inode);
+ ctx = fscrypt_get_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
return (struct page *)ctx;
/* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ctx->w.control_page = plaintext_page;
err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
+ plaintext_page, ciphertext_page,
+ gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
goto errout;
@@ -275,7 +279,7 @@ int fscrypt_decrypt_page(struct page *page)
BUG_ON(!PageLocked(page));
return do_page_crypto(page->mapping->host,
- FS_DECRYPT, page->index, page, page);
+ FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
@@ -289,11 +293,11 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
- ctx = fscrypt_get_ctx(inode);
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
@@ -301,11 +305,12 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
while (len--) {
err = do_page_crypto(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page);
+ ZERO_PAGE(0), ciphertext_page,
+ GFP_NOFS);
if (err)
goto errout;
- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
@@ -345,13 +350,20 @@ EXPORT_SYMBOL(fscrypt_zeroout_range);
*/
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct inode *dir = d_inode(dentry->d_parent);
- struct fscrypt_info *ci = dir->i_crypt_info;
+ struct dentry *dir;
+ struct fscrypt_info *ci;
int dir_has_key, cached_with_key;
- if (!dir->i_sb->s_cop->is_encrypted(dir))
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ dir = dget_parent(dentry);
+ if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+ dput(dir);
return 0;
+ }
+ ci = d_inode(dir)->i_crypt_info;
if (ci && ci->ci_keyring_key &&
(ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED) |
@@ -363,6 +375,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
spin_unlock(&dentry->d_lock);
dir_has_key = (ci != NULL);
+ dput(dir);
/*
* If the dentry was cached without the key, and it is a
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index bece948b363d..8580831ed237 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
if (unlikely(!inode))
return failed_creating(dentry);
- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
dentry->d_fsdata = (void *)f;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 655f21f99160..0af8e7d70d27 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
struct pts_fs_info {
struct ida allocated_ptys;
struct pts_mount_opts mount_opts;
+ struct super_block *sb;
struct dentry *ptmx_dentry;
};
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
.show_options = devpts_show_options,
};
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
{
struct pts_fs_info *fsi;
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
ida_init(&fsi->allocated_ptys);
fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+ fsi->sb = sb;
return fsi;
}
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_op = &devpts_sops;
s->s_time_gran = 1;
- s->s_fs_info = new_pts_fs_info();
+ s->s_fs_info = new_pts_fs_info(s);
if (!s->s_fs_info)
goto fail;
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
* to the System V naming convention
*/
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
- struct pts_fs_info *fsi;
int index;
int ida_ret;
- if (!sb)
+ if (!fsi)
return -ENODEV;
- fsi = DEVPTS_SB(sb);
retry:
if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
return index;
}
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
- struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
mutex_lock(&allocated_ptys_lock);
ida_remove(&fsi->allocated_ptys, idx);
pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
/*
* pty code needs to hold extra references in case of last /dev/tty close
*/
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+ struct super_block *sb;
+ struct pts_fs_info *fsi;
+
+ sb = pts_sb_from_inode(ptmx_inode);
+ if (!sb)
+ return NULL;
+ fsi = DEVPTS_SB(sb);
+ if (!fsi)
+ return NULL;
atomic_inc(&sb->s_active);
- ihold(ptmx_inode);
+ return fsi;
}
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
{
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
- iput(ptmx_inode);
- deactivate_super(sb);
+ deactivate_super(fsi->sb);
}
/**
@@ -604,22 +604,21 @@ void devpts_del_ref(struct inode *ptmx_inode)
*
* The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
*/
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index,
void *priv)
{
struct dentry *dentry;
- struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+ struct super_block *sb;
struct inode *inode;
struct dentry *root;
- struct pts_fs_info *fsi;
struct pts_mount_opts *opts;
char s[12];
- if (!sb)
+ if (!fsi)
return ERR_PTR(-ENODEV);
+ sb = fsi->sb;
root = sb->s_root;
- fsi = DEVPTS_SB(sb);
opts = &fsi->mount_opts;
inode = new_inode(sb);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index db9ae6e18154..6a6c27373b54 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -32,6 +32,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
+#include <linux/namei.h>
#include "ext4_extents.h"
#include "xattr.h"
@@ -482,6 +483,9 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
struct ext4_crypt_info *ci;
int dir_has_key, cached_with_key;
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
dir = dget_parent(dentry);
if (!ext4_encrypted_inode(d_inode(dir))) {
dput(dir);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 53fec0872e60..5dafb9cef12e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -992,7 +992,7 @@ submit_and_realloc:
if (f2fs_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
- ctx = fscrypt_get_ctx(inode);
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
@@ -1092,14 +1092,24 @@ int do_write_data_page(struct f2fs_io_info *fio)
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ gfp_t gfp_flags = GFP_NOFS;
/* wait for GCed encrypted page writeback */
f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
fio->old_blkaddr);
-
- fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
+retry_encrypt:
+ fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+ gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
+ if (err == -ENOMEM) {
+ /* flush pending ios and wait for a while */
+ f2fs_flush_merged_bios(F2FS_I_SB(inode));
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ err = 0;
+ goto retry_encrypt;
+ }
goto out_writepage;
}
}
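The retry loop above is the interesting part: an -ENOMEM from the encryption path no longer fails the write; pending bios are flushed, the task waits for writeback congestion to ease, and the allocation is retried with __GFP_NOFAIL. A generic sketch of that idiom; the helper name is hypothetical:

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

static struct page *alloc_page_with_retry(void)
{
	gfp_t gfp = GFP_NOFS;
	struct page *page;

retry:
	page = alloc_page(gfp);
	if (!page) {
		/* let writeback make progress, then insist on success */
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
		gfp |= __GFP_NOFAIL;
		goto retry;
	}

	return page;
}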
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 443e07705c2a..90d1157a09f9 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -441,7 +441,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
int ret = generic_file_open(inode, filp);
- struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+ struct dentry *dir;
if (!ret && f2fs_encrypted_inode(inode)) {
ret = fscrypt_get_encryption_info(inode);
@@ -450,9 +450,13 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
- if (f2fs_encrypted_inode(dir) &&
- !fscrypt_has_permitted_context(dir, inode))
+ dir = dget_parent(file_dentry(filp));
+ if (f2fs_encrypted_inode(d_inode(dir)) &&
+ !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ dput(dir);
return -EPERM;
+ }
+ dput(dir);
return ret;
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index e85664b7c7d9..19f532e7d35e 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -72,9 +72,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
mutex_init(&p->lock);
p->op = op;
-#ifdef CONFIG_USER_NS
- p->user_ns = file->f_cred->user_ns;
-#endif
+
+ // No refcounting: the lifetime of 'p' is constrained
+ // to the lifetime of the file.
+ p->file = file;
/*
* Wrappers around seq_open(e.g. swaps_open) need to be
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c919b5..bf2d34c9d804 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 val;
preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0))
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
*uval = val;
preempt_enable();
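
The fix above pairs every early error return with the preempt_enable() that the earlier preempt_disable() made necessary. A self-contained C sketch of the balanced enter/exit pattern (userspace analogue only; the counter stands in for the real preemption count):

#include <assert.h>
#include <stdio.h>

static int preempt_count;                  /* stand-in for the preemption counter */
static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* Analogue of futex_atomic_cmpxchg_inatomic(): may bail out at two points. */
static int cmpxchg_user(int fail_at)
{
        preempt_disable();
        if (fail_at == 1) {                /* get_user() failed */
                preempt_enable();
                return -14;                /* -EFAULT */
        }
        if (fail_at == 2) {                /* put_user() failed */
                preempt_enable();
                return -14;
        }
        preempt_enable();
        return 0;
}

int main(void)
{
        for (int f = 0; f <= 2; f++) {
                cmpxchg_user(f);
                assert(preempt_count == 0);   /* balanced on every exit path */
        }
        puts("all exit paths balanced");
        return 0;
}
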
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 461a0558bca4..cebecff536a3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
{
#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+ return false;
#else
return true;
#endif
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3000b2..358a4db72a27 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,38 +15,24 @@
#include <linux/errno.h>
+struct pts_fs_info;
+
#ifdef CONFIG_UNIX98_PTYS
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
/* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
- void *priv);
+struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *);
/* get private structure */
void *devpts_get_priv(struct inode *pts_inode);
/* unlink */
void devpts_pty_kill(struct inode *inode);
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
- dev_t device, int index, void *priv)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
- return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
-
#endif
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cd91f75de49b..6027f6bbb061 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -263,9 +263,9 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
extern struct kmem_cache *fscrypt_info_cachep;
int fscrypt_initialize(void);
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
extern int fscrypt_decrypt_page(struct page *);
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
@@ -299,7 +299,8 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
#endif
/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+ gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -310,7 +311,7 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
}
static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
- struct page *p)
+ struct page *p, gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
}
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8541a913f6a3..d1f904c8b2cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+enum mlx4_pci_status {
+ MLX4_PCI_STATUS_DISABLED,
+ MLX4_PCI_STATUS_ENABLED,
+};
+
struct mlx4_dev_persistent {
struct pci_dev *pdev;
struct mlx4_dev *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
u8 state;
struct mutex interface_state_mutex; /* protect SW state */
u8 interface_state;
+ struct mutex pci_status_mutex; /* sync pci state */
+ enum mlx4_pci_status pci_status;
};
struct mlx4_dev {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ffcff53e3b2b..a55e5be0894f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1250,78 +1250,20 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas);
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
unsigned int gup_flags);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
-/* suppress warnings from use in EXPORT_SYMBOL() */
-#ifndef __DISABLE_GUP_DEPRECATED
-#define __gup_deprecated __deprecated
-#else
-#define __gup_deprecated
-#endif
-/*
- * These macros provide backward-compatibility with the old
- * get_user_pages() variants which took tsk/mm. These
- * functions/macros provide both compile-time __deprecated so we
- * can catch old-style use and not break the build. The actual
- * functions also have WARN_ON()s to let us know at runtime if
- * the get_user_pages() should have been the "remote" variant.
- *
- * These are hideous, but temporary.
- *
- * If you run into one of these __deprecated warnings, look
- * at how you are calling get_user_pages(). If you are calling
- * it with current/current->mm as the first two arguments,
- * simply remove those arguments. The behavior will be the same
- * as it is now. If you are calling it on another task, use
- * get_user_pages_remote() instead.
- *
- * Any questions? Ask Dave Hansen <dave@sr71.net>
- */
-long
-__gup_deprecated
-get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- struct vm_area_struct **vmas);
-#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
- get_user_pages
-#define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \
- get_user_pages8, x, \
- get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- int *locked);
-#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
- get_user_pages_locked
-#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \
- get_user_pages_locked8, x, \
- get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages);
-#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) \
- get_user_pages_unlocked
-#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \
- get_user_pages_unlocked7, x, \
- get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
-
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5489ab756d1a..aee2761d294c 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -15,6 +15,7 @@
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
+#include <linux/badblocks.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
@@ -55,13 +56,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev)
}
/**
- * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * struct nd_namespace_io - device representation of a persistent memory range
* @dev: namespace device created by the nd region driver
* @res: struct resource conversion of a NFIT SPA table
+ * @size: cached resource_size(@res) for fast path size checks
+ * @addr: virtual address to access the namespace range
+ * @bb: badblocks list for the namespace range
*/
struct nd_namespace_io {
struct nd_namespace_common common;
struct resource res;
+ resource_size_t size;
+ void __pmem *addr;
+ struct badblocks bb;
};
/**
@@ -82,6 +89,7 @@ struct nd_namespace_pmem {
* @uuid: namespace name supplied in the dimm label
* @id: ida allocated id
* @lbasize: blk namespaces have a native sector size when btt not present
+ * @size: sum of all the resource ranges allocated to this namespace
* @num_resources: number of dpa extents to claim
* @res: discontiguous dpa extents for given dimm
*/
@@ -91,6 +99,7 @@ struct nd_namespace_blk {
u8 *uuid;
int id;
unsigned long lbasize;
+ resource_size_t size;
int num_resources;
struct resource **res;
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 004b8133417d..932ec74909c6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1111,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_set_vpd_size(struct pci_dev *dev, size_t len);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index ac6d872ce067..57d146fe44dd 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -72,6 +72,18 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
}
#endif
+static inline bool arch_has_pmem_api(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+}
+
+static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
+ size_t size)
+{
+ memcpy(dst, (void __force *) src, size);
+ return 0;
+}
+
/*
* memcpy_from_pmem - read from persistent memory with error handling
* @dst: destination buffer
@@ -83,12 +95,10 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
size_t size)
{
- return arch_memcpy_from_pmem(dst, src, size);
-}
-
-static inline bool arch_has_pmem_api(void)
-{
- return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+ if (arch_has_pmem_api())
+ return arch_memcpy_from_pmem(dst, src, size);
+ else
+ return default_memcpy_from_pmem(dst, src, size);
}
/**
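
The reshuffled memcpy_from_pmem() now falls back to a plain copy whenever the architecture provides no pmem API. A toy standalone C version of the same dispatch (illustrative only; the flag and helpers are stand-ins for IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) and the arch routine):

#include <stdio.h>
#include <string.h>

#define HAS_ARCH_COPY 0   /* stand-in for IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) */

static int arch_copy(void *dst, const void *src, size_t n)
{
        /* pretend this is the error-aware, machine-checked copy */
        memcpy(dst, src, n);
        return 0;
}

static int default_copy(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

static int copy_from_pmem(void *dst, const void *src, size_t n)
{
        if (HAS_ARCH_COPY)
                return arch_copy(dst, src, n);
        return default_copy(dst, src, n);
}

int main(void)
{
        char dst[8];
        int err = copy_from_pmem(dst, "persist", 8);
        printf("%s (%d)\n", dst, err);
        return 0;
}
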
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7da4a7..4ae95f7e8597 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
if (!is_a_nulls(first))
first->pprev = &n->next;
}
+
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals. NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *i, *last = NULL;
+
+ for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+ i = hlist_nulls_next_rcu(i))
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+ } else {
+ hlist_nulls_add_head_rcu(n, h);
+ }
+}
+
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
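
As the new comment notes, tail insertion on a singly linked nulls list costs a full traversal. A minimal standalone C analogue of the shape of hlist_nulls_add_tail_rcu(), without the RCU publication and nulls-marker details:

#include <stdio.h>

struct node { int val; struct node *next; };

/* O(1) head insertion, the common case. */
static void add_head(struct node **head, struct node *n)
{
        n->next = *head;
        *head = n;
}

/* Tail insertion walks the whole list to find the last node first,
 * exactly as the new helper does before linking the entry in. */
static void add_tail(struct node **head, struct node *n)
{
        struct node *i, *last = NULL;

        for (i = *head; i; i = i->next)
                last = i;

        n->next = NULL;
        if (last)
                last->next = n;
        else
                add_head(head, n);
}

int main(void)
{
        struct node a = { 1 }, b = { 2 }, c = { 3 };
        struct node *head = NULL;

        add_head(&head, &a);
        add_tail(&head, &b);
        add_tail(&head, &c);
        for (struct node *i = head; i; i = i->next)
                printf("%d ", i->val);
        printf("\n");                      /* prints: 1 2 3 */
        return 0;
}
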
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00defbaa5..f3d45dd42695 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
#include <linux/mutex.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
struct seq_file {
char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
struct mutex lock;
const struct seq_operations *op;
int poll_event;
-#ifdef CONFIG_USER_NS
- struct user_namespace *user_ns;
-#endif
+ const struct file *file;
void *private;
};
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
- return seq->user_ns;
+ return seq->file->f_cred->user_ns;
#else
extern struct user_namespace init_user_ns;
return &init_user_ns;
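
With this change seq_file no longer caches the user namespace; it keeps the file pointer and derives the namespace from f_cred on demand. A toy, compilable C sketch of that indirection (the struct definitions here are stand-ins, not the kernel ones):

#include <stdio.h>

struct user_namespace { const char *name; };
struct cred           { struct user_namespace *user_ns; };
struct file           { const struct cred *f_cred; };
struct seq_file       { const struct file *file; };

/* Derive the namespace at access time instead of caching it at open time. */
static struct user_namespace *seq_user_ns(const struct seq_file *seq)
{
        return seq->file->f_cred->user_ns;
}

int main(void)
{
        struct user_namespace ns = { "init_user_ns" };
        struct cred cred = { &ns };
        struct file f = { &cred };
        struct seq_file seq = { &f };

        printf("%s\n", seq_user_ns(&seq)->name);
        return 0;
}
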
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a55d0523f75d..1b8a5a7876ce 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -352,8 +352,8 @@ struct thermal_zone_of_device_ops {
struct thermal_trip {
struct device_node *np;
- unsigned long int temperature;
- unsigned long int hysteresis;
+ int temperature;
+ int hysteresis;
enum thermal_trip_type type;
};
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78bd15ad..245f57dbbb61 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
US_FLAG(MAX_SECTORS_240, 0x08000000) \
/* Sets max_sectors to 240 */ \
+ US_FLAG(NO_REPORT_LUNS, 0x10000000) \
+ /* Cannot handle REPORT_LUNS */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c0a92e2c286d..74c9693d4941 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
+#include <net/inet_sock.h>
#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
* softirqs always disables bh.
*/
if (in_serving_softirq()) {
+ struct sock *sk = skb_to_full_sk(skb);
+
/* If there is a sock_cgroup_classid we'll use that. */
- if (!skb->sk)
+ if (!sk || !sk_fullsock(sk))
return 0;
- classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+ classid = sock_cgroup_classid(&sk->sk_cgrp_data);
}
return classid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 295d291269e2..54c779416eec 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr, bool anycast);
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+ int flags);
+
/*
* support functions for ND
*
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d0aeb97aec5d..1be050ada8c5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523bb428..6de665bf1750 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+ unsigned int flags, u16 type,
+ bool nopolicy, bool noxfrm, bool will_cache);
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6df1ce7a411c..5a404c354f4c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -847,6 +847,11 @@ struct sctp_transport {
*/
ktime_t last_time_heard;
+ /* When was the last time that we sent a chunk using this
+ * transport? We use this to check for idle transports
+ */
+ unsigned long last_time_sent;
+
/* Last time(in jiffies) when cwnd is reduced due to the congestion
* indication based on ECNE chunk.
*/
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
struct sctp_sock *);
void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
diff --git a/include/net/sock.h b/include/net/sock.h
index 255d3e03727b..121ffc115c4f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
- hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+ else
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b91370f61be6..6db10228113f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+ const struct sk_buff *next_skb);
/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 2767c55a641e..ca64f0f50b45 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
unsigned int verb);
int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
unsigned int *val);
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+ unsigned int reg, unsigned int *val);
int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
unsigned int val);
int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 2622b33fb2ec..6e0f5f01734c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -717,9 +717,13 @@ __SYSCALL(__NR_membarrier, sys_membarrier)
__SYSCALL(__NR_mlock2, sys_mlock2)
#define __NR_copy_file_range 285
__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SYSCALL(__NR_preadv2, sys_preadv2)
+#define __NR_pwritev2 287
+__SYSCALL(__NR_pwritev2, sys_pwritev2)
#undef __NR_syscalls
-#define __NR_syscalls 286
+#define __NR_syscalls 288
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b71fd0b5cbad..813ffb2e22c9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@ header-y += cyclades.h
header-y += cycx_cfm.h
header-y += dcbnl.h
header-y += dccp.h
+header-y += devlink.h
header-y += dlmconstants.h
header-y += dlm_device.h
header-y += dlm.h
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 59c61e018a86..1eac426aead5 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -240,6 +240,7 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
+#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */
enum nd_driver_flags {
ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM,
@@ -248,6 +249,7 @@ enum nd_driver_flags {
ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
+ ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
enum {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e08f8e9b771..618ef77c302a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+ BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose("BPF_LD_ABS uses reserved fields\n");
return -EINVAL;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ea42e8da861..3e3f6e49eabb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -36,6 +36,7 @@
* @target: The target state
* @thread: Pointer to the hotplug thread
* @should_run: Thread should execute
+ * @rollback: Perform a rollback
* @cb_stat: The state for a single callback (install/uninstall)
* @cb: Single callback function (install/uninstall)
* @result: Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
#ifdef CONFIG_SMP
struct task_struct *thread;
bool should_run;
+ bool rollback;
enum cpuhp_state cb_state;
int (*cb)(unsigned int cpu);
int result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
return __cpu_notify(val, cpu, -1, NULL);
}
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+ BUG_ON(cpu_notify(val, cpu));
+}
+
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
} else {
ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
}
+ } else if (st->rollback) {
+ BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+ undo_cpu_down(cpu, st, cpuhp_ap_states);
+ /*
+ * This is a momentary workaround to keep the notifier users
+ * happy. Will go away once we get rid of the notifiers.
+ */
+ cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+ st->rollback = false;
} else {
/* Cannot happen .... */
BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
read_unlock(&tasklist_lock);
}
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
- BUG_ON(cpu_notify(val, cpu));
-}
-
static int notify_down_prepare(unsigned int cpu)
{
int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
*/
err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
if (err) {
- /* CPU didn't die: tell everyone. Can't complain. */
- cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+ /* CPU refused to die */
irq_unlock_sparse();
+ /* Unpark the hotplug thread so we can rollback there */
+ kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
return err;
}
BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+ if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+ st->target = prev_state;
+ st->rollback = true;
+ cpuhp_kick_ap_work(cpu);
+ }
hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
.name = "notify:online",
.startup = notify_online,
.teardown = notify_down_prepare,
+ .skip_onerr = true,
},
#endif
/*
diff --git a/kernel/futex.c b/kernel/futex.c
index a5d2e74c89e0..c20f06f38ef3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
- else if (curval != uval)
- ret = -EINVAL;
+ } else if (curval != uval) {
+ /*
+ * If an unconditional UNLOCK_PI operation (user space did not
+ * try the TID->0 transition) raced with a waiter setting the
+ * FUTEX_WAITERS flag between get_user() and locking the hash
+ * bucket lock, retry the operation.
+ */
+ if ((FUTEX_TID_MASK & curval) == uval)
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+ }
if (ret) {
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
hb_waiters_dec(hb1);
- plist_add(&q->list, &hb2->chain);
hb_waiters_inc(hb2);
+ plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@@ -2623,6 +2633,15 @@ retry:
if (ret == -EFAULT)
goto pi_faulted;
/*
+ * An unconditional UNLOCK_PI op raced against a waiter
+ * setting the FUTEX_WAITERS bit. Try again.
+ */
+ if (ret == -EAGAIN) {
+ spin_unlock(&hb->lock);
+ put_futex_key(&key);
+ goto retry;
+ }
+ /*
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c37f34b00a11..14777af8e097 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
data = irq_get_irq_data(virq + i);
cpumask_copy(data->common->affinity, dest);
data->common->ipi_offset = offset;
+ irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
}
return virq;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2324ba5310db..ed9410936a22 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1999,6 +1999,7 @@ static inline int get_first_held_lock(struct task_struct *curr,
return ++i;
}
+#ifdef CONFIG_DEBUG_LOCKDEP
/*
* Returns the next chain_key iteration
*/
@@ -2069,6 +2070,7 @@ static void print_collision(struct task_struct *curr,
printk("\nstack backtrace:\n");
dump_stack();
}
+#endif
/*
* Checks whether the chain and the current held locks are consistent
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index eb2a2c9bc3fc..d734b7502001 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
}
if (counter == qstat_pv_hash_hops) {
- u64 frac;
+ u64 frac = 0;
- frac = 100ULL * do_div(stat, kicks);
- frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+ if (kicks) {
+ frac = 100ULL * do_div(stat, kicks);
+ frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+ }
/*
* Return a X.XX decimal number
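
The guarded arithmetic above produces an X.XX decimal from two counters without ever dividing by a zero kick count. The same integer-only computation as a runnable C sketch (do_div() and DIV_ROUND_CLOSEST_ULL() replaced by ordinary / and % plus explicit rounding):

#include <stdio.h>

static void print_hops(unsigned long long hops, unsigned long long kicks)
{
        unsigned long long whole = 0, frac = 0;

        if (kicks) {
                whole = hops / kicks;
                frac  = 100ULL * (hops % kicks);
                frac  = (frac + kicks / 2) / kicks;   /* round to nearest */
        }
        printf("%llu.%02llu\n", whole, frac);
}

int main(void)
{
        print_hops(314, 100);   /* 3.14 */
        print_hops(1, 3);       /* 0.33 */
        print_hops(5, 0);       /* 0.00 -- no division by zero */
        return 0;
}
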
diff --git a/kernel/resource.c b/kernel/resource.c
index 2e78ead30934..9b5f04404152 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v)
{
struct resource *root = m->private;
struct resource *r = v, *p;
+ unsigned long long start, end;
int width = root->end < 0x10000 ? 4 : 8;
int depth;
for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
if (p->parent == root)
break;
+
+ if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+ start = r->start;
+ end = r->end;
+ } else {
+ start = end = 0;
+ }
+
seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
depth * 2, "",
- width, (unsigned long long) r->start,
- width, (unsigned long long) r->end,
+ width, start,
+ width, end,
r->name ? r->name : "<BAD>");
return 0;
}
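
The r_show() change prints zeroed ranges to readers that lack CAP_SYS_ADMIN in the namespace owning the file. A standalone C sketch of that output policy (the privileged flag stands in for the file_ns_capable() check):

#include <stdio.h>

static void show_resource(const char *name, unsigned long long r_start,
                          unsigned long long r_end, int privileged)
{
        unsigned long long start = 0, end = 0;
        int width = 8;

        /* Only privileged readers see the real physical range. */
        if (privileged) {
                start = r_start;
                end = r_end;
        }
        printf("%0*llx-%0*llx : %s\n", width, start, width, end, name);
}

int main(void)
{
        show_resource("System RAM", 0x100000ULL, 0xbfffffffULL, 1);
        show_resource("System RAM", 0x100000ULL, 0xbfffffffULL, 0);
        return 0;
}
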
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 03dd576e6773..59fd7c0b119c 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
free_slot = i;
continue;
}
- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+ if (assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index abcecdc2d0f2..c79d7ea8a38e 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -11,8 +11,7 @@
/*
* Detects 64 bits mode
*/
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
- || defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
@@ -25,9 +24,7 @@
typedef struct _U16_S { u16 v; } U16_S;
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
- || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
- && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define A16(x) (((U16_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
@@ -35,6 +32,10 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - A16(p))
+
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
do { \
A16(p) = v; \
@@ -51,10 +52,13 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT8(s, d) \
put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned(v, (u16 *)(p)); \
- p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+ do { \
+ put_unaligned_le16(v, (u16 *)(p)); \
+ p += 2; \
} while (0)
#endif
@@ -140,9 +144,6 @@ typedef struct _U64_S { u64 v; } U64_S;
#endif
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
#define LZ4_WILDCOPY(s, d, e) \
do { \
LZ4_COPYPACKET(s, d); \
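
The restored LZ4_READ_LITTLEENDIAN_16() computes d = s - LE16(p), with get_unaligned_le16() doing an alignment- and endian-safe load. A runnable C sketch of such a load using plain byte arithmetic (an illustrative analogue, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Load a 16-bit little-endian value from a possibly unaligned pointer.
 * Byte math is safe on any alignment and any host byte order. */
static uint16_t read_le16(const unsigned char *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        unsigned char stream[] = { 0x00, 0x34, 0x12, 0x00 };

        /* the macro would then compute d = s - LE16(p); here we just show the load */
        printf("0x%04x\n", read_le16(stream + 1));   /* prints 0x1234 */
        return 0;
}
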
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index bfbd7096b6ed..0c6317b7db38 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2];
void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
wait_queue_head_t *wqh = &congestion_wqh[sync];
- enum wb_state bit;
+ enum wb_congested_state bit;
bit = sync ? WB_sync_congested : WB_async_congested;
if (test_and_clear_bit(bit, &congested->state))
@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
- enum wb_state bit;
+ enum wb_congested_state bit;
bit = sync ? WB_sync_congested : WB_async_congested;
if (!test_and_set_bit(bit, &congested->state))
diff --git a/mm/gup.c b/mm/gup.c
index fb87aea9edc8..c057784c8444 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,4 +1,3 @@
-#define __DISABLE_GUP_DEPRECATED 1
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -839,7 +838,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
* if (locked)
* up_read(&mm->mmap_sem);
*/
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
int *locked)
{
@@ -847,7 +846,7 @@ long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
write, force, pages, NULL, locked, true,
FOLL_TOUCH);
}
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
/*
* Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
* or if "force" shall be set to 1 (get_user_pages_fast misses the
* "force" parameter).
*/
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages)
{
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
write, force, pages, FOLL_TOUCH);
}
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
/*
* get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
* and mm being operated on are the current task's. We also
* obviously don't pass FOLL_REMOTE in here.
*/
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas)
{
@@ -980,7 +979,7 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
write, force, pages, vmas, NULL, false,
FOLL_TOUCH);
}
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
/**
* populate_vma_page_range() - populate a range of pages in the vma.
@@ -1491,7 +1490,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
- struct mm_struct *mm = current->mm;
int nr, ret;
start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start += nr << PAGE_SHIFT;
pages += nr;
- ret = get_user_pages_unlocked(current, mm, start,
- nr_pages - nr, write, 0, pages);
+ ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0) {
@@ -1519,38 +1516,3 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
}
#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- struct vm_area_struct **vmas)
-{
- WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
- WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
-
- return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages, int *locked)
-{
- WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
- WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
-
- return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
-{
- WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
- WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
-
- return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
diff --git a/mm/nommu.c b/mm/nommu.c
index 102e257cc6c3..c8bd59a03c71 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,8 +15,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define __DISABLE_GUP_DEPRECATED
-
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
@@ -161,7 +159,7 @@ finish_or_fault:
* slab page or a secondary page from a compound page
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas)
{
@@ -175,15 +173,15 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
return __get_user_pages(current, current->mm, start, nr_pages, flags,
pages, vmas, NULL);
}
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
int *locked)
{
- return get_user_pages6(start, nr_pages, write, force, pages, NULL);
+ return get_user_pages(start, nr_pages, write, force, pages, NULL);
}
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(__get_user_pages_unlocked);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages)
{
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
write, force, pages, 0);
}
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
/**
* follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@ static int __meminit init_admin_reserve(void)
return 0;
}
subsys_initcall(init_admin_reserve);
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- struct vm_area_struct **vmas)
-{
- return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- int *locked)
-{
- return get_user_pages_locked6(start, nr_pages, write,
- force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
-{
- return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8570bc7744c2..5a61f35412a0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
left - sizeof(struct ebt_entry_match) < m->match_size)
return -EINVAL;
- match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+ match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+ if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+ request_module("ebt_%s", m->u.name);
+ match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+ }
if (IS_ERR(match))
return PTR_ERR(match);
m->u.match = match;
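
ebt_check_match() now looks the match up without implicitly loading modules and, only when that fails or returns an entry from another family, requests the ebt_ module and retries the lookup. A toy standalone C sketch of the look-up, load, retry pattern (the registry and loader here are invented stand-ins):

#include <stdio.h>
#include <string.h>

static const char *loaded[8] = { "xt_limit" };      /* toy module registry */
static int nloaded = 1;

static const char *find_match(const char *name)
{
        for (int i = 0; i < nloaded; i++)
                if (strcmp(loaded[i], name) == 0)
                        return loaded[i];
        return NULL;
}

static void request_module(const char *name)
{
        printf("loading module %s\n", name);
        loaded[nloaded++] = name;
}

int main(void)
{
        const char *match = find_match("ebt_ip");
        if (!match) {                       /* first lookup failed: load and retry */
                request_module("ebt_ip");
                match = find_match("ebt_ip");
        }
        printf("match: %s\n", match ? match : "(none)");
        return 0;
}
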
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1c8c87..e561f9f07d6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
__skb_push(skb, offset);
err = __vlan_insert_tag(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
- if (err)
+ if (err) {
+ __skb_pull(skb, offset);
return err;
+ }
+
skb->protocol = skb->vlan_proto;
skb->mac_len += VLAN_HLEN;
- __skb_pull(skb, offset);
skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+ __skb_pull(skb, offset);
}
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
if (!fld.daddr) {
fld.daddr = fld.saddr;
- err = -EADDRNOTAVAIL;
if (dev_out)
dev_put(dev_out);
+ err = -EINVAL;
dev_out = init_net.loopback_dev;
+ if (!dev_out->dn_ptr)
+ goto out;
+ err = -EADDRNOTAVAIL;
dev_hold(dev_out);
if (!fld.daddr) {
fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
if (dev_out == NULL)
goto out;
dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+ if (!dn_db)
+ goto e_inval;
/* Possible improvement - check all devices for local addr */
if (dn_dev_islocal(dev_out, fld.daddr)) {
dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
dev_put(dev_out);
dev_out = init_net.loopback_dev;
dev_hold(dev_out);
+ if (!dev_out->dn_ptr)
+ goto e_inval;
fld.flowidn_oif = dev_out->ifindex;
if (res.fi)
dn_fib_info_put(res.fi);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80dc32a2..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
return ret;
}
+ ret = arptable_filter_table_init(&init_net);
+ if (ret) {
+ unregister_pernet_subsys(&arptable_filter_net_ops);
+ kfree(arpfilter_ops);
+ }
+
return ret;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c62299d717..60398a9370e7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
#endif
}
-static struct rtable *rt_dst_alloc(struct net_device *dev,
- unsigned int flags, u16 type,
- bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+ unsigned int flags, u16 type,
+ bool nopolicy, bool noxfrm, bool will_cache)
{
struct rtable *rt;
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
return rt;
}
+EXPORT_SYMBOL(rt_dst_alloc);
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
*/
if (fi && res->prefixlen < 4)
fi = NULL;
+ } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+ (orig_oif != dev_out->ifindex)) {
+ /* For local routes that require a particular output interface
+ * we do not want to cache the result. Caching the result
+ * causes incorrect behaviour when there are multiple source
+ * addresses on the interface, the end result being that if the
+ * intended recipient is waiting on that interface for the
+ * packet, he won't receive it because it will be delivered on
+ * the loopback interface and the IP_PKTINFO ipi_ifindex will
+ * be set to the loopback interface as well.
+ */
+ fi = NULL;
}
fnhe = NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..c124c3c12f7c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
if (skb == tcp_highest_sack(sk))
tcp_advance_highest_sack(sk, skb);
+ tcp_skb_collapse_tstamp(prev, skb);
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
shinfo = skb_shinfo(skb);
if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
- between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+ !before(shinfo->tskey, prior_snd_una) &&
+ before(shinfo->tskey, tcp_sk(sk)->snd_una))
__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
}
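
The new test in tcp_ack_tstamp() treats the acked window as the half-open range [prior_snd_una, snd_una) using wrap-safe sequence comparisons. A runnable C sketch of that check (before() follows the usual TCP sequence-number definition; the range helper is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 comes before seq2" comparison. */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

/* Half-open interval check [lo, hi): !before(key, lo) && before(key, hi).
 * It is empty when lo == hi, which a closed "between(key, lo, hi - 1)"
 * check would instead match for every key. */
static int in_acked_range(uint32_t key, uint32_t lo, uint32_t hi)
{
        return !before(key, lo) && before(key, hi);
}

int main(void)
{
        printf("%d\n", in_acked_range(10, 10, 20));                 /* 1 */
        printf("%d\n", in_acked_range(20, 10, 20));                 /* 0: hi excluded */
        printf("%d\n", in_acked_range(15, 15, 15));                 /* 0: empty range */
        printf("%d\n", in_acked_range(0x00000001, 0xfffffff0, 5));  /* 1: wraps safely */
        return 0;
}
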
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc015cd19..441ae9da3a23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
return window;
}
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+ const struct sk_buff *next_skb)
+{
+ const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
+ u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+
+ if (unlikely(tsflags)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ shinfo->tx_flags |= tsflags;
+ shinfo->tskey = next_shinfo->tskey;
+ }
+}
+
/* Collapses two adjacent SKB's during retransmission. */
static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
{
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
+ tcp_skb_collapse_tstamp(skb, next_skb);
+
sk_wmem_free_skb(sk, next_skb);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..a2e7f55a1f61 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ found:
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock(&hslot2->lock);
- hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
- &hslot2->head);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+ &hslot2->head);
+ else
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ &hslot2->head);
hslot2->count++;
spin_unlock(&hslot2->lock);
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1afcf81..23cec53b568a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3255,6 +3255,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
struct inet6_dev *idev = __in6_dev_get(dev);
int run_pending = 0;
int err;
@@ -3413,6 +3414,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev)
addrconf_type_change(dev, event);
break;
+
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ /* flush all routes if dev is linked to or unlinked from
+ * an L3 master device (e.g., VRF)
+ */
+ if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ addrconf_ifdown(dev, 0);
}
return NOTIFY_OK;
@@ -3438,6 +3448,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
ipv6_mc_unmap(idev);
}
+static bool addr_is_local(const struct in6_addr *addr)
+{
+ return ipv6_addr_type(addr) &
+ (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
static int addrconf_ifdown(struct net_device *dev, int how)
{
struct net *net = dev_net(dev);
@@ -3495,7 +3511,8 @@ restart:
* address is retained on a down event
*/
if (!keep_addr ||
- !(ifa->flags & IFA_F_PERMANENT)) {
+ !(ifa->flags & IFA_F_PERMANENT) ||
+ addr_is_local(&ifa->addr)) {
hlist_del_init_rcu(&ifa->addr_lst);
goto restart;
}
@@ -3544,7 +3561,8 @@ restart:
write_unlock_bh(&idev->lock);
spin_lock_bh(&ifa->lock);
- if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+ if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+ !addr_is_local(&ifa->addr)) {
/* set state to skip the notifier below */
state = INET6_IFADDR_STATE_DEAD;
ifa->state = 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..9dd3882fe6bf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = sk->sk_protocol;
+ fl6->daddr = sk->sk_v6_daddr;
+ fl6->saddr = np->saddr;
+ fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->flowi6_mark = sk->sk_mark;
+ fl6->fl6_dport = inet->inet_dport;
+ fl6->fl6_sport = inet->inet_sport;
+ fl6->flowlabel = np->flow_label;
+
+ if (!fl6->flowi6_oif)
+ fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+ if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+ fl6->flowi6_oif = np->mcast_oif;
+
+ security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+ struct ip6_flowlabel *flowlabel = NULL;
+ struct in6_addr *final_p, final;
+ struct ipv6_txoptions *opt;
+ struct dst_entry *dst;
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct flowi6 fl6;
+ int err = 0;
+
+ if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+ flowlabel = fl6_sock_lookup(sk, np->flow_label);
+ if (!flowlabel)
+ return -EINVAL;
+ }
+ ip6_datagram_flow_key_init(&fl6, sk);
+
+ rcu_read_lock();
+ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+ final_p = fl6_update_dst(&fl6, opt, &final);
+ rcu_read_unlock();
+
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto out;
+ }
+
+ if (fix_sk_saddr) {
+ if (ipv6_addr_any(&np->saddr))
+ np->saddr = fl6.saddr;
+
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ sk->sk_v6_rcv_saddr = fl6.saddr;
+ inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
+ }
+
+ ip6_dst_store(sk, dst,
+ ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+ &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+ ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+ &np->saddr :
+#endif
+ NULL);
+
+out:
+ fl6_sock_release(flowlabel);
+ return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+ struct dst_entry *dst;
+
+ if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+ return;
+
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (!dst || !dst->obsolete ||
+ dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr, *final_p, final;
- struct dst_entry *dst;
- struct flowi6 fl6;
- struct ip6_flowlabel *flowlabel = NULL;
- struct ipv6_txoptions *opt;
+ struct in6_addr *daddr;
int addr_type;
int err;
+ __be32 fl6_flowlabel = 0;
if (usin->sin6_family == AF_INET) {
if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
- memset(&fl6, 0, sizeof(fl6));
- if (np->sndflow) {
- fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
- if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
- flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (!flowlabel)
- return -EINVAL;
- }
- }
+ if (np->sndflow)
+ fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
addr_type = ipv6_addr_type(&usin->sin6_addr);
@@ -145,7 +234,7 @@ ipv4_connected:
}
sk->sk_v6_daddr = *daddr;
- np->flow_label = fl6.flowlabel;
+ np->flow_label = fl6_flowlabel;
inet->inet_dport = usin->sin6_port;
@@ -154,59 +243,13 @@ ipv4_connected:
* destination cache for it.
*/
- fl6.flowi6_proto = sk->sk_protocol;
- fl6.daddr = sk->sk_v6_daddr;
- fl6.saddr = np->saddr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet->inet_dport;
- fl6.fl6_sport = inet->inet_sport;
-
- if (!fl6.flowi6_oif)
- fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
- if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
- fl6.flowi6_oif = np->mcast_oif;
-
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
- rcu_read_lock();
- opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
- final_p = fl6_update_dst(&fl6, opt, &final);
- rcu_read_unlock();
-
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
- err = 0;
- if (IS_ERR(dst)) {
- err = PTR_ERR(dst);
+ err = ip6_datagram_dst_update(sk, true);
+ if (err)
goto out;
- }
-
- /* source address lookup done in ip6_dst_lookup */
-
- if (ipv6_addr_any(&np->saddr))
- np->saddr = fl6.saddr;
-
- if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
- sk->sk_v6_rcv_saddr = fl6.saddr;
- inet->inet_rcv_saddr = LOOPBACK4_IPV6;
- if (sk->sk_prot->rehash)
- sk->sk_prot->rehash(sk);
- }
-
- ip6_dst_store(sk, dst,
- ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
- &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
- ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
- &np->saddr :
-#endif
- NULL);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
out:
- fl6_sock_release(flowlabel);
return err;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed446639219c..d916d6ab9ad2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
return rt;
}
-static struct rt6_info *ip6_dst_alloc(struct net *net,
- struct net_device *dev,
- int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+ struct net_device *dev,
+ int flags)
{
struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
return rt;
}
+EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
+ struct dst_entry *dst;
+
ip6_update_pmtu(skb, sock_net(sk), mtu,
sk->sk_bound_dev_if, sk->sk_mark);
+
+ dst = __sk_dst_get(sk);
+ if (!dst || !dst->obsolete ||
+ dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+ return;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+ ip6_datagram_dst_update(sk, false);
+ bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8125931106be..6bc5c664fa46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
.backlog_rcv = __udpv6_queue_rcv_skb,
+ .release_cb = ip6_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9356ef..7cc1d9c22a9f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize=*ptr++;
if (opsize < 2) /* "silly options" */
return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08c02ab..330ebd600f25 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
skb_queue_purge(&sk->sk_write_queue);
- if (nlk->portid) {
+ if (nlk->portid && nlk->bound) {
struct netlink_notify n = {
.net = sock_net(sk),
.protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e9dd47b2a85b..879185fe183f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
- set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+ set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
true);
memcpy(&flow_key->ipv6.addr.src, masked,
sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
NULL, &flags)
!= NEXTHDR_ROUTING);
- set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+ set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
recalc_csum);
memcpy(&flow_key->ipv6.addr.dst, masked,
sizeof(flow_key->ipv6.addr.dst));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 1b9d286756be..b5fea1101faa 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
} else if (key->eth.type == htons(ETH_P_IPV6)) {
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+ skb_orphan(skb);
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
err = nf_ct_frag6_gather(net, skb, user);
if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f12c17f355d9..18d0becbc46d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
i->ifindex = mreq->mr_ifindex;
i->alen = mreq->mr_alen;
memcpy(i->addr, mreq->mr_address, i->alen);
+ memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
i->count = 1;
i->next = po->mclist;
po->mclist = i;
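
The af_packet hunk above zeroes the unused tail of the fixed-size multicast address buffer after copying mr_alen bytes into it, so later whole-buffer copies or comparisons never see stale memory. A small userspace sketch of the same copy-then-clear pattern; the struct and MAC address here are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    #define MAX_ADDR_LEN 32

    struct mc_entry {
        unsigned char addr[MAX_ADDR_LEN];
        unsigned short alen;
    };

    /* Copy a variable-length hardware address into the fixed-size field and
     * zero the unused tail, so whole-buffer comparisons (or copies back out)
     * never depend on leftover bytes.
     */
    static void set_addr(struct mc_entry *e, const unsigned char *addr,
                         unsigned short alen)
    {
        e->alen = alen;
        memcpy(e->addr, addr, alen);
        memset(e->addr + alen, 0, sizeof(e->addr) - alen);
    }

    int main(void)
    {
        struct mc_entry a, b;
        const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        memset(&a, 0xAA, sizeof(a));    /* simulate stale memory */
        memset(&b, 0x55, sizeof(b));
        set_addr(&a, mac, sizeof(mac));
        set_addr(&b, mac, sizeof(mac));
        printf("addr fields %s\n",
               memcmp(a.addr, b.addr, MAX_ADDR_LEN) ? "differ" : "match");
        return 0;
    }

With the memset the two entries compare equal over the full buffer; without it the comparison would depend on whatever garbage followed the copied bytes.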
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8246fd..6641bcf7c185 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
- __set_bit_le(off, (void *)map->m_page_addrs[i]);
+ set_bit_le(off, (void *)map->m_page_addrs[i]);
}
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
- __clear_bit_le(off, (void *)map->m_page_addrs[i]);
+ clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8764970f0c24..310cabce2311 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
- dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+ dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
/* Advertise flow control */
if (ic->i_flowctl) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c35024207..80742edea96f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
if (validate)
skb = validate_xmit_skb_list(skb, dev);
- if (skb) {
+ if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
HARD_TX_UNLOCK(dev, txq);
+ } else {
+ spin_lock(root_lock);
+ return qdisc_qlen(q);
}
spin_lock(root_lock);
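
The sch_generic hunk above adds an early return for the case where validation consumed the skb, and it re-takes root_lock first so the function still returns with the lock held on every path, as its caller expects. A toy pthread sketch of that lock-balance invariant; the names and the queue-length bookkeeping are illustrative only (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
    static int queue_len = 3;

    /* Called with root_lock held; the lock is dropped around the "transmit"
     * step and must be re-acquired on every return path, including the
     * early out where validation consumed the packet.
     */
    static int do_xmit(int have_skb)
    {
        pthread_mutex_unlock(&root_lock);       /* transmit without the lock */

        if (!have_skb) {
            pthread_mutex_lock(&root_lock);     /* early out: re-lock first */
            return queue_len;
        }

        printf("transmitting one packet\n");
        queue_len--;

        pthread_mutex_lock(&root_lock);         /* normal path re-locks too */
        return queue_len;
    }

    int main(void)
    {
        pthread_mutex_lock(&root_lock);
        printf("qlen after xmit: %d\n", do_xmit(1));
        printf("qlen after early out: %d\n", do_xmit(0));
        pthread_mutex_unlock(&root_lock);
        return 0;
    }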
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8d3d3625130e..084718f9b3da 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
* sender MUST assure that at least one T3-rtx
* timer is running.
*/
- if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
- sctp_transport_reset_timers(transport);
+ if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+ sctp_transport_reset_t3_rtx(transport);
+ transport->last_time_sent = jiffies;
+ }
}
break;
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
error = sctp_outq_flush_rtx(q, packet,
rtx_timeout, &start_timer);
- if (start_timer)
- sctp_transport_reset_timers(transport);
+ if (start_timer) {
+ sctp_transport_reset_t3_rtx(transport);
+ transport->last_time_sent = jiffies;
+ }
/* This can happen on COOKIE-ECHO resend. Only
* one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
list_add_tail(&chunk->transmitted_list,
&transport->transmitted);
- sctp_transport_reset_timers(transport);
+ sctp_transport_reset_t3_rtx(transport);
+ transport->last_time_sent = jiffies;
/* Only let one DATA chunk get bundled with a
* COOKIE-ECHO chunk.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7f0bf798205b..56f364d8f932 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
return SCTP_ERROR_RSRC_LOW;
/* Start the heartbeat timer. */
- if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
- sctp_transport_hold(peer);
+ sctp_transport_reset_hb_timer(peer);
asoc->new_transport = peer;
break;
case SCTP_PARAM_DEL_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7fe56d0acabf..41b081a64752 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_cmd_seq_t *commands,
gfp_t gfp);
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
- struct sctp_transport *t);
/********************************************************************
* Helper functions
********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
struct sctp_association *asoc = transport->asoc;
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
+ u32 elapsed, timeout;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
goto out_unlock;
}
+ /* Check if we should still send the heartbeat or reschedule */
+ elapsed = jiffies - transport->last_time_sent;
+ timeout = sctp_transport_timeout(transport);
+ if (elapsed < timeout) {
+ elapsed = timeout - elapsed;
+ if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
0);
/* Update the hb timer to resend a heartbeat every rto */
- sctp_cmd_hb_timer_update(commands, transport);
+ sctp_transport_reset_hb_timer(transport);
}
if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
* hold a reference on the transport to make sure none of
* the needed data structures go away.
*/
- list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
- if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
- sctp_transport_hold(t);
- }
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+ sctp_transport_reset_hb_timer(t);
}
static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
}
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
- struct sctp_transport *t)
-{
- /* Update the heartbeat timer. */
- if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
- sctp_transport_hold(t);
-}
-
/* Helper function to handle the reception of an HEARTBEAT ACK. */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
/* Update the heartbeat timer. */
- if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
- sctp_transport_hold(t);
+ sctp_transport_reset_hb_timer(t);
if (was_unconfirmed && asoc->peer.transport_count == 1)
sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_HB_TIMER_UPDATE:
t = cmd->obj.transport;
- sctp_cmd_hb_timer_update(commands, t);
+ sctp_transport_reset_hb_timer(t);
break;
case SCTP_CMD_HB_TIMERS_STOP:
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 9b6b48c7524e..81b86678be4d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
/* Start T3_rtx timer if it is not already running and update the heartbeat
* timer. This routine is called every time a DATA chunk is sent.
*/
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
if (!mod_timer(&transport->T3_rtx_timer,
jiffies + transport->rto))
sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+ unsigned long expires;
/* When a data chunk is sent, reset the heartbeat interval. */
- if (!mod_timer(&transport->hb_timer,
- sctp_transport_timeout(transport)))
- sctp_transport_hold(transport);
+ expires = jiffies + sctp_transport_timeout(transport);
+ if (time_before(transport->hb_timer.expires, expires) &&
+ !mod_timer(&transport->hb_timer,
+ expires + prandom_u32_max(transport->rto)))
+ sctp_transport_hold(transport);
}
/* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
/* RTO + timer slack +/- 50% of RTO */
- unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+ unsigned long timeout = trans->rto >> 1;
if (trans->state != SCTP_UNCONFIRMED &&
trans->state != SCTP_PF)
timeout += trans->hbinterval;
- return timeout + jiffies;
+ return timeout;
}
/* Reset transport variables to their initial values */
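
The sctp/transport.c hunk above splits the old reset_timers helper: sctp_transport_timeout() now returns a purely relative timeout, while sctp_transport_reset_hb_timer() folds in the current time plus up to one RTO of random jitter and refuses to pull an already-armed heartbeat timer earlier. A compressed userspace sketch of that timing rule, using made-up millisecond values in place of jiffies:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Illustrative values only: an RTO and heartbeat interval in milliseconds. */
    static const unsigned long rto = 600;
    static const unsigned long hbinterval = 30000;

    /* Relative heartbeat timeout, as the reworked sctp_transport_timeout()
     * computes it: half an RTO plus the heartbeat interval, with no jitter
     * and no absolute time folded in.
     */
    static unsigned long hb_timeout(void)
    {
        return (rto >> 1) + hbinterval;
    }

    /* Arm the heartbeat "timer": now + timeout + up to one RTO of jitter,
     * but never pull an already-armed timer earlier (the time_before()
     * check in the patch).
     */
    static unsigned long reset_hb_timer(unsigned long now, unsigned long cur_expiry)
    {
        unsigned long expires = now + hb_timeout();

        if (cur_expiry >= expires)
            return cur_expiry;                  /* keep the later expiry */
        return expires + ((unsigned long)rand() % rto);
    }

    int main(void)
    {
        unsigned long now = 1000, expiry = 0;

        srand((unsigned int)time(NULL));
        expiry = reset_hb_timer(now, expiry);
        printf("first arm:  expires at %lu\n", expiry);
        /* a second DATA chunk 100ms later must not shorten the timer */
        printf("second arm: expires at %lu\n", reset_hb_timer(now + 100, expiry));
        return 0;
    }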
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 045e11ecd332..244245bcbbd2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -78,6 +78,7 @@ krb5_encrypt(
memcpy(out, in, length);
sg_init_one(sg, out, length);
+ skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, length, local_iv);
@@ -115,6 +116,7 @@ krb5_decrypt(
memcpy(out, in, length);
sg_init_one(sg, out, length);
+ skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, length, local_iv);
@@ -946,7 +948,8 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
return PTR_ERR(hmac);
}
- desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+ GFP_KERNEL);
if (!desc) {
dprintk("%s: failed to allocate shash descriptor for '%s'\n",
__func__, kctx->gk5e->cksum_name);
@@ -1012,7 +1015,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
return PTR_ERR(hmac);
}
- desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+ GFP_KERNEL);
if (!desc) {
dprintk("%s: failed to allocate shash descriptor for '%s'\n",
__func__, kctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 71341ccb9890..65427492b1c9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -451,7 +451,8 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
goto out_err_free_hmac;
- desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+ GFP_KERNEL);
if (!desc) {
dprintk("%s: failed to allocate hash descriptor for '%s'\n",
__func__, ctx->gk5e->cksum_name);
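
The desc allocations in the two gss_krb5 files above are resized to sizeof(*desc) plus crypto_shash_descsize(hmac), because a shash descriptor carries an algorithm-specific context behind its fixed header and sizeof() alone under-allocates it. A userspace analogue of that sizing rule; the 64-byte context is an arbitrary stand-in for the value crypto_shash_descsize() would return:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A fixed header followed by an algorithm-specific context, like
     * struct shash_desc: sizeof() covers only the header, so an allocation
     * must add the runtime context size on top of it.
     */
    struct desc {
        const char *alg_name;
        unsigned char ctx[];            /* flexible, size known at runtime */
    };

    static struct desc *alloc_desc(const char *alg_name, size_t descsize)
    {
        struct desc *d = malloc(sizeof(*d) + descsize);

        if (!d)
            return NULL;
        d->alg_name = alg_name;
        memset(d->ctx, 0, descsize);    /* safe: the tail was allocated */
        return d;
    }

    int main(void)
    {
        /* pretend the hmac(md5) state needs 64 bytes of context */
        struct desc *d = alloc_desc("hmac(md5)", 64);

        if (!d)
            return 1;
        printf("allocated descriptor for %s\n", d->alg_name);
        free(d);
        return 0;
    }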
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
if (err)
goto out_nametbl;
+ INIT_LIST_HEAD(&tn->dist_queue);
err = tipc_topsrv_start(net);
if (err)
goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5504d63503df..eff58dc53aa1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
spinlock_t nametbl_lock;
struct name_table *nametbl;
+ /* Name dist queue */
+ struct list_head dist_queue;
+
/* Topology subscription server */
struct tipc_server *topsrv;
atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ebe9d0ff6e9e..6b626a64b517 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
int sysctl_tipc_named_timeout __read_mostly = 2000;
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
struct distr_queue_item {
struct distr_item i;
u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
kfree_rcu(p, rcu);
}
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct distr_queue_item *e, *tmp;
+
+ spin_lock_bh(&tn->nametbl_lock);
+ list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+ if (e->node != addr)
+ continue;
+ list_del(&e->next);
+ kfree(e);
+ }
+ spin_unlock_bh(&tn->nametbl_lock);
+}
+
void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
{
struct publication *publ, *tmp;
list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
tipc_publ_purge(net, publ, addr);
+ tipc_dist_queue_purge(net, addr);
}
/**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
* tipc_named_add_backlog - add a failed name table update to the backlog
*
*/
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+ u32 type, u32 node)
{
struct distr_queue_item *e;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
unsigned long now = get_jiffies_64();
e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
e->node = node;
e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
memcpy(e, i, sizeof(*i));
- list_add_tail(&e->next, &tipc_dist_queue);
+ list_add_tail(&e->next, &tn->dist_queue);
}
/**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
void tipc_named_process_backlog(struct net *net)
{
struct distr_queue_item *e, *tmp;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
char addr[16];
unsigned long now = get_jiffies_64();
- list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+ list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
if (time_after(e->expires, now)) {
if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
node = msg_orignode(msg);
while (count--) {
if (!tipc_update_nametbl(net, item, node, mtype))
- tipc_named_add_backlog(item, mtype, node);
+ tipc_named_add_backlog(net, item, mtype, node);
item++;
}
kfree_skb(skb);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 662bdd20a748..56214736fe88 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
- if (err)
- return err;
-
if (!skb)
- return -EAGAIN;
+ return err;
dg = (struct vmci_datagram *)skb->data;
if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.3.0-k");
+MODULE_VERSION("1.0.4.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c924260b3d..056a7307862b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
struct wireless_dev *wdev;
struct cfg80211_beacon_registration *reg, *tmp;
- if (state != NETLINK_URELEASE)
+ if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
return NOTIFY_DONE;
rcu_read_lock();
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index e000f44e37b8..c1b7ef3e24c1 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -650,7 +650,7 @@ int main(int argc, char **argv)
}
hdr = fopen(headername, "w");
- if (!out) {
+ if (!hdr) {
perror(headername);
exit(1);
}
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index d1a4d6973330..03c9872c31cf 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -299,13 +299,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
int parm)
{
- int val;
+ unsigned int cmd, val;
- if (codec->regmap)
- regcache_cache_bypass(codec->regmap, true);
- val = snd_hdac_read_parm(codec, nid, parm);
- if (codec->regmap)
- regcache_cache_bypass(codec->regmap, false);
+ cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
+ if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
+ return -1;
return val;
}
EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index fb96aead8257..54babe1c0b16 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -267,6 +267,18 @@ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
+/* check whether intel graphics is present */
+static bool i915_gfx_present(void)
+{
+ static struct pci_device_id ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16 },
+ {}
+ };
+ return pci_dev_present(ids);
+}
+
/**
* snd_hdac_i915_init - Initialize i915 audio component
* @bus: HDA core bus
@@ -286,6 +298,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
struct i915_audio_component *acomp;
int ret;
+ if (!i915_gfx_present())
+ return -ENODEV;
+
acomp = kzalloc(sizeof(*acomp), GFP_KERNEL);
if (!acomp)
return -ENOMEM;
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index bdbcd6b75ff6..87041ddd29cb 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -453,14 +453,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
- unsigned int *val)
+ unsigned int *val, bool uncached)
{
- if (!codec->regmap)
+ if (uncached || !codec->regmap)
return hda_reg_read(codec, reg, val);
else
return regmap_read(codec->regmap, reg, val);
}
+static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
+ unsigned int reg, unsigned int *val,
+ bool uncached)
+{
+ int err;
+
+ err = reg_raw_read(codec, reg, val, uncached);
+ if (err == -EAGAIN) {
+ err = snd_hdac_power_up_pm(codec);
+ if (!err)
+ err = reg_raw_read(codec, reg, val, uncached);
+ snd_hdac_power_down_pm(codec);
+ }
+ return err;
+}
+
/**
* snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
* @codec: the codec object
@@ -472,19 +488,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
unsigned int *val)
{
- int err;
-
- err = reg_raw_read(codec, reg, val);
- if (err == -EAGAIN) {
- err = snd_hdac_power_up_pm(codec);
- if (!err)
- err = reg_raw_read(codec, reg, val);
- snd_hdac_power_down_pm(codec);
- }
- return err;
+ return __snd_hdac_regmap_read_raw(codec, reg, val, false);
}
EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
+/* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the
+ * cache but always via hda verbs.
+ */
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+ unsigned int reg, unsigned int *val)
+{
+ return __snd_hdac_regmap_read_raw(codec, reg, val, true);
+}
+
/**
* snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
* @codec: the codec object
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 7b248cdf06e2..fdcfa29e2205 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -591,7 +591,7 @@ static int sscape_upload_microcode(struct snd_card *card, int version)
}
err = upload_dma_data(sscape, init_fw->data, init_fw->size);
if (err == 0)
- snd_printk(KERN_INFO "sscape: MIDI firmware loaded %d KBs\n",
+ snd_printk(KERN_INFO "sscape: MIDI firmware loaded %zu KBs\n",
init_fw->size >> 10);
release_firmware(init_fw);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 7ca5b89f088a..dfaf1a93fb8a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
bool allow_powerdown)
{
hda_nid_t nid, changed = 0;
- int i, state;
+ int i, state, power;
for (i = 0; i < path->depth; i++) {
nid = path->path[i];
@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
state = AC_PWRST_D0;
else
state = AC_PWRST_D3;
- if (!snd_hda_check_power_state(codec, nid, state)) {
+ power = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_POWER_STATE, 0);
+ if (power != (state | (state << 4))) {
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_POWER_STATE, state);
changed = nid;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b680be0e937d..637b8a0e2a91 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2232,6 +2232,9 @@ static const struct pci_device_id azx_ids[] = {
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ /* Broxton-T */
+ { PCI_DEVICE(0x8086, 0x1a98),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a47e8ae0eb30..80bbadc83721 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
int err;
+ int i;
err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
+ /* keep the ADCs powered up when it's dynamically switchable */
+ if (spec->gen.dyn_adc_switch) {
+ unsigned int done = 0;
+ for (i = 0; i < spec->gen.input_mux.num_items; i++) {
+ int idx = spec->gen.dyn_adc_idx[i];
+ if (done & (1 << idx))
+ continue;
+ snd_hda_gen_fix_pin_power(codec,
+ spec->gen.adc_nids[idx]);
+ done |= 1 << idx;
+ }
+ }
+
return 0;
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5af372d01834..40933aa33afe 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1396,7 +1396,6 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
struct hda_codec *codec = per_pin->codec;
struct hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->temp_eld;
- struct hdmi_eld *pin_eld = &per_pin->sink_eld;
hda_nid_t pin_nid = per_pin->pin_nid;
/*
* Always execute a GetPinSense verb here, even when called from
@@ -1413,15 +1412,15 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
present = snd_hda_pin_sense(codec, pin_nid);
mutex_lock(&per_pin->lock);
- pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
- if (pin_eld->monitor_present)
+ eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
+ if (eld->monitor_present)
eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
else
eld->eld_valid = false;
codec_dbg(codec,
"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
- codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid);
+ codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
if (eld->eld_valid) {
if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
@@ -1441,7 +1440,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
else
update_eld(codec, per_pin, eld);
- ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
+ ret = !repoll || !eld->monitor_present || eld->eld_valid;
jack = snd_hda_jack_tbl_get(codec, pin_nid);
if (jack)
@@ -1859,6 +1858,8 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
+ if (!per_pin)
+ return;
mutex_lock(&per_pin->lock);
per_pin->chmap_set = true;
memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fefe83f2beab..810bceee4fd2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4760,6 +4760,7 @@ enum {
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC280_FIXUP_HP_HEADSET_MIC,
ALC221_FIXUP_HP_FRONT_MIC,
+ ALC292_FIXUP_TPT460,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5409,6 +5410,12 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
},
+ [ALC292_FIXUP_TPT460] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_tpt440_dock,
+ .chained = true,
+ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5442,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+ SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5563,7 +5571,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -5658,6 +5666,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+ {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index c5194f5b150a..d7e71f309299 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
}
pcxhr_msg_thread(mgr);
+ mutex_unlock(&mgr->lock);
return IRQ_HANDLED;
}
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index ddca6547399b..1f8fb0d904e0 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
};
/*
+ * Dell usb dock with ALC4020 codec had a firmware problem where it got
+ * screwed up when zero volume is passed; just skip it as a workaround
+ */
+static const struct usbmix_name_map dell_alc4020_map[] = {
+ { 16, NULL },
+ { 19, NULL },
+ { 0 }
+};
+
+/*
* Control map entries
*/
@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = aureon_51_2_map,
},
{
+ .id = USB_ID(0x0bda, 0x4014),
+ .map = dell_alc4020_map,
+ },
+ {
.id = USB_ID(0x0dba, 0x1000),
.map = mbox1_map,
},
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6178bb5d0731..0adfd9537cf7 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1134,9 +1134,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
return true;
}
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 5a95896105bc..55a60d331f47 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -299,18 +299,38 @@ they mean, and suggestions for how to fix them.
Errors in .c files
------------------
-If you're getting an objtool error in a compiled .c file, chances are
-the file uses an asm() statement which has a "call" instruction. An
-asm() statement with a call instruction must declare the use of the
-stack pointer in its output operand. For example, on x86_64:
+1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
- register void *__sp asm("rsp");
- asm volatile("call func" : "+r" (__sp));
+ This means that funcA() doesn't end with a return instruction or an
+ unconditional jump, and that objtool has determined that the function
+ can fall through into the next function. There could be different
+ reasons for this:
-Otherwise the stack frame may not get created before the call.
+ 1) funcA()'s last instruction is a call to a "noreturn" function like
+ panic(). In this case the noreturn function needs to be added to
+ objtool's hard-coded global_noreturns array. Feel free to bug the
+ objtool maintainer, or you can submit a patch.
-Another possible cause for errors in C code is if the Makefile removes
--fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
+ 2) funcA() uses the unreachable() annotation in a section of code
+ that is actually reachable.
+
+ 3) If funcA() calls an inline function, the object code for funcA()
+ might be corrupt due to a gcc bug. For more details, see:
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+
+2. If you're getting any other objtool error in a compiled .c file, it
+ may be because the file uses an asm() statement which has a "call"
+ instruction. An asm() statement with a call instruction must declare
+ the use of the stack pointer in its output operand. For example, on
+ x86_64:
+
+ register void *__sp asm("rsp");
+ asm volatile("call func" : "+r" (__sp));
+
+ Otherwise the stack frame may not get created before the call.
+
+3. Another possible cause for errors in C code is if the Makefile removes
+ -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
Also see the above section for .S file errors for more information what
the individual error messages mean.
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 7515cb2e879a..e8a1e69eb92c 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -54,6 +54,7 @@ struct instruction {
struct symbol *call_dest;
struct instruction *jump_dest;
struct list_head alts;
+ struct symbol *func;
};
struct alternative {
@@ -66,6 +67,7 @@ struct objtool_file {
struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 16);
struct section *rodata, *whitelist;
+ bool ignore_unreachables, c_file;
};
const char *objname;
@@ -228,7 +230,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
}
}
- if (insn->type == INSN_JUMP_DYNAMIC)
+ if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
/* sibling call */
return 0;
}
@@ -248,6 +250,7 @@ static int dead_end_function(struct objtool_file *file, struct symbol *func)
static int decode_instructions(struct objtool_file *file)
{
struct section *sec;
+ struct symbol *func;
unsigned long offset;
struct instruction *insn;
int ret;
@@ -281,6 +284,21 @@ static int decode_instructions(struct objtool_file *file)
hash_add(file->insn_hash, &insn->hash, insn->offset);
list_add_tail(&insn->list, &file->insn_list);
}
+
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ if (!find_insn(file, sec, func->offset)) {
+ WARN("%s(): can't find starting instruction",
+ func->name);
+ return -1;
+ }
+
+ func_for_each_insn(file, func, insn)
+ if (!insn->func)
+ insn->func = func;
+ }
}
return 0;
@@ -664,13 +682,40 @@ static int add_func_switch_tables(struct objtool_file *file,
text_rela->addend);
/*
- * TODO: Document where this is needed, or get rid of it.
- *
* rare case: jmpq *[addr](%rip)
+ *
+ * This check is for a rare gcc quirk, currently only seen in
+ * three driver functions in the kernel, only with certain
+ * obscure non-distro configs.
+ *
+ * As part of an optimization, gcc makes a copy of an existing
+ * switch jump table, modifies it, and then hard-codes the jump
+ * (albeit with an indirect jump) to use a single entry in the
+ * table. The rest of the jump table and some of its jump
+ * targets remain as dead code.
+ *
+ * In such a case we can just crudely ignore all unreachable
+ * instruction warnings for the entire object file. Ideally we
+ * would just ignore them for the function, but that would
+ * require redesigning the code quite a bit. And honestly
+ * that's just not worth doing: unreachable instruction
+ * warnings are of questionable value anyway, and this is such
+ * a rare issue.
+ *
+ * kbuild reports:
+ * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com
+ * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com
+ * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com
+ *
+ * gcc bug:
+ * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604
*/
- if (!rodata_rela)
+ if (!rodata_rela) {
rodata_rela = find_rela_by_dest(file->rodata,
text_rela->addend + 4);
+ if (rodata_rela)
+ file->ignore_unreachables = true;
+ }
if (!rodata_rela)
continue;
@@ -732,9 +777,6 @@ static int decode_sections(struct objtool_file *file)
{
int ret;
- file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard");
- file->rodata = find_section_by_name(file->elf, ".rodata");
-
ret = decode_instructions(file);
if (ret)
return ret;
@@ -799,6 +841,7 @@ static int validate_branch(struct objtool_file *file,
struct alternative *alt;
struct instruction *insn;
struct section *sec;
+ struct symbol *func = NULL;
unsigned char state;
int ret;
@@ -813,6 +856,16 @@ static int validate_branch(struct objtool_file *file,
}
while (1) {
+ if (file->c_file && insn->func) {
+ if (func && func != insn->func) {
+ WARN("%s() falls through to next function %s()",
+ func->name, insn->func->name);
+ return 1;
+ }
+
+ func = insn->func;
+ }
+
if (insn->visited) {
if (frame_state(insn->state) != frame_state(state)) {
WARN_FUNC("frame pointer state mismatch",
@@ -823,13 +876,6 @@ static int validate_branch(struct objtool_file *file,
return 0;
}
- /*
- * Catch a rare case where a noreturn function falls through to
- * the next function.
- */
- if (is_fentry_call(insn) && (state & STATE_FENTRY))
- return 0;
-
insn->visited = true;
insn->state = state;
@@ -1035,12 +1081,8 @@ static int validate_functions(struct objtool_file *file)
continue;
insn = find_insn(file, sec, func->offset);
- if (!insn) {
- WARN("%s(): can't find starting instruction",
- func->name);
- warnings++;
+ if (!insn)
continue;
- }
ret = validate_branch(file, insn, 0);
warnings += ret;
@@ -1056,13 +1098,14 @@ static int validate_functions(struct objtool_file *file)
if (insn->visited)
continue;
- if (!ignore_unreachable_insn(func, insn) &&
- !warnings) {
- WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
- warnings++;
- }
-
insn->visited = true;
+
+ if (file->ignore_unreachables || warnings ||
+ ignore_unreachable_insn(func, insn))
+ continue;
+
+ WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
+ warnings++;
}
}
}
@@ -1133,6 +1176,10 @@ int cmd_check(int argc, const char **argv)
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
+ file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
+ file.rodata = find_section_by_name(file.elf, ".rodata");
+ file.ignore_unreachables = false;
+ file.c_file = find_section_by_name(file.elf, ".comment");
ret = decode_sections(&file);
if (ret < 0)
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 407f11b97c8d..617578440989 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1130,7 +1130,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
ret);
- if (pt->synth_opts.callchain)
+ if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
return ret;
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index a34bfd0c8928..5ff6d3c126a9 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -7,6 +7,7 @@ ldflags-y += --wrap=ioremap_nocache
ldflags-y += --wrap=iounmap
ldflags-y += --wrap=memunmap
ldflags-y += --wrap=__devm_request_region
+ldflags-y += --wrap=__devm_release_region
ldflags-y += --wrap=__request_region
ldflags-y += --wrap=__release_region
ldflags-y += --wrap=devm_memremap_pages
@@ -49,6 +50,7 @@ libnvdimm-y += $(NVDIMM_SRC)/label.o
libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o
libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
+libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
libnvdimm-y += config_check.o
obj-m += test/
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 0c1a7e65bb81..c842095f2801 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -239,13 +239,11 @@ struct resource *__wrap___devm_request_region(struct device *dev,
}
EXPORT_SYMBOL(__wrap___devm_request_region);
-void __wrap___release_region(struct resource *parent, resource_size_t start,
- resource_size_t n)
+static bool nfit_test_release_region(struct resource *parent,
+ resource_size_t start, resource_size_t n)
{
- struct nfit_test_resource *nfit_res;
-
if (parent == &iomem_resource) {
- nfit_res = get_nfit_res(start);
+ struct nfit_test_resource *nfit_res = get_nfit_res(start);
if (nfit_res) {
struct resource *res = nfit_res->res + 1;
@@ -254,11 +252,26 @@ void __wrap___release_region(struct resource *parent, resource_size_t start,
__func__, start, n, res);
else
memset(res, 0, sizeof(*res));
- return;
+ return true;
}
}
- __release_region(parent, start, n);
+ return false;
+}
+
+void __wrap___release_region(struct resource *parent, resource_size_t start,
+ resource_size_t n)
+{
+ if (!nfit_test_release_region(parent, start, n))
+ __release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);
+void __wrap___devm_release_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n)
+{
+ if (!nfit_test_release_region(parent, start, n))
+ __devm_release_region(dev, parent, start, n);
+}
+EXPORT_SYMBOL(__wrap___devm_release_region);
+
MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 69bb3fc38fb2..0840684deb7d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,3 +3,4 @@ psock_fanout
psock_tpacket
reuseport_bpf
reuseport_bpf_cpu
+reuseport_dualstack
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c658792d47b4..0e5340742620 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
CFLAGS += -I../../../../usr/include/
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
all: $(NET_PROGS)
%: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644
index 000000000000..90958aaaafb9
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time. If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet. However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s). This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+ struct sockaddr_storage addr;
+ struct sockaddr_in *addr4;
+ struct sockaddr_in6 *addr6;
+ int opt, i;
+
+ switch (family) {
+ case AF_INET:
+ addr4 = (struct sockaddr_in *)&addr;
+ addr4->sin_family = AF_INET;
+ addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+ addr4->sin_port = htons(PORT);
+ break;
+ case AF_INET6:
+ addr6 = (struct sockaddr_in6 *)&addr;
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_addr = in6addr_any;
+ addr6->sin6_port = htons(PORT);
+ break;
+ default:
+ error(1, 0, "Unsupported family %d", family);
+ }
+
+ for (i = 0; i < count; ++i) {
+ rcv_fds[i] = socket(family, proto, 0);
+ if (rcv_fds[i] < 0)
+ error(1, errno, "failed to create receive socket");
+
+ opt = 1;
+ if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+ sizeof(opt)))
+ error(1, errno, "failed to set SO_REUSEPORT");
+
+ if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+ error(1, errno, "failed to bind receive socket");
+
+ if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+ error(1, errno, "failed to listen on receive port");
+ }
+}
+
+static void send_from_v4(int proto)
+{
+ struct sockaddr_in saddr, daddr;
+ int fd;
+
+ saddr.sin_family = AF_INET;
+ saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+ saddr.sin_port = 0;
+
+ daddr.sin_family = AF_INET;
+ daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ daddr.sin_port = htons(PORT);
+
+ fd = socket(AF_INET, proto, 0);
+ if (fd < 0)
+ error(1, errno, "failed to create send socket");
+
+ if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+ error(1, errno, "failed to bind send socket");
+
+ if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+ error(1, errno, "failed to connect send socket");
+
+ if (send(fd, "a", 1, 0) < 0)
+ error(1, errno, "failed to send message");
+
+ close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+ struct epoll_event ev;
+ int i, fd;
+ char buf[8];
+
+ i = epoll_wait(epfd, &ev, 1, -1);
+ if (i < 0)
+ error(1, errno, "epoll_wait failed");
+
+ if (proto == SOCK_STREAM) {
+ fd = accept(ev.data.fd, NULL, NULL);
+ if (fd < 0)
+ error(1, errno, "failed to accept");
+ i = recv(fd, buf, sizeof(buf), 0);
+ close(fd);
+ } else {
+ i = recv(ev.data.fd, buf, sizeof(buf), 0);
+ }
+
+ if (i < 0)
+ error(1, errno, "failed to recv");
+
+ return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+ struct epoll_event ev;
+ int epfd, i, test_fd;
+ uint16_t test_family;
+ socklen_t len;
+
+ epfd = epoll_create(1);
+ if (epfd < 0)
+ error(1, errno, "failed to create epoll");
+
+ ev.events = EPOLLIN;
+ for (i = 0; i < count; ++i) {
+ ev.data.fd = rcv_fds[i];
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+ error(1, errno, "failed to register sock epoll");
+ }
+
+ send_from_v4(proto);
+
+ test_fd = receive_once(epfd, proto);
+ if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+ error(1, errno, "failed to read socket domain");
+ if (test_family != AF_INET)
+ error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+ test_family);
+
+ close(epfd);
+}
+
+int main(void)
+{
+ int rcv_fds[32], i;
+
+ fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+ build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+ build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+ test(rcv_fds, 10, SOCK_DGRAM);
+ for (i = 0; i < 10; ++i)
+ close(rcv_fds[i]);
+
+ fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+ build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+ build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+ test(rcv_fds, 10, SOCK_DGRAM);
+ for (i = 0; i < 10; ++i)
+ close(rcv_fds[i]);
+
+ /* NOTE: UDP socket lookups traverse a different code path when there
+ * are > 10 sockets in a group.
+ */
+ fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+ build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+ build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+ test(rcv_fds, 32, SOCK_DGRAM);
+ for (i = 0; i < 32; ++i)
+ close(rcv_fds[i]);
+
+ fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+ build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+ build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+ test(rcv_fds, 32, SOCK_DGRAM);
+ for (i = 0; i < 32; ++i)
+ close(rcv_fds[i]);
+
+ fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+ build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+ build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+ test(rcv_fds, 10, SOCK_STREAM);
+ for (i = 0; i < 10; ++i)
+ close(rcv_fds[i]);
+
+ fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+ build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+ build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+ test(rcv_fds, 10, SOCK_STREAM);
+ for (i = 0; i < 10; ++i)
+ close(rcv_fds[i]);
+
+ fprintf(stderr, "SUCCESS\n");
+ return 0;
+}
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a9ad4fe3f68f..9aaa35dd9144 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -91,6 +91,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
+ WARN_ON(!kvm_timer_should_fire(vcpu));
+
/*
* If the vcpu is blocked we want to wake it up so that it will see
* the timer has expired when entering the guest.
@@ -98,10 +100,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
kvm_vcpu_kick(vcpu);
}
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+ cycle_t cval, now;
+
+ cval = vcpu->arch.timer_cpu.cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ if (now < cval) {
+ u64 ns;
+
+ ns = cyclecounter_cyc2ns(timecounter->cc,
+ cval - now,
+ timecounter->mask,
+ &timecounter->frac);
+ return ns;
+ }
+
+ return 0;
+}
+
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
struct arch_timer_cpu *timer;
+ struct kvm_vcpu *vcpu;
+ u64 ns;
+
timer = container_of(hrt, struct arch_timer_cpu, timer);
+ vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+ /*
+ * Check that the timer has really expired from the guest's
+ * PoV (NTP on the host may have forced it to expire
+ * early). If we should have slept longer, restart it.
+ */
+ ns = kvm_timer_compute_delta(vcpu);
+ if (unlikely(ns)) {
+ hrtimer_forward_now(hrt, ns_to_ktime(ns));
+ return HRTIMER_RESTART;
+ }
+
queue_work(wqueue, &timer->expired);
return HRTIMER_NORESTART;
}
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- u64 ns;
- cycle_t cval, now;
BUG_ON(timer_is_armed(timer));
@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
return;
/* The timer has not yet expired, schedule a background timer */
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
- ns = cyclecounter_cyc2ns(timecounter->cc,
- cval - now,
- timecounter->mask,
- &timecounter->frac);
- timer_arm(timer, ns);
+ timer_arm(timer, kvm_timer_compute_delta(vcpu));
}
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index b5754c6c5508..575c7aa30d7e 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = 0;
- if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+ if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
reg &= kvm_pmu_valid_counter_mask(vcpu);
+ }
return reg;
}