-rw-r--r--Documentation/admin-guide/kernel-parameters.txt12
-rw-r--r--Documentation/core-api/local_ops.rst10
-rw-r--r--Documentation/devicetree/bindings/display/google,goldfish-fb.txt17
-rw-r--r--Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/cadence-quadspi.txt7
-rw-r--r--Documentation/devicetree/bindings/mtd/denali-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt1
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-quadspi.txt15
-rw-r--r--Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/rtc/imxdi-rtc.txt14
-rw-r--r--Documentation/devicetree/bindings/rtc/pcf85363.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mt7622.txt21
-rw-r--r--Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt27
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.txt3
-rw-r--r--Documentation/devicetree/bindings/usb/usb-device.txt33
-rw-r--r--Documentation/ia64/xen.txt2
-rw-r--r--Documentation/printk-formats.txt31
-rw-r--r--Documentation/process/5.Posting.rst5
-rw-r--r--Documentation/scheduler/sched-deadline.txt13
-rw-r--r--Documentation/security/keys/core.rst10
-rw-r--r--Documentation/svga.txt59
-rw-r--r--Documentation/sysctl/vm.txt7
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt178
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic-its.txt2
-rw-r--r--Documentation/x86/protection-keys.txt9
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile81
-rw-r--r--arch/alpha/kernel/srmcons.c7
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi8
-rw-r--r--arch/arc/include/asm/arcregs.h33
-rw-r--r--arch/arc/kernel/perf_event.c40
-rw-r--r--arch/arc/kernel/setup.c43
-rw-r--r--arch/arc/mm/tlb.c57
-rw-r--r--arch/arc/plat-axs10x/Kconfig2
-rw-r--r--arch/arc/plat-axs10x/axs10x.c7
-rw-r--r--arch/arm/Kconfig.debug4
-rw-r--r--arch/arm/include/asm/assembler.h18
-rw-r--r--arch/arm/include/asm/pgtable-3level.h1
-rw-r--r--arch/arm/include/asm/pgtable.h12
-rw-r--r--arch/arm/kernel/entry-header.S6
-rw-r--r--arch/arm/kvm/Kconfig5
-rw-r--r--arch/arm/kvm/Makefile1
-rw-r--r--arch/arm/mach-iop32x/n2100.c5
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c4
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c4
-rw-r--r--arch/arm/mach-orion5x/db88f5281-setup.c4
-rw-r--r--arch/arm/mach-pxa/cm-x255.c19
-rw-r--r--arch/arm/mach-uniphier/Makefile1
-rw-r--r--arch/arm/mm/dump.c4
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm64/include/asm/pgtable.h1
-rw-r--r--arch/arm64/kvm/Kconfig3
-rw-r--r--arch/arm64/kvm/Makefile1
-rw-r--r--arch/blackfin/kernel/nmi.c5
-rw-r--r--arch/m68k/amiga/amisound.c4
-rw-r--r--arch/m68k/mac/macboing.c8
-rw-r--r--arch/microblaze/include/asm/mmu_context_mm.h1
-rw-r--r--arch/mips/boot/dts/brcm/Makefile3
-rw-r--r--arch/mips/boot/dts/cavium-octeon/Makefile3
-rw-r--r--arch/mips/boot/dts/img/Makefile3
-rw-r--r--arch/mips/boot/dts/ingenic/Makefile3
-rw-r--r--arch/mips/boot/dts/lantiq/Makefile3
-rw-r--r--arch/mips/boot/dts/mti/Makefile3
-rw-r--r--arch/mips/boot/dts/netlogic/Makefile3
-rw-r--r--arch/mips/boot/dts/ni/Makefile3
-rw-r--r--arch/mips/boot/dts/pic32/Makefile3
-rw-r--r--arch/mips/boot/dts/qca/Makefile3
-rw-r--r--arch/mips/boot/dts/ralink/Makefile3
-rw-r--r--arch/mips/boot/dts/xilfpga/Makefile3
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/lasat/picvue_proc.c4
-rw-r--r--arch/mips/mti-malta/malta-display.c4
-rw-r--r--arch/parisc/kernel/pdc_cons.c4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/imc-pmu.h6
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c4
-rw-r--r--arch/powerpc/kernel/tau_6xx.c4
-rw-r--r--arch/powerpc/kvm/booke.c7
-rw-r--r--arch/powerpc/lib/code-patching.c6
-rw-r--r--arch/powerpc/mm/slice.c34
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c8
-rw-r--r--arch/powerpc/perf/imc-pmu.c27
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c8
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c22
-rw-r--r--arch/powerpc/platforms/powernv/vas.c1
-rw-r--r--arch/s390/include/asm/pgtable.h8
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/mm/cmm.c8
-rw-r--r--arch/sh/drivers/heartbeat.c6
-rw-r--r--arch/sh/drivers/pci/common.c16
-rw-r--r--arch/sh/drivers/push-switch.c9
-rw-r--r--arch/sparc/include/asm/pgtable_64.h2
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/mm/gup.c4
-rw-r--r--arch/tile/include/asm/pgtable.h1
-rw-r--r--arch/um/Kconfig.common1
-rw-r--r--arch/x86/Kconfig12
-rw-r--r--arch/x86/boot/compressed/kaslr.c5
-rw-r--r--arch/x86/entry/entry_64.S14
-rw-r--r--arch/x86/events/intel/core.c35
-rw-r--r--arch/x86/events/intel/uncore.c4
-rw-r--r--arch/x86/events/intel/uncore.h2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c10
-rw-r--r--arch/x86/include/asm/elf.h1
-rw-r--r--arch/x86/include/asm/hw_irq.h8
-rw-r--r--arch/x86/include/asm/hypertransport.h46
-rw-r--r--arch/x86/include/asm/insn-eval.h2
-rw-r--r--arch/x86/include/asm/io.h4
-rw-r--r--arch/x86/include/asm/irqdomain.h6
-rw-r--r--arch/x86/include/asm/pgtable.h8
-rw-r--r--arch/x86/include/asm/processor.h1
-rw-r--r--arch/x86/kernel/acpi/boot.c61
-rw-r--r--arch/x86/kernel/apic/Makefile1
-rw-r--r--arch/x86/kernel/apic/htirq.c198
-rw-r--r--arch/x86/kernel/apic/vector.c5
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/mpparse.c6
-rw-r--r--arch/x86/kernel/smpboot.c128
-rw-r--r--arch/x86/kernel/sys_x86_64.c10
-rw-r--r--arch/x86/kernel/umip.c88
-rw-r--r--arch/x86/kvm/svm.c7
-rw-r--r--arch/x86/kvm/vmx.c161
-rw-r--r--arch/x86/lib/insn-eval.c4
-rw-r--r--arch/x86/lib/x86-opcode-map.txt2
-rw-r--r--arch/x86/mm/hugetlbpage.c11
-rw-r--r--arch/x86/mm/mmap.c62
-rw-r--r--block/blk-core.c10
-rw-r--r--block/blk-stat.c6
-rw-r--r--block/blk-throttle.c9
-rw-r--r--crypto/af_alg.c21
-rw-r--r--crypto/algif_aead.c56
-rw-r--r--crypto/algif_skcipher.c23
-rw-r--r--crypto/asymmetric_keys/pkcs7_key_type.c1
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c5
-rw-r--r--crypto/asymmetric_keys/public_key.c2
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c1
-rw-r--r--crypto/skcipher.c3
-rw-r--r--drivers/atm/ambassador.c11
-rw-r--r--drivers/atm/firestream.c8
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/atm/horizon.c8
-rw-r--r--drivers/atm/idt77105.c8
-rw-r--r--drivers/atm/idt77252.c6
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/atm/lanai.c16
-rw-r--r--drivers/atm/nicstar.c8
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/auxdisplay/Kconfig1
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/block/DAC960.c9
-rw-r--r--drivers/block/DAC960.h2
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/ataflop.c16
-rw-r--r--drivers/block/rbd.c65
-rw-r--r--drivers/block/rsxx/cregs.c7
-rw-r--r--drivers/block/rsxx/dma.c7
-rw-r--r--drivers/block/skd_main.c6
-rw-r--r--drivers/block/sunvdc.c9
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/umem.c5
-rw-r--r--drivers/block/xsysace.c6
-rw-r--r--drivers/char/dtlk.c4
-rw-r--r--drivers/char/hangcheck-timer.c4
-rw-r--r--drivers/char/ipmi/bt-bmc.c7
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c6
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c7
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/nwbutton.c4
-rw-r--r--drivers/char/nwbutton.h2
-rw-r--r--drivers/char/rtc.c4
-rw-r--r--drivers/char/tpm/tpm-dev-common.c7
-rw-r--r--drivers/clocksource/timer-of.c9
-rw-r--r--drivers/clocksource/timer-of.h2
-rw-r--r--drivers/dax/device.c12
-rw-r--r--drivers/firmware/psci_checker.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/drm_connector.c17
-rw-r--r--drivers/gpu/drm/drm_edid.c23
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_plane.c1
-rw-r--r--drivers/gpu/drm/drm_vblank.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c13
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c6
-rw-r--r--drivers/gpu/drm/tegra/sor.c157
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig11
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c264
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts72
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h25
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c6
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c6
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c3
-rw-r--r--drivers/hid/hid-appleir.c7
-rw-r--r--drivers/hid/hid-prodikeys.c7
-rw-r--r--drivers/hid/hid-wiimote-core.c6
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c6
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/input/gameport/gameport.c7
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/db9.c6
-rw-r--r--drivers/input/joystick/gamecon.c6
-rw-r--r--drivers/input/joystick/turbografx.c6
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c2
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/irqchip/Kconfig7
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-gic-v3.c11
-rw-r--r--drivers/irqchip/irq-gic-v4.c7
-rw-r--r--drivers/irqchip/irq-imgpdc.c2
-rw-r--r--drivers/irqchip/irq-s3c24xx.c4
-rw-r--r--drivers/irqchip/irq-sni-exiu.c4
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c2
-rw-r--r--drivers/isdn/capi/capidrv.c6
-rw-r--r--drivers/isdn/divert/isdn_divert.c9
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c10
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c10
-rw-r--r--drivers/isdn/i4l/isdn_common.c5
-rw-r--r--drivers/isdn/i4l/isdn_net.c9
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/isdn/i4l/isdn_tty.c7
-rw-r--r--drivers/lightnvm/pblk-core.c4
-rw-r--r--drivers/lightnvm/pblk-gc.c6
-rw-r--r--drivers/lightnvm/pblk-init.c2
-rw-r--r--drivers/lightnvm/pblk-rl.c6
-rw-r--r--drivers/lightnvm/pblk.h2
-rw-r--r--drivers/lightnvm/rrpc.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c2
-rw-r--r--drivers/media/platform/fsl-viu.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c8
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c7
-rw-r--r--drivers/media/platform/vim2m.c6
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c8
-rw-r--r--drivers/media/usb/au0828/au0828-video.c14
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/memstick/core/ms_block.c7
-rw-r--r--drivers/mfd/rtsx_usb.c6
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mtd/Kconfig1
-rw-r--r--drivers/mtd/chips/map_ram.c34
-rw-r--r--drivers/mtd/chips/map_rom.c34
-rw-r--r--drivers/mtd/devices/docg3.c7
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtdram.c36
-rw-r--r--drivers/mtd/devices/slram.c9
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/netsc520.c2
-rw-r--r--drivers/mtd/maps/nettel.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c38
-rw-r--r--drivers/mtd/maps/sbc_gxx.c2
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/mtdconcat.c27
-rw-r--r--drivers/mtd/mtdcore.c61
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/mtdsuper.c6
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c7
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c17
-rw-r--r--drivers/mtd/nand/atmel/pmecc.h1
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/denali.c291
-rw-r--r--drivers/mtd/nand/denali.h44
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c5
-rw-r--r--drivers/mtd/nand/diskonchip.c3
-rw-r--r--drivers/mtd/nand/gpio.c112
-rw-r--r--drivers/mtd/nand/hisi504_nand.c3
-rw-r--r--drivers/mtd/nand/mtk_ecc.c13
-rw-r--r--drivers/mtd/nand/mxc_nand.c19
-rw-r--r--drivers/mtd/nand/nand_base.c34
-rw-r--r--drivers/mtd/nand/nandsim.c13
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c377
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c41
-rw-r--r--drivers/mtd/nand/qcom_nandc.c127
-rw-r--r--drivers/mtd/nand/sh_flctl.c9
-rw-r--r--drivers/mtd/parsers/Kconfig8
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/sharpslpart.c398
-rw-r--r--drivers/mtd/sm_ftl.c6
-rw-r--r--drivers/mtd/spi-nor/Kconfig6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c3
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c209
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c70
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c105
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c35
-rw-r--r--drivers/net/bonding/bond_netlink.c2
-rw-r--r--drivers/net/caif/caif_hsi.c21
-rw-r--r--drivers/net/cris/eth_v10.c36
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c7
-rw-r--r--drivers/net/eql.c6
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c9
-rw-r--r--drivers/net/ethernet/agere/et131x.c7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c18
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c14
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c15
-rw-r--r--drivers/net/ethernet/broadcom/b44.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c9
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c166
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.c38
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c46
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c109
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c8
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c7
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169.c55
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c15
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c7
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c16
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/hamradio/scc.c8
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c36
-rw-r--r--drivers/net/phy/cortina.c4
-rw-r--r--drivers/net/phy/marvell10g.c5
-rw-r--r--drivers/net/slip/slip.c16
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/thunderbolt.c57
-rw-r--r--drivers/net/tun.c13
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wan/lmc/lmc_main.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c2
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c132
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c7
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c2
-rw-r--r--drivers/net/wireless/ray_cs.c12
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/xen-netfront.c25
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c7
-rw-r--r--drivers/nfc/pn533/pn533.c8
-rw-r--r--drivers/nfc/st-nci/ndlc.c17
-rw-r--r--drivers/nfc/st-nci/se.c19
-rw-r--r--drivers/nfc/st21nfca/se.c19
-rw-r--r--drivers/ntb/test/ntb_pingpong.c8
-rw-r--r--drivers/of/base.c6
-rw-r--r--drivers/of/of_pci.c2
-rw-r--r--drivers/of/unittest-data/Makefile1
-rw-r--r--drivers/of/unittest-data/testcases.dts65
-rw-r--r--drivers/pci/Kconfig9
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/htirq.c135
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c13
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.c26
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.h1
-rw-r--r--drivers/platform/x86/sony-laptop.c4
-rw-r--r--drivers/pps/clients/pps-ktimer.c4
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c63
-rw-r--r--drivers/pwm/pwm-img.c160
-rw-r--r--drivers/pwm/pwm-mediatek.c53
-rw-r--r--drivers/pwm/pwm-stm32-lp.c3
-rw-r--r--drivers/pwm/pwm-sun4i.c8
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/interface.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c12
-rw-r--r--drivers/rtc/rtc-armada38x.c101
-rw-r--r--drivers/rtc/rtc-at91rm9200.c19
-rw-r--r--drivers/rtc/rtc-dev.c6
-rw-r--r--drivers/rtc/rtc-ds1305.c70
-rw-r--r--drivers/rtc/rtc-ds1307.c57
-rw-r--r--drivers/rtc/rtc-ds1390.c7
-rw-r--r--drivers/rtc/rtc-ds1511.c75
-rw-r--r--drivers/rtc/rtc-jz4740.c6
-rw-r--r--drivers/rtc/rtc-m41t80.c84
-rw-r--r--drivers/rtc/rtc-m48t86.c58
-rw-r--r--drivers/rtc/rtc-mt7622.c422
-rw-r--r--drivers/rtc/rtc-omap.c57
-rw-r--r--drivers/rtc/rtc-pcf8523.c40
-rw-r--r--drivers/rtc/rtc-pcf85363.c220
-rw-r--r--drivers/rtc/rtc-pcf8563.c4
-rw-r--r--drivers/rtc/rtc-pl031.c48
-rw-r--r--drivers/rtc/rtc-rv3029c2.c18
-rw-r--r--drivers/rtc/rtc-rx8010.c7
-rw-r--r--drivers/rtc/rtc-sc27xx.c662
-rw-r--r--drivers/rtc/rtc-sysfs.c25
-rw-r--r--drivers/rtc/rtc-xgene.c47
-rw-r--r--drivers/s390/block/dasd.c20
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/net/fsm.c11
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c14
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c40
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c16
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c10
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c8
-rw-r--r--drivers/scsi/fnic/fnic_main.c14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/ipr.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/ncr53c8xx.c8
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c10
-rw-r--r--drivers/scsi/scsi_devinfo.c18
-rw-r--r--drivers/scsi/scsi_priv.h15
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c8
-rw-r--r--drivers/staging/greybus/operation.c7
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h2
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c9
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c7
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c7
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c16
-rw-r--r--drivers/staging/rtl8712/recv_linux.c9
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c9
-rw-r--r--drivers/staging/speakup/main.c4
-rw-r--r--drivers/staging/speakup/synth.c2
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c8
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c45
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c80
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_alua.c51
-rw-r--r--drivers/target/target_core_alua.h9
-rw-r--r--drivers/target/target_core_configfs.c14
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_pr.c41
-rw-r--r--drivers/target/target_core_tmr.c12
-rw-r--r--drivers/target/target_core_transport.c84
-rw-r--r--drivers/target/target_core_user.c215
-rw-r--r--drivers/tty/cyclades.c4
-rw-r--r--drivers/tty/ipwireless/hardware.c11
-rw-r--r--drivers/tty/isicom.c4
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/n_r3964.c8
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/crisv10.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/ifx6x60.c7
-rw-r--r--drivers/tty/serial/imx.c6
-rw-r--r--drivers/tty/serial/kgdb_nmi.c6
-rw-r--r--drivers/tty/serial/max3100.c7
-rw-r--r--drivers/tty/serial/mux.c4
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c7
-rw-r--r--drivers/tty/serial/sa1100.c7
-rw-r--r--drivers/tty/serial/sh-sci.c16
-rw-r--r--drivers/tty/serial/sn_console.c6
-rw-r--r--drivers/tty/synclink.c8
-rw-r--r--drivers/tty/synclink_gt.c16
-rw-r--r--drivers/tty/synclinkmp.c17
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/tty/vt/vt.c4
-rw-r--r--drivers/usb/atm/cxacru.c23
-rw-r--r--drivers/usb/atm/speedtch.c16
-rw-r--r--drivers/usb/atm/usbatm.c10
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/hcd_queue.c7
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c7
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c8
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c8
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c9
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c7
-rw-r--r--drivers/usb/host/sl811-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c3
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/usb/host/xhci.c8
-rw-r--r--drivers/usb/serial/mos7840.c15
-rw-r--r--drivers/usb/storage/realtek_cr.c7
-rw-r--r--drivers/uwb/drp.c6
-rw-r--r--drivers/uwb/neh.c8
-rw-r--r--drivers/uwb/rsv.c15
-rw-r--r--drivers/uwb/uwb-internal.h2
-rw-r--r--drivers/video/fbdev/Kconfig10
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c10
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c3
-rw-r--r--drivers/video/fbdev/au1200fb.c43
-rw-r--r--drivers/video/fbdev/cirrusfb.c6
-rw-r--r--drivers/video/fbdev/controlfb.h2
-rw-r--r--drivers/video/fbdev/core/fbcon.c11
-rw-r--r--drivers/video/fbdev/core/fbcon.h1
-rw-r--r--drivers/video/fbdev/dnfb.c15
-rw-r--r--drivers/video/fbdev/goldfishfb.c8
-rw-r--r--drivers/video/fbdev/igafb.c579
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.c9
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mxsfb.c13
-rw-r--r--drivers/video/fbdev/omap/hwa742.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c24
-rw-r--r--drivers/video/fbdev/sa1100fb.c75
-rw-r--r--drivers/video/fbdev/sa1100fb.h2
-rw-r--r--drivers/video/fbdev/sis/init301.c2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c4
-rw-r--r--drivers/video/fbdev/sm501fb.c22
-rw-r--r--drivers/video/fbdev/udlfb.c10
-rw-r--r--drivers/watchdog/alim7101_wdt.c4
-rw-r--r--drivers/watchdog/at91sam9_wdt.c6
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c9
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c4
-rw-r--r--drivers/watchdog/cpu5wdt.c4
-rw-r--r--drivers/watchdog/machzwd.c4
-rw-r--r--drivers/watchdog/mixcomwd.c4
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c7
-rw-r--r--drivers/watchdog/mtx-1_wdt.c4
-rw-r--r--drivers/watchdog/nuc900_wdt.c4
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pika_wdt.c4
-rw-r--r--drivers/watchdog/rdc321x_wdt.c4
-rw-r--r--drivers/watchdog/sbc60xxwdt.c4
-rw-r--r--drivers/watchdog/sc520_wdt.c4
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/via_wdt.c4
-rw-r--r--drivers/watchdog/w83877f_wdt.c4
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--firmware/Makefile3
-rw-r--r--fs/9p/vfs_inode.c3
-rw-r--r--fs/9p/vfs_inode_dotl.c3
-rw-r--r--fs/9p/vfs_super.c6
-rw-r--r--fs/adfs/super.c4
-rw-r--r--fs/affs/amigaffs.c2
-rw-r--r--fs/affs/bitmap.c6
-rw-r--r--fs/affs/super.c16
-rw-r--r--fs/afs/cell.c7
-rw-r--r--fs/afs/dir.c25
-rw-r--r--fs/afs/flock.c548
-rw-r--r--fs/afs/internal.h23
-rw-r--r--fs/afs/rotate.c70
-rw-r--r--fs/afs/security.c4
-rw-r--r--fs/afs/server_list.c2
-rw-r--r--fs/afs/super.c4
-rw-r--r--fs/afs/write.c5
-rw-r--r--fs/autofs4/root.c17
-rw-r--r--fs/befs/ChangeLog2
-rw-r--r--fs/befs/linuxvfs.c4
-rw-r--r--fs/btrfs/compression.c9
-rw-r--r--fs/btrfs/compression.h5
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent-tree.c14
-rw-r--r--fs/btrfs/extent_io.c4
-rw-r--r--fs/btrfs/extent_io.h8
-rw-r--r--fs/btrfs/file.c130
-rw-r--r--fs/btrfs/free-space-cache.c3
-rw-r--r--fs/btrfs/inode.c34
-rw-r--r--fs/btrfs/ioctl.c4
-rw-r--r--fs/btrfs/relocation.c3
-rw-r--r--fs/btrfs/send.c124
-rw-r--r--fs/btrfs/super.c63
-rw-r--r--fs/btrfs/tests/extent-io-tests.c6
-rw-r--r--fs/btrfs/tests/inode-tests.c12
-rw-r--r--fs/btrfs/tree-checker.c27
-rw-r--r--fs/btrfs/tree-checker.h14
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c36
-rw-r--r--fs/ceph/caps.c9
-rw-r--r--fs/ceph/inode.c9
-rw-r--r--fs/ceph/locks.c177
-rw-r--r--fs/ceph/mds_client.c96
-rw-r--r--fs/ceph/super.c13
-rw-r--r--fs/ceph/super.h4
-rw-r--r--fs/cifs/cifs_fs_sb.h2
-rw-r--r--fs/cifs/cifsfs.c12
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/xattr.c8
-rw-r--r--fs/coda/inode.c4
-rw-r--r--fs/cramfs/inode.c4
-rw-r--r--fs/dax.c3
-rw-r--r--fs/ecryptfs/main.c8
-rw-r--r--fs/efs/super.c4
-rw-r--r--fs/exec.c7
-rw-r--r--fs/ext2/balloc.c4
-rw-r--r--fs/ext2/ialloc.c4
-rw-r--r--fs/ext2/super.c20
-rw-r--r--fs/ext4/inode.c4
-rw-r--r--fs/ext4/super.c52
-rw-r--r--fs/f2fs/checkpoint.c10
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/gc.c2
-rw-r--r--fs/f2fs/recovery.c10
-rw-r--r--fs/f2fs/super.c28
-rw-r--r--fs/fat/fatent.c6
-rw-r--r--fs/fat/inode.c10
-rw-r--r--fs/fat/misc.c2
-rw-r--r--fs/fat/namei_msdos.c2
-rw-r--r--fs/freevxfs/vxfs_super.c4
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fuse/inode.c12
-rw-r--r--fs/gfs2/ops_fstype.c16
-rw-r--r--fs/gfs2/super.c10
-rw-r--r--fs/gfs2/trans.c2
-rw-r--r--fs/hfs/mdb.c4
-rw-r--r--fs/hfs/super.c16
-rw-r--r--fs/hfsplus/super.c22
-rw-r--r--fs/hpfs/map.c2
-rw-r--r--fs/hpfs/super.c8
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/inode.c10
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/jffs2/fs.c4
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/super.c4
-rw-r--r--fs/jfs/super.c10
-rw-r--r--fs/kernfs/mount.c2
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/lockd/host.c24
-rw-r--r--fs/lockd/mon.c3
-rw-r--r--fs/lockd/svc.c38
-rw-r--r--fs/lockd/svcsubs.c2
-rw-r--r--fs/locks.c2
-rw-r--r--fs/mbcache.c3
-rw-r--r--fs/minix/inode.c4
-rw-r--r--fs/namei.c15
-rw-r--r--fs/ncpfs/inode.c4
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/super.c22
-rw-r--r--fs/nfs_common/grace.c10
-rw-r--r--fs/nfsd/export.c10
-rw-r--r--fs/nfsd/netns.h3
-rw-r--r--fs/nfsd/nfs4idmap.c4
-rw-r--r--fs/nfsd/nfs4state.c279
-rw-r--r--fs/nfsd/nfsctl.c3
-rw-r--r--fs/nfsd/nfssvc.c14
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/nilfs2/super.c24
-rw-r--r--fs/nilfs2/the_nilfs.c6
-rw-r--r--fs/notify/fsnotify.c2
-rw-r--r--fs/nsfs.c2
-rw-r--r--fs/ntfs/super.c32
-rw-r--r--fs/ocfs2/cluster/tcp.c9
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/ocfs2/super.c28
-rw-r--r--fs/ocfs2/xattr.c2
-rw-r--r--fs/openpromfs/inode.c4
-rw-r--r--fs/orangefs/acl.c10
-rw-r--r--fs/orangefs/dir.c1
-rw-r--r--fs/orangefs/file.c16
-rw-r--r--fs/orangefs/inode.c17
-rw-r--r--fs/orangefs/namei.c45
-rw-r--r--fs/orangefs/orangefs-debug.h4
-rw-r--r--fs/orangefs/orangefs-kernel.h31
-rw-r--r--fs/orangefs/orangefs-utils.c86
-rw-r--r--fs/orangefs/super.c23
-rw-r--r--fs/orangefs/symlink.c1
-rw-r--r--fs/overlayfs/super.c10
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/root.c2
-rw-r--r--fs/proc_namespace.c8
-rw-r--r--fs/pstore/platform.c4
-rw-r--r--fs/qnx4/inode.c4
-rw-r--r--fs/qnx6/inode.c4
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/reiserfs/prints.c4
-rw-r--r--fs/reiserfs/super.c18
-rw-r--r--fs/reiserfs/xattr.c10
-rw-r--r--fs/romfs/super.c4
-rw-r--r--fs/squashfs/super.c4
-rw-r--r--fs/statfs.c6
-rw-r--r--fs/sysfs/mount.c2
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/sysv/super.c2
-rw-r--r--fs/ubifs/file.c2
-rw-r--r--fs/ubifs/io.c2
-rw-r--r--fs/ubifs/super.c20
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/udf/super.c6
-rw-r--r--fs/ufs/balloc.c8
-rw-r--r--fs/ufs/ialloc.c10
-rw-r--r--fs/ufs/super.c30
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c8
-rw-r--r--fs/xfs/xfs_linux.h10
-rw-r--r--fs/xfs/xfs_log.c6
-rw-r--r--fs/xfs/xfs_super.c8
-rw-r--r--fs/xfs/xfs_super.h2
-rw-r--r--include/asm-generic/pgtable.h12
-rw-r--r--include/crypto/if_alg.h1
-rw-r--r--include/drm/drm_connector.h5
-rw-r--r--include/drm/drm_edid.h3
-rw-r--r--include/drm/drm_mode_config.h7
-rw-r--r--include/dt-bindings/msm/msm-bus-ids.h887
-rw-r--r--include/kvm/arm_vgic.h41
-rw-r--r--include/linux/bpf.h19
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--include/linux/compiler.h21
-rw-r--r--include/linux/fs.h19
-rw-r--r--include/linux/htirq.h39
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/irq.h11
-rw-r--r--include/linux/irqchip/arm-gic-v4.h1
-rw-r--r--include/linux/kallsyms.h6
-rw-r--r--include/linux/key-type.h2
-rw-r--r--include/linux/key.h7
-rw-r--r--include/linux/kthread.h10
-rw-r--r--include/linux/migrate.h2
-rw-r--r--include/linux/mm.h14
-rw-r--r--include/linux/mtd/mtd.h6
-rw-r--r--include/linux/mtd/nand-gpio.h5
-rw-r--r--include/linux/mtd/rawnand.h3
-rw-r--r--include/linux/mtd/spi-nor.h10
-rw-r--r--include/linux/netdev_features.h4
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/pci.h6
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h17
-rw-r--r--include/linux/printk.h6
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/timekeeper_internal.h7
-rw-r--r--include/linux/timekeeping.h6
-rw-r--r--include/linux/timer.h120
-rw-r--r--include/linux/virtio_net.h5
-rw-r--r--include/linux/workqueue.h17
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/net/ipv6.h1
-rw-r--r--include/net/mac80211.h8
-rw-r--r--include/net/sctp/checksum.h13
-rw-r--r--include/net/sctp/sctp.h5
-rw-r--r--include/net/sctp/stream_sched.h5
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/scsi/scsi_devinfo.h50
-rw-r--r--include/sound/control.h4
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/trace/events/rxrpc.h86
-rw-r--r--include/trace/events/sched.h6
-rw-r--r--include/uapi/linux/bfs_fs.h2
-rw-r--r--include/uapi/linux/bpf.h8
-rw-r--r--include/uapi/linux/rxrpc.h1
-rw-r--r--include/uapi/linux/vm_sockets_diag.h1
-rw-r--r--include/video/iga.h25
-rw-r--r--ipc/mqueue.c2
-rw-r--r--kernel/bpf/offload.c27
-rw-r--r--kernel/bpf/syscall.c40
-rw-r--r--kernel/bpf/verifier.c31
-rw-r--r--kernel/events/core.c1
-rw-r--r--kernel/irq/manage.c13
-rw-r--r--kernel/irq/matrix.c2
-rw-r--r--kernel/irq/spurious.c4
-rw-r--r--kernel/jump_label.c2
-rw-r--r--kernel/kallsyms.c8
-rw-r--r--kernel/kthread.c2
-rw-r--r--kernel/module.c6
-rw-r--r--kernel/padata.c6
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/printk/printk_safe.c2
-rw-r--r--kernel/time/Kconfig4
-rw-r--r--kernel/time/clocksource.c5
-rw-r--r--kernel/time/timekeeping.c45
-rw-r--r--kernel/time/timer.c40
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/trace/bpf_trace.c12
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig4
-rw-r--r--lib/random32.c4
-rw-r--r--lib/test_printf.c108
-rw-r--r--lib/vsprintf.c194
-rw-r--r--mm/frame_vector.c12
-rw-r--r--mm/gup.c66
-rw-r--r--mm/hmm.c8
-rw-r--r--mm/huge_memory.c42
-rw-r--r--mm/hugetlb.c12
-rw-r--r--mm/kasan/report.c8
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/madvise.c4
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory.c8
-rw-r--r--mm/mmap.c8
-rw-r--r--mm/oom_kill.c7
-rw-r--r--mm/page-writeback.c12
-rw-r--r--mm/page_alloc.c13
-rw-r--r--mm/shmem.c10
-rw-r--r--net/802/garp.c6
-rw-r--r--net/802/mrp.c13
-rw-r--r--net/9p/client.c5
-rw-r--r--net/9p/trans_fd.c6
-rw-r--r--net/9p/trans_virtio.c13
-rw-r--r--net/9p/trans_xen.c4
-rw-r--r--net/appletalk/aarp.c4
-rw-r--r--net/appletalk/ddp.c7
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/batman-adv/tp_meter.c14
-rw-r--r--net/bluetooth/hidp/core.c7
-rw-r--r--net/bluetooth/rfcomm/core.c12
-rw-r--r--net/bluetooth/sco.c6
-rw-r--r--net/can/proc.c4
-rw-r--r--net/ceph/ceph_hash.c12
-rw-r--r--net/ceph/crypto.c4
-rw-r--r--net/ceph/messenger.c1
-rw-r--r--net/ceph/mon_client.c5
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/drop_monitor.c7
-rw-r--r--net/core/filter.c4
-rw-r--r--net/core/gen_estimator.c6
-rw-r--r--net/core/neighbour.c14
-rw-r--r--net/decnet/dn_route.c8
-rw-r--r--net/decnet/dn_timer.c8
-rw-r--r--net/dsa/dsa2.c25
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/igmp.c20
-rw-r--r--net/ipv4/ipmr.c9
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_output.c8
-rw-r--r--net/ipv4/udp_offload.c49
-rw-r--r--net/ipv6/addrconf.c9
-rw-r--r--net/ipv6/ip6_fib.c10
-rw-r--r--net/ipv6/ip6_flowlabel.c4
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6mr.c9
-rw-r--r--net/ipv6/mcast.c33
-rw-r--r--net/ipv6/output_core.c31
-rw-r--r--net/ipv6/route.c7
-rw-r--r--net/ipv6/udp_offload.c85
-rw-r--r--net/lapb/lapb_timer.c4
-rw-r--r--net/mac80211/agg-rx.c41
-rw-r--r--net/mac80211/agg-tx.c49
-rw-r--r--net/mac80211/ht.c4
-rw-r--r--net/mac80211/ibss.c7
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/led.c11
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c27
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c19
-rw-r--r--net/mac80211/mesh_pathtbl.c3
-rw-r--r--net/mac80211/mlme.c34
-rw-r--r--net/mac80211/ocb.c10
-rw-r--r--net/mac80211/sta_info.c15
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/tx.c29
-rw-r--r--net/ncsi/ncsi-manage.c15
-rw-r--r--net/netfilter/nf_conntrack_expect.c7
-rw-r--r--net/netfilter/nfnetlink_log.c8
-rw-r--r--net/netfilter/xt_IDLETIMER.c7
-rw-r--r--net/netfilter/xt_LED.c8
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/netrom/nr_loopback.c4
-rw-r--r--net/netrom/nr_timer.c2
-rw-r--r--net/nfc/nci/core.c14
-rw-r--r--net/openvswitch/datapath.c14
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/flow_netlink.c16
-rw-r--r--net/packet/af_packet.c37
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/rose/rose_link.c4
-rw-r--r--net/rose/rose_timer.c12
-rw-r--r--net/rxrpc/af_rxrpc.c23
-rw-r--r--net/rxrpc/ar-internal.h103
-rw-r--r--net/rxrpc/call_accept.c2
-rw-r--r--net/rxrpc/call_event.c229
-rw-r--r--net/rxrpc/call_object.c69
-rw-r--r--net/rxrpc/conn_client.c54
-rw-r--r--net/rxrpc/conn_event.c74
-rw-r--r--net/rxrpc/conn_object.c76
-rw-r--r--net/rxrpc/input.c74
-rw-r--r--net/rxrpc/misc.c19
-rw-r--r--net/rxrpc/net_ns.c33
-rw-r--r--net/rxrpc/output.c43
-rw-r--r--net/rxrpc/recvmsg.c12
-rw-r--r--net/rxrpc/sendmsg.c126
-rw-r--r--net/rxrpc/sysctl.c60
-rw-r--r--net/sched/act_csum.c6
-rw-r--r--net/sched/cls_api.c24
-rw-r--r--net/sched/cls_bpf.c31
-rw-r--r--net/sched/sch_cbq.c9
-rw-r--r--net/sched/sch_sfq.c1
-rw-r--r--net/sctp/protocol.c1
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/sctp/stream.c79
-rw-r--r--net/sctp/stream_sched.c25
-rw-r--r--net/sctp/stream_sched_prio.c7
-rw-r--r--net/sctp/stream_sched_rr.c7
-rw-r--r--net/smc/smc_core.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c4
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/svcauth_unix.c4
-rw-r--r--net/tipc/group.c4
-rw-r--r--net/vmw_vsock/vmci_transport.c14
-rw-r--r--net/wireless/Kconfig7
-rw-r--r--net/wireless/lib80211.c11
-rw-r--r--net/wireless/nl80211.c26
-rw-r--r--net/wireless/reg.c42
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/x25/x25_link.c8
-rw-r--r--net/x25/x25_timer.c2
-rw-r--r--net/xfrm/xfrm_state.c9
-rw-r--r--samples/bpf/Makefile3
-rw-r--r--samples/hidraw/Makefile3
-rw-r--r--samples/seccomp/Makefile3
-rw-r--r--samples/sockmap/Makefile3
-rw-r--r--samples/statx/Makefile3
-rw-r--r--samples/uhid/Makefile3
-rw-r--r--scripts/Makefile.build9
-rw-r--r--scripts/Makefile.lib2
-rwxr-xr-xscripts/bloat-o-meter7
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/coccicheck15
-rw-r--r--scripts/coccinelle/api/setup_timer.cocci277
-rwxr-xr-xscripts/faddr2line21
-rw-r--r--scripts/kconfig/symbol.c2
-rwxr-xr-xscripts/kernel-doc25
-rw-r--r--scripts/package/Makefile7
-rw-r--r--security/apparmor/apparmorfs.c24
-rw-r--r--security/apparmor/domain.c68
-rw-r--r--security/apparmor/file.c8
-rw-r--r--security/apparmor/include/lib.h2
-rw-r--r--security/apparmor/label.c2
-rw-r--r--security/apparmor/lib.c16
-rw-r--r--security/apparmor/lsm.c8
-rw-r--r--security/apparmor/mount.c13
-rw-r--r--security/apparmor/policy.c11
-rw-r--r--security/apparmor/policy_ns.c8
-rw-r--r--security/apparmor/policy_unpack.c2
-rw-r--r--security/apparmor/resource.c4
-rw-r--r--security/keys/gc.c24
-rw-r--r--security/keys/internal.h8
-rw-r--r--security/keys/key.c27
-rw-r--r--security/keys/keyring.c20
-rw-r--r--security/keys/permission.c5
-rw-r--r--security/keys/proc.c21
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--sound/core/pcm_lib.c6
-rw-r--r--sound/core/timer_compat.c12
-rw-r--r--sound/core/vmaster.c6
-rw-r--r--sound/hda/hdmi_chmap.c2
-rw-r--r--sound/pci/hda/hda_codec.c10
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_realtek.c9
-rw-r--r--sound/soc/intel/Kconfig5
-rw-r--r--sound/usb/clock.c9
-rw-r--r--sound/usb/line6/driver.c2
-rw-r--r--sound/usb/mixer.c22
-rw-r--r--tools/bpf/bpftool/prog.c31
-rw-r--r--tools/include/uapi/linux/bpf.h8
-rw-r--r--tools/objtool/.gitignore2
-rw-r--r--tools/objtool/Makefile22
-rw-r--r--tools/objtool/arch/x86/Build10
-rw-r--r--tools/objtool/arch/x86/decode.c6
-rw-r--r--tools/objtool/arch/x86/include/asm/inat.h (renamed from tools/objtool/arch/x86/insn/inat.h)12
-rw-r--r--tools/objtool/arch/x86/include/asm/inat_types.h (renamed from tools/objtool/arch/x86/insn/inat_types.h)0
-rw-r--r--tools/objtool/arch/x86/include/asm/insn.h (renamed from tools/objtool/arch/x86/insn/insn.h)2
-rw-r--r--tools/objtool/arch/x86/include/asm/orc_types.h (renamed from tools/objtool/orc_types.h)0
-rw-r--r--tools/objtool/arch/x86/lib/inat.c (renamed from tools/objtool/arch/x86/insn/inat.c)2
-rw-r--r--tools/objtool/arch/x86/lib/insn.c (renamed from tools/objtool/arch/x86/insn/insn.c)4
-rw-r--r--tools/objtool/arch/x86/lib/x86-opcode-map.txt (renamed from tools/objtool/arch/x86/insn/x86-opcode-map.txt)0
-rw-r--r--tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk (renamed from tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk)0
-rw-r--r--tools/objtool/orc.h2
-rwxr-xr-xtools/objtool/sync-check.sh29
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/scripts/Makefile.include2
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c152
-rw-r--r--tools/testing/selftests/x86/5lvl.c177
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/mpx-hw.h4
-rw-r--r--tools/testing/selftests/x86/pkey-helpers.h5
-rw-r--r--tools/testing/selftests/x86/protection_keys.c10
-rw-r--r--virt/kvm/arm/arch_timer.c24
-rw-r--r--virt/kvm/arm/arm.c48
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c9
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c7
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c204
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c5
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-v4.c364
-rw-r--r--virt/kvm/arm/vgic/vgic.c67
-rw-r--r--virt/kvm/arm/vgic/vgic.h10
1131 files changed, 12919 insertions(+), 9488 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 62436bd5f34a..6571fbfdb2a1 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1890,6 +1890,10 @@
[KVM,ARM] Trap guest accesses to GICv3 common
system registers
+ kvm-arm.vgic_v4_enable=
+ [KVM,ARM] Allow use of GICv4 for direct injection of
+ LPIs.
+
kvm-intel.ept= [KVM,Intel] Disable extended page tables
(virtualized MMU) support on capable Intel chips.
Default is 1 (enabled)
@@ -3246,13 +3250,15 @@
instead using the legacy FADT method
profile= [KNL] Enable kernel profiling via /proc/profile
- Format: [schedule,]<number>
+ Format: [<profiletype>,]<number>
+ Param: <profiletype>: "schedule", "sleep", or "kvm"
+ [defaults to kernel profiling]
Param: "schedule" - profile schedule points.
- Param: <number> - step/bucket size as a power of 2 for
- statistical time based profiling.
Param: "sleep" - profile D-state sleeping (millisecs).
Requires CONFIG_SCHEDSTATS
Param: "kvm" - profile VM exits.
+ Param: <number> - step/bucket size as a power of 2 for
+ statistical time based profiling.
prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
before loading.
diff --git a/Documentation/core-api/local_ops.rst b/Documentation/core-api/local_ops.rst
index 1062ddba62c7..2ac3f9f29845 100644
--- a/Documentation/core-api/local_ops.rst
+++ b/Documentation/core-api/local_ops.rst
@@ -177,18 +177,14 @@ Here is a sample module which implements a basic per cpu counter using
printk("Read : CPU %d, count %ld\n", cpu,
local_read(&per_cpu(counters, cpu)));
}
- del_timer(&test_timer);
- test_timer.expires = jiffies + 1000;
- add_timer(&test_timer);
+ mod_timer(&test_timer, jiffies + 1000);
}
static int __init test_init(void)
{
/* initialize the timer that will increment the counter */
- init_timer(&test_timer);
- test_timer.function = do_test_timer;
- test_timer.expires = jiffies + 1;
- add_timer(&test_timer);
+ timer_setup(&test_timer, do_test_timer, 0);
+ mod_timer(&test_timer, jiffies + 1);
return 0;
}
diff --git a/Documentation/devicetree/bindings/display/google,goldfish-fb.txt b/Documentation/devicetree/bindings/display/google,goldfish-fb.txt
new file mode 100644
index 000000000000..751fa9f51e5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/google,goldfish-fb.txt
@@ -0,0 +1,17 @@
+Android Goldfish framebuffer
+
+Android Goldfish framebuffer device used by Android emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-fb"
+- reg : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+ display-controller@1f008000 {
+ compatible = "google,goldfish-fb";
+ interrupts = <0x10>;
+ reg = <0x1f008000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
index f79854783c2c..5bf77f6dd19d 100644
--- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
@@ -129,7 +129,7 @@ Optional properties:
example:
-display@di0 {
+disp0 {
compatible = "fsl,imx-parallel-display";
edid = [edid-data];
interface-pix-fmt = "rgb24";
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
index f248056da24c..bb2075df9b38 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -1,7 +1,9 @@
* Cadence Quad SPI controller
Required properties:
-- compatible : Should be "cdns,qspi-nor".
+- compatible : should be one of the following:
+ Generic default - "cdns,qspi-nor".
+ For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
- reg : Contains two entries, each of which is a tuple consisting of a
physical address and length. The first entry is the address and
length of the controller register set. The second entry is the
@@ -14,6 +16,9 @@ Required properties:
Optional properties:
- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
+- cdns,rclk-en : Flag to indicate that QSPI return clock is used to latch
+ the read data rather than the QSPI clock. Make sure that QSPI return
+ clock is populated on the board before using this property.
Optional subnodes:
Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
index 504291d2e5c2..0ee8edb60efc 100644
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -29,7 +29,7 @@ nand: nand@ff900000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "altr,socfpga-denali-nand";
- reg = <0xff900000 0x100000>, <0xffb80000 0x10000>;
+ reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg";
interrupts = <0 144 4>;
};
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 4cab5d85cf6f..376fa2f50e6b 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -14,6 +14,7 @@ Required properties:
at25df641
at26df081a
en25s64
+ mr25h128
mr25h256
mr25h10
mr25h40
diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
index 840f9405dcf0..56d3668e2c50 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
@@ -1,13 +1,16 @@
* Serial NOR flash controller for MTK MT81xx (and similar)
Required properties:
-- compatible: The possible values are:
- "mediatek,mt2701-nor"
- "mediatek,mt7623-nor"
+- compatible: For mt8173, compatible should be "mediatek,mt8173-nor",
+ and it's the fallback compatible for other SoCs.
+ For every other SoC, should contain both the SoC-specific compatible
+ string and "mediatek,mt8173-nor".
+ The possible values are:
+ "mediatek,mt2701-nor", "mediatek,mt8173-nor"
+ "mediatek,mt2712-nor", "mediatek,mt8173-nor"
+ "mediatek,mt7622-nor", "mediatek,mt8173-nor"
+ "mediatek,mt7623-nor", "mediatek,mt8173-nor"
"mediatek,mt8173-nor"
- For mt8173, compatible should be "mediatek,mt8173-nor".
- For every other SoC, should contain both the SoC-specific compatible string
- and "mediatek,mt8173-nor".
- reg: physical base address and length of the controller's register
- clocks: the phandle of the clocks needed by the nor controller
- clock-names: the names of the clocks
diff --git a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
index d9b655f11048..d4ee4da58463 100644
--- a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
@@ -5,9 +5,13 @@ Required properties:
- compatible: Should be set to one of the following:
marvell,pxa3xx-nand
marvell,armada370-nand
+ marvell,armada-8k-nand
- reg: The register base for the controller
- interrupts: The interrupt to map
- #address-cells: Set to <1> if the node includes partitions
+ - marvell,system-controller: Set to retrieve the syscon node that handles
+ NAND controller related registers (only required
+ with marvell,armada-8k-nand compatible).
Optional properties:
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
index 7e94b802395d..74c118015980 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
@@ -9,6 +9,7 @@ Required Properties:
- "renesas,pwm-r8a7794": for R-Car E2
- "renesas,pwm-r8a7795": for R-Car H3
- "renesas,pwm-r8a7796": for R-Car M3-W
+ - "renesas,pwm-r8a77995": for R-Car D3
- reg: base address and length of the registers block for the PWM.
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt
index 323cf26374cb..c797bc9d77d2 100644
--- a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt
@@ -1,20 +1,20 @@
* i.MX25 Real Time Clock controller
-This binding supports the following chips: i.MX25, i.MX53
-
Required properties:
- compatible: should be: "fsl,imx25-rtc"
- reg: physical base address of the controller and length of memory mapped
region.
+- clocks: should contain the phandle for the rtc clock
- interrupts: rtc alarm interrupt
Optional properties:
-- interrupts: dryice security violation interrupt
+- interrupts: dryice security violation interrupt (second entry)
Example:
-rtc@80056000 {
- compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
- reg = <0x80056000 2000>;
- interrupts = <29 56>;
+rtc@53ffc000 {
+ compatible = "fsl,imx25-rtc";
+ reg = <0x53ffc000 0x4000>;
+ clocks = <&clks 81>;
+ interrupts = <25 56>;
};
diff --git a/Documentation/devicetree/bindings/rtc/pcf85363.txt b/Documentation/devicetree/bindings/rtc/pcf85363.txt
new file mode 100644
index 000000000000..76fdabc59742
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/pcf85363.txt
@@ -0,0 +1,17 @@
+NXP PCF85363 Real Time Clock
+============================
+
+Required properties:
+- compatible: Should contain "nxp,pcf85363".
+- reg: I2C address for chip.
+
+Optional properties:
+- interrupts: IRQ line for the RTC (not implemented).
+
+Example:
+
+pcf85363: pcf85363@51 {
+ compatible = "nxp,pcf85363";
+ reg = <0x51>;
+};
+
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt b/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
new file mode 100644
index 000000000000..09fe8f51476f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
@@ -0,0 +1,21 @@
+Device-Tree bindings for MediaTek SoC based RTC
+
+Required properties:
+- compatible : Should be
+ "mediatek,mt7622-rtc", "mediatek,soc-rtc" : for MT7622 SoC
+- reg : Specifies base physical address and size of the registers;
+- interrupts : Should contain the interrupt for RTC alarm;
+- clocks : Specifies list of clock specifiers, corresponding to
+ entries in clock-names property;
+- clock-names : Should contain "rtc" entries
+
+Example:
+
+rtc: rtc@10212800 {
+ compatible = "mediatek,mt7622-rtc",
+ "mediatek,soc-rtc";
+ reg = <0 0x10212800 0 0x200>;
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_RTC>;
+ clock-names = "rtc";
+};
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
new file mode 100644
index 000000000000..7c170da0d4b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
@@ -0,0 +1,27 @@
+Spreadtrum SC27xx Real Time Clock
+
+Required properties:
+- compatible: should be "sprd,sc2731-rtc".
+- reg: address offset of rtc register.
+- interrupt-parent: phandle for the interrupt controller.
+- interrupts: rtc alarm interrupt.
+
+Example:
+
+ sc2731_pmic: pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@280 {
+ compatible = "sprd,sc2731-rtc";
+ reg = <0x280>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index 27dce08edd73..5f3143f97098 100644
--- a/Documentation/devicetree/bindings/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
@@ -55,7 +55,6 @@ epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
emmicro,em3027 EM Microelectronic EM3027 Real-time Clock
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
-fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
fsl,mma7660 MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
fsl,mpl3115 MPL3115: Absolute Digital Pressure Sensor
@@ -73,7 +72,6 @@ maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
maxim,max6621 PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion
maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
-mc,rv3029c2 Real Time Clock Module with I2C-Bus
mcube,mc3230 mCube 3-axis 8-bit digital accelerometer
memsic,mxc6225 MEMSIC 2-axis 8-bit digital accelerometer
microchip,mcp4531-502 Microchip 7-bit Single I2C Digital Potentiometer (5k)
@@ -142,6 +140,7 @@ microchip,mcp4662-503 Microchip 8-bit Dual I2C Digital Potentiometer with NV Mem
microchip,mcp4662-104 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
microchip,tc654 PWM Fan Speed Controller With Fan Fault Detection
microchip,tc655 PWM Fan Speed Controller With Fan Fault Detection
+microcrystal,rv3029 Real Time Clock Module with I2C-Bus
miramems,da226 MiraMEMS DA226 2-axis 14-bit digital accelerometer
miramems,da280 MiraMEMS DA280 3-axis 14-bit digital accelerometer
miramems,da311 MiraMEMS DA311 3-axis 12-bit digital accelerometer
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
index ce02cebac26a..1b27cebb47f4 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
+++ b/Documentation/devicetree/bindings/usb/usb-device.txt
@@ -4,24 +4,35 @@ Usually, we only use device tree for hard wired USB device.
The reference binding doc is from:
http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps
+
Required properties:
-- compatible: usbVID,PID. The textual representation of VID, PID shall
- be in lower case hexadecimal with leading zeroes suppressed. The
- other compatible strings from the above standard binding could also
- be used, but a device adhering to this binding may leave out all except
- for usbVID,PID.
-- reg: the port number which this device is connecting to, the range
- is 1-31.
+- compatible: "usbVID,PID", where VID is the vendor id and PID the product id.
+ The textual representation of VID and PID shall be in lower case hexadecimal
+ with leading zeroes suppressed. The other compatible strings from the above
+ standard binding could also be used, but a device adhering to this binding
+ may leave out all except for "usbVID,PID".
+- reg: the number of the USB hub port or the USB host-controller port to which
+ this device is attached. The range is 1-255.
+
+
+Required properties for hub nodes with device nodes:
+- #address-cells: shall be 1
+- #size-cells: shall be 0
-Example:
-&usb1 {
+Required properties for host-controller nodes with device nodes:
+- #address-cells: shall be 1
+- #size-cells: shall be 0
+
+
+Example:
+&usb1 { /* host controller */
#address-cells = <1>;
#size-cells = <0>;
- hub: genesys@1 {
+ hub@1 { /* hub connected to port 1 */
compatible = "usb5e3,608";
reg = <1>;
};
-}
+};
diff --git a/Documentation/ia64/xen.txt b/Documentation/ia64/xen.txt
index c61a99f7c8bb..a12c74ce2773 100644
--- a/Documentation/ia64/xen.txt
+++ b/Documentation/ia64/xen.txt
@@ -41,7 +41,7 @@ Getting and Building Xen and Dom0
5. make initrd for Dom0/DomU
# make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
- O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
+ O=$(pwd)/build-linux-2.6.18-xen_ia64
# mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
2.6.18.8-xen --builtin mptspi --builtin mptbase \
--builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 361789df51ec..aa0a776c817a 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -5,7 +5,6 @@ How to get printk format specifiers right
:Author: Randy Dunlap <rdunlap@infradead.org>
:Author: Andrew Murray <amurray@mpc-data.co.uk>
-
Integer types
=============
@@ -45,6 +44,18 @@ return from vsnprintf.
Raw pointer value SHOULD be printed with %p. The kernel supports
the following extended format specifiers for pointer types:
+Pointer Types
+=============
+
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to give a unique identifier without leaking kernel addresses to user
+space. On 64-bit machines the first 32 bits are zeroed. If you _really_
+want the address, see %px below.
+
+::
+
+ %p abcdef12 or 00000000abcdef12
+
Symbols/Function Pointers
=========================
@@ -85,18 +96,32 @@ Examples::
printk("Faulted at %pS\n", (void *)regs->ip);
printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
-
Kernel Pointers
===============
::
- %pK 0x01234567 or 0x0123456789abcdef
+ %pK 01234567 or 0123456789abcdef
For printing kernel pointers which should be hidden from unprivileged
users. The behaviour of ``%pK`` depends on the ``kptr_restrict sysctl`` - see
Documentation/sysctl/kernel.txt for more details.
+Unmodified Addresses
+====================
+
+::
+
+ %px 01234567 or 0123456789abcdef
+
+For printing pointers when you _really_ want to print the address. Please
+consider whether or not you are leaking sensitive information about the
+kernel layout in memory before printing pointers with %px. %px is
+functionally equivalent to %lx. %px is preferred to %lx because it is more
+uniquely grep'able. If, in the future, we need to modify the way the kernel
+handles printing pointers it will be nice to be able to find the call
+sites.
+
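A short sketch pulling together the %p, %pK and %px behaviour described above; show_pointer() is an illustrative helper, not an interface defined by this document.

#include <linux/kernel.h>
#include <linux/printk.h>

static void show_pointer(const void *obj)
{
	pr_info("hashed:     %p\n", obj);	/* safe default, hashed value */
	pr_info("restricted: %pK\n", obj);	/* honours kptr_restrict */
	pr_info("raw:        %px\n", obj);	/* real address, use sparingly */
}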
Struct Resources
================
diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst
index 1b7728b19ea7..645fa9c7388a 100644
--- a/Documentation/process/5.Posting.rst
+++ b/Documentation/process/5.Posting.rst
@@ -213,6 +213,11 @@ The tags in common use are:
which can be found in Documentation/process/submitting-patches.rst. Code without a
proper signoff cannot be merged into the mainline.
+ - Co-Developed-by: states that the patch was also created by another developer
+ along with the original author. This is useful at times when multiple
people work on a single patch. Note that this person also needs to have a
Signed-off-by: line in the patch.
+
- Acked-by: indicates an agreement by another developer (often a
maintainer of the relevant code) that the patch is appropriate for
inclusion into the kernel.
diff --git a/Documentation/scheduler/sched-deadline.txt b/Documentation/scheduler/sched-deadline.txt
index e89e36ec15a5..8ce78f82ae23 100644
--- a/Documentation/scheduler/sched-deadline.txt
+++ b/Documentation/scheduler/sched-deadline.txt
@@ -204,10 +204,17 @@ CONTENTS
It does so by decrementing the runtime of the executing task Ti at a pace equal
to
- dq = -max{ Ui, (1 - Uinact) } dt
+ dq = -max{ Ui / Umax, (1 - Uinact - Uextra) } dt
- where Uinact is the inactive utilization, computed as (this_bq - running_bw),
- and Ui is the bandwidth of task Ti.
+ where:
+
+ - Ui is the bandwidth of task Ti;
+ - Umax is the maximum reclaimable utilization (subject to RT throttling
+ limits);
+ - Uinact is the (per runqueue) inactive utilization, computed as
+ (this_bw - running_bw);
+ - Uextra is the (per runqueue) extra reclaimable utilization
+ (subject to RT throttling limits).
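As a rough illustration of the reclaiming formula above, here is a user-space sketch that evaluates dq with plain doubles; the kernel itself uses fixed-point bandwidth values, and the numbers below are made up.

#include <stdio.h>

static double grub_dq(double u_i, double u_max, double u_inact,
		      double u_extra, double dt)
{
	double a = u_i / u_max;
	double b = 1.0 - u_inact - u_extra;

	/* dq = -max{ Ui / Umax, (1 - Uinact - Uextra) } dt */
	return -((a > b ? a : b) * dt);
}

int main(void)
{
	/* e.g. Ui = 0.25, Umax = 0.95, Uinact = 0.10, Uextra = 0.05, dt = 1 ms */
	printf("dq = %f\n", grub_dq(0.25, 0.95, 0.10, 0.05, 0.001));
	return 0;
}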
Let's now see a trivial example of two deadline tasks with runtime equal
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index 1266eeae45f6..9ce7256c6edb 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -628,12 +628,12 @@ The keyctl syscall functions are:
defined key type will return its data as is. If a key type does not
implement this function, error EOPNOTSUPP will result.
- As much of the data as can be fitted into the buffer will be copied to
- userspace if the buffer pointer is not NULL.
-
- On a successful return, the function will always return the amount of data
- available rather than the amount copied.
+ If the specified buffer is too small, then the size of the buffer required
+ will be returned. Note that in this case, the contents of the buffer may
+ have been overwritten in some undefined way.
+ Otherwise, on success, the function will return the amount of data copied
+ into the buffer.
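A hedged user-space sketch of the two-pass read this return convention suggests, using the libkeyutils keyctl_read() wrapper; read_key_payload() is a hypothetical helper, not part of the documented interface.

#include <keyutils.h>
#include <stdlib.h>

static void *read_key_payload(key_serial_t key, long *lenp)
{
	long len = keyctl_read(key, NULL, 0);	/* NULL buffer: ask for required size */
	char *buf;

	if (len < 0)
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	/* a racing update could still enlarge the key; ignored here */
	len = keyctl_read(key, buf, len);
	if (len < 0) {
		free(buf);
		return NULL;
	}

	*lenp = len;
	return buf;
}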
* Instantiate a partially constructed key::
diff --git a/Documentation/svga.txt b/Documentation/svga.txt
index 119f1515b1ac..b6c2f9acca92 100644
--- a/Documentation/svga.txt
+++ b/Documentation/svga.txt
@@ -67,8 +67,7 @@ The menu looks like::
<name-of-detected-video-adapter> tells what video adapter did Linux detect
-- it's either a generic adapter name (MDA, CGA, HGC, EGA, VGA, VESA VGA [a VGA
with VESA-compliant BIOS]) or a chipset name (e.g., Trident). Direct detection
-of chipsets is turned off by default (see CONFIG_VIDEO_SVGA in chapter 4 to see
-how to enable it if you really want) as it's inherently unreliable due to
+of chipsets is turned off by default as it's inherently unreliable due to
absolutely insane PC design.
"0 0F00 80x25" means that the first menu item (the menu items are numbered
@@ -138,7 +137,7 @@ The ID numbers can be divided to those regions::
0x0f05 VGA 80x30 (480 scans, 16-point font)
0x0f06 VGA 80x34 (480 scans, 14-point font)
0x0f07 VGA 80x60 (480 scans, 8-point font)
- 0x0f08 Graphics hack (see the CONFIG_VIDEO_HACK paragraph below)
+ 0x0f08 Graphics hack (see the VIDEO_GFX_HACK paragraph below)
0x1000 to 0x7fff - modes specified by resolution. The code has a "0xRRCC"
form where RR is a number of rows and CC is a number of columns.
@@ -160,58 +159,22 @@ end of the display.
Options
~~~~~~~
-Some options can be set in the source text (in arch/i386/boot/video.S).
-All of them are simple #define's -- change them to #undef's when you want to
-switch them off. Currently supported:
-
-CONFIG_VIDEO_SVGA - enables autodetection of SVGA cards. This is switched
-off by default as it's a bit unreliable due to terribly bad PC design. If you
-really want to have the adapter autodetected (maybe in case the ``scan`` feature
-doesn't work on your machine), switch this on and don't cry if the results
-are not completely sane. In case you really need this feature, please drop me
-a mail as I think of removing it some day.
-
-CONFIG_VIDEO_VESA - enables autodetection of VESA modes. If it doesn't work
-on your machine (or displays a "Error: Scanning of VESA modes failed" message),
-you can switch it off and report as a bug.
-
-CONFIG_VIDEO_COMPACT - enables compacting of the video mode list. If there
-are more modes with the same screen size, only the first one is kept (see above
-for more info on mode ordering). However, in very strange cases it's possible
-that the first "version" of the mode doesn't work although some of the others
-do -- in this case turn this switch off to see the rest.
-
-CONFIG_VIDEO_RETAIN - enables retaining of screen contents when switching
-video modes. Works only with some boot loaders which leave enough room for the
-buffer. (If you have old LILO, you can adjust heap_end_ptr and loadflags
-in setup.S, but it's better to upgrade the boot loader...)
-
-CONFIG_VIDEO_LOCAL - enables inclusion of "local modes" in the list. The
-local modes are added automatically to the beginning of the list not depending
-on hardware configuration. The local modes are listed in the source text after
-the "local_mode_table:" line. The comment before this line describes the format
-of the table (which also includes a video card name to be displayed on the
-top of the menu).
-
-CONFIG_VIDEO_400_HACK - force setting of 400 scan lines for standard VGA
-modes. This option is intended to be used on certain buggy BIOSes which draw
-some useless logo using font download and then fail to reset the correct mode.
-Don't use unless needed as it forces resetting the video card.
-
-CONFIG_VIDEO_GFX_HACK - includes special hack for setting of graphics modes
-to be used later by special drivers (e.g., 800x600 on IBM ThinkPad -- see
-ftp://ftp.phys.keio.ac.jp/pub/XFree86/800x600/XF86Configs/XF86Config.IBM_TP560).
+Build options for arch/x86/boot/* are selected by the kernel kconfig
+utility and the kernel .config file.
+
+VIDEO_GFX_HACK - includes a special hack for setting graphics modes
+to be used later by special drivers.
It allows setting _any_ BIOS mode, including graphic ones, and forcing a specific
text screen resolution instead of peeking it from BIOS variables. Don't use
unless you think you know what you're doing. To activate this setup, use
-mode number 0x0f08 (see section 3).
+mode number 0x0f08 (see the Mode IDs section above).
Still doesn't work?
~~~~~~~~~~~~~~~~~~~
When the mode detection doesn't work (e.g., the mode list is incorrect or
the machine hangs instead of displaying the menu), try to switch off some of
-the configuration options listed in section 4. If it fails, you can still use
+the configuration options listed under "Options". If it fails, you can still use
your kernel with the video mode set directly via the kernel parameter.
In either case, please send me a bug report containing what _exactly_
@@ -228,10 +191,6 @@ contains the most common video BIOS bug called "incorrect vertical display
end setting". Adding 0x8000 to the mode ID might fix the problem. Unfortunately,
this must be done manually -- no autodetection mechanisms are available.
-If you have a VGA card and your display still looks as on EGA, your BIOS
-is probably broken and you need to set the CONFIG_VIDEO_400_HACK switch to
-force setting of the correct mode.
-
History
~~~~~~~
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index b920423f88cb..5025ff9307e6 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -158,10 +158,6 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
value lower than this limit will be ignored and the old configuration will be
retained.
-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
==============================================================
dirty_expire_centisecs
@@ -181,9 +177,6 @@ generating disk writes will itself start writing out dirty data.
The total available memory is not equal to total system memory.
-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
==============================================================
dirty_writeback_centisecs
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index ec3b46e27b7a..0a0930ab4156 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -82,7 +82,7 @@ Documentation/memory-barriers.txt
- SMP 배리어 ì§ë§žì¶”기.
- 메모리 배리어 ì‹œí€€ìŠ¤ì˜ ì˜ˆ.
- ì½ê¸° 메모리 배리어 vs 로드 예측.
- - ì´í–‰ì„±
+ - Multicopy ì›ìžì„±.
(*) ëª…ì‹œì  ì»¤ë„ ë°°ë¦¬ì–´.
@@ -656,6 +656,11 @@ Documentation/RCU/rcu_dereference.txt 파ì¼ì„ ì£¼ì˜ ê¹Šê²Œ ì½ì–´ 주시기 ë
í•´ì¤ë‹ˆë‹¤.
+ë°ì´í„° ì˜ì¡´ì„±ì— ì˜í•´ 제공ë˜ëŠ” ì´ ìˆœì„œê·œì¹™ì€ ì´ë¥¼ í¬í•¨í•˜ê³  있는 CPU ì—
+지역ì ìž„ì„ ì•Œì•„ë‘시기 ë°”ëžë‹ˆë‹¤. ë” ë§Žì€ ì •ë³´ë¥¼ 위해선 "Multicopy ì›ìžì„±"
+ì„¹ì…˜ì„ ì°¸ê³ í•˜ì„¸ìš”.
+
+
ë°ì´í„° ì˜ì¡´ì„± 배리어는 매우 중요한ë°, 예를 들어 RCU 시스템ì—ì„œ 그렇습니다.
include/linux/rcupdate.h ì˜ rcu_assign_pointer() 와 rcu_dereference() 를
참고하세요. 여기서 ë°ì´í„° ì˜ì¡´ì„± 배리어는 RCU ë¡œ 관리ë˜ëŠ” í¬ì¸í„°ì˜ íƒ€ê²Ÿì„ í˜„ìž¬
@@ -864,38 +869,10 @@ CPU 는 b ë¡œë¶€í„°ì˜ ë¡œë“œ 오í¼ë ˆì´ì…˜ì´ a ë¡œë¶€í„°ì˜ ë¡œë“œ 오í¼ë ˆ
주어진 if ë¬¸ì˜ then 절과 else ì ˆì—게만 (그리고 ì´ ë‘ ì ˆ ë‚´ì—ì„œ 호출ë˜ëŠ”
함수들ì—게까지) ì ìš©ë˜ì§€, ì´ if ë¬¸ì„ ë’¤ë”°ë¥´ëŠ” 코드ì—는 ì ìš©ë˜ì§€ 않습니다.
-마지막으로, 컨트롤 ì˜ì¡´ì„±ì€ ì´í–‰ì„± (transitivity) ì„ ì œê³µí•˜ì§€ -않습니다-. ì´ê±´
-'x' 와 'y' ê°€ 둘 다 0 ì´ë¼ëŠ” ì´ˆê¸°ê°’ì„ ê°€ì¡Œë‹¤ëŠ” 가정 í•˜ì˜ ë‘ê°œì˜ ì˜ˆì œë¡œ
-ë³´ì´ê² ìŠµë‹ˆë‹¤:
-
- CPU 0 CPU 1
- ======================= =======================
- r1 = READ_ONCE(x); r2 = READ_ONCE(y);
- if (r1 > 0) if (r2 > 0)
- WRITE_ONCE(y, 1); WRITE_ONCE(x, 1);
-
- assert(!(r1 == 1 && r2 == 1));
-
-ì´ ë‘ CPU 예제ì—ì„œ assert() ì˜ ì¡°ê±´ì€ í•­ìƒ ì°¸ì¼ ê²ƒìž…ë‹ˆë‹¤. 그리고, 만약 컨트롤
-ì˜ì¡´ì„±ì´ ì´í–‰ì„±ì„ (실제로는 그러지 않지만) 보장한다면, 다ìŒì˜ CPU ê°€ 추가ë˜ì–´ë„
-ì•„ëž˜ì˜ assert() ì¡°ê±´ì€ ì°¸ì´ ë ê²ƒìž…니다:
- CPU 2
- =====================
- WRITE_ONCE(x, 2);
+컨트롤 ì˜ì¡´ì„±ì— ì˜í•´ 제공ë˜ëŠ” ì´ ìˆœì„œê·œì¹™ì€ ì´ë¥¼ í¬í•¨í•˜ê³  있는 CPU ì—
+지역ì ìž…니다. ë” ë§Žì€ ì •ë³´ë¥¼ 위해선 "Multicopy ì›ìžì„±" ì„¹ì…˜ì„ ì°¸ê³ í•˜ì„¸ìš”.
- assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
-
-하지만 컨트롤 ì˜ì¡´ì„±ì€ ì´í–‰ì„±ì„ 제공하지 -않기- 때문ì—, ì„¸ê°œì˜ CPU 예제가 실행
-ì™„ë£Œëœ í›„ì— ìœ„ì˜ assert() ì˜ ì¡°ê±´ì€ ê±°ì§“ìœ¼ë¡œ í‰ê°€ë  수 있습니다. ì„¸ê°œì˜ CPU
-예제가 순서를 지키길 ì›í•œë‹¤ë©´, CPU 0 와 CPU 1 ì½”ë“œì˜ ë¡œë“œì™€ 스토어 사ì´, "if"
-문 바로 다ìŒì— smp_mb()를 넣어야 합니다. ë” ë‚˜ì•„ê°€ì„œ, ìµœì´ˆì˜ ë‘ CPU 예제는
-매우 위험하므로 사용ë˜ì§€ 않아야 합니다.
-
-ì´ ë‘ê°œì˜ ì˜ˆì œëŠ” ë‹¤ìŒ ë…¼ë¬¸:
-http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와
-ì´ ì‚¬ì´íŠ¸: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html ì— ë‚˜ì˜¨ LB 와 WWC
-리트머스 테스트입니다.
요약하ìžë©´:
@@ -930,8 +907,8 @@ http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와
(*) 컨트롤 ì˜ì¡´ì„±ì€ 보통 다른 íƒ€ìž…ì˜ ë°°ë¦¬ì–´ë“¤ê³¼ ì§ì„ 맞춰 사용ë©ë‹ˆë‹¤.
- (*) 컨트롤 ì˜ì¡´ì„±ì€ ì´í–‰ì„±ì„ 제공하지 -않습니다-. ì´í–‰ì„±ì´ 필요하다면,
- smp_mb() 를 사용하세요.
+ (*) 컨트롤 ì˜ì¡´ì„±ì€ multicopy ì›ìžì„±ì„ 제공하지 -않습니다-. 모든 CPU 들ì´
+ 특정 스토어를 ë™ì‹œì— 보길 ì›í•œë‹¤ë©´, smp_mb() 를 사용하세요.
(*) 컴파ì¼ëŸ¬ëŠ” 컨트롤 ì˜ì¡´ì„±ì„ ì´í•´í•˜ê³  있지 않습니다. ë”°ë¼ì„œ 컴파ì¼ëŸ¬ê°€
ì—¬ëŸ¬ë¶„ì˜ ì½”ë“œë¥¼ ë§ê°€ëœ¨ë¦¬ì§€ ì•Šë„ë¡ í•˜ëŠ”ê±´ ì—¬ëŸ¬ë¶„ì´ í•´ì•¼ 하는 ì¼ìž…니다.
@@ -943,13 +920,14 @@ SMP 배리어 ì§ë§žì¶”기
CPU ê°„ ìƒí˜¸ìž‘ìš©ì„ ë‹¤ë£° ë•Œì— ì¼ë¶€ íƒ€ìž…ì˜ ë©”ëª¨ë¦¬ 배리어는 í•­ìƒ ì§ì„ 맞춰
사용ë˜ì–´ì•¼ 합니다. ì ì ˆí•˜ê²Œ ì§ì„ 맞추지 ì•Šì€ ì½”ë“œëŠ” ì‚¬ì‹¤ìƒ ì—ëŸ¬ì— ê°€ê¹ìŠµë‹ˆë‹¤.
-범용 ë°°ë¦¬ì–´ë“¤ì€ ë²”ìš© 배리어ë¼ë¦¬ë„ ì§ì„ 맞추지만 ì´í–‰ì„±ì´ 없는 ëŒ€ë¶€ë¶„ì˜ ë‹¤ë¥¸
-íƒ€ìž…ì˜ ë°°ë¦¬ì–´ë“¤ê³¼ë„ ì§ì„ 맞춥니다. ACQUIRE 배리어는 RELEASE 배리어와 ì§ì„
-맞춥니다만, 둘 다 범용 배리어를 í¬í•¨í•´ 다른 ë°°ë¦¬ì–´ë“¤ê³¼ë„ ì§ì„ 맞출 수 있습니다.
-쓰기 배리어는 ë°ì´í„° ì˜ì¡´ì„± 배리어나 컨트롤 ì˜ì¡´ì„±, ACQUIRE 배리어, RELEASE
-배리어, ì½ê¸° 배리어, ë˜ëŠ” 범용 배리어와 ì§ì„ 맞춥니다. 비슷하게 ì½ê¸° 배리어나
-컨트롤 ì˜ì¡´ì„±, ë˜ëŠ” ë°ì´í„° ì˜ì¡´ì„± 배리어는 쓰기 배리어나 ACQUIRE 배리어,
-RELEASE 배리어, ë˜ëŠ” 범용 배리어와 ì§ì„ 맞추는ë°, 다ìŒê³¼ 같습니다:
+범용 ë°°ë¦¬ì–´ë“¤ì€ ë²”ìš© 배리어ë¼ë¦¬ë„ ì§ì„ 맞추지만 multicopy ì›ìžì„±ì´ 없는
+ëŒ€ë¶€ë¶„ì˜ ë‹¤ë¥¸ íƒ€ìž…ì˜ ë°°ë¦¬ì–´ë“¤ê³¼ë„ ì§ì„ 맞춥니다. ACQUIRE 배리어는 RELEASE
+배리어와 ì§ì„ 맞춥니다만, 둘 다 범용 배리어를 í¬í•¨í•´ 다른 ë°°ë¦¬ì–´ë“¤ê³¼ë„ ì§ì„
+맞출 수 있습니다. 쓰기 배리어는 ë°ì´í„° ì˜ì¡´ì„± 배리어나 컨트롤 ì˜ì¡´ì„±, ACQUIRE
+배리어, RELEASE 배리어, ì½ê¸° 배리어, ë˜ëŠ” 범용 배리어와 ì§ì„ 맞춥니다.
+비슷하게 ì½ê¸° 배리어나 컨트롤 ì˜ì¡´ì„±, ë˜ëŠ” ë°ì´í„° ì˜ì¡´ì„± 배리어는 쓰기 배리어나
+ACQUIRE 배리어, RELEASE 배리어, ë˜ëŠ” 범용 배리어와 ì§ì„ 맞추는ë°, 다ìŒê³¼
+같습니다:
CPU 1 CPU 2
=============== ===============
@@ -975,7 +953,7 @@ RELEASE 배리어, ë˜ëŠ” 범용 배리어와 ì§ì„ 맞추는ë°, 다ìŒê³¼ ê°™
=============== ===============================
r1 = READ_ONCE(y);
<범용 배리어>
- WRITE_ONCE(y, 1); if (r2 = READ_ONCE(x)) {
+ WRITE_ONCE(x, 1); if (r2 = READ_ONCE(x)) {
<ë¬µì‹œì  ì»¨íŠ¸ë¡¤ ì˜ì¡´ì„±>
WRITE_ONCE(y, 1);
}
@@ -1361,57 +1339,74 @@ A ì˜ ë¡œë“œ ë‘개가 ëª¨ë‘ B ì˜ ë¡œë“œ ë’¤ì— ìžˆì§€ë§Œ, 서로 다른 ê°’ì
: : +-------+
-ì´í–‰ì„±
-------
+MULTICOPY ì›ìžì„±
+----------------
-ì´í–‰ì„±(transitivity)ì€ ì‹¤ì œì˜ ì»´í“¨í„° 시스템ì—ì„œ í•­ìƒ ì œê³µë˜ì§€ëŠ” 않는, 순서
-ë§žì¶”ê¸°ì— ëŒ€í•œ ìƒë‹¹ížˆ ì§ê´€ì ì¸ ê°œë…입니다. 다ìŒì˜ 예가 ì´í–‰ì„±ì„ ë³´ì—¬ì¤ë‹ˆë‹¤:
+Multicopy ì›ìžì„±ì€ ì‹¤ì œì˜ ì»´í“¨í„° 시스템ì—ì„œ í•­ìƒ ì œê³µë˜ì§€ëŠ” 않는, 순서 맞추기ì—
+대한 ìƒë‹¹ížˆ ì§ê´€ì ì¸ ê°œë…으로, 특정 스토어가 모든 CPU 들ì—게 ë™ì‹œì— 보여지게
+ë¨ì„, 달리 ë§í•˜ìžë©´ 모든 CPU ë“¤ì´ ëª¨ë“  ìŠ¤í† ì–´ë“¤ì´ ë³´ì—¬ì§€ëŠ” 순서를 ë™ì˜í•˜ê²Œ ë˜ëŠ”
+것입니다. 하지만, 완전한 multicopy ì›ìžì„±ì˜ ì‚¬ìš©ì€ ê°€ì¹˜ìžˆëŠ” 하드웨어
+최ì í™”ë“¤ì„ ë¬´ëŠ¥í•˜ê²Œ 만들어버릴 수 있어서, 보다 ì™„í™”ëœ í˜•íƒœì˜ ``다른 multicopy
+ì›ìžì„±'' ë¼ëŠ” ì´ë¦„ì˜, 특정 스토어가 모든 -다른- CPU 들ì—게는 ë™ì‹œì— 보여지게
+하는 ë³´ìž¥ì„ ëŒ€ì‹  제공합니다. ì´ ë¬¸ì„œì˜ ë’·ë¶€ë¶„ë“¤ì€ ì´ ì™„í™”ëœ í˜•íƒœì— ëŒ€í•´ 논하게
+ë©ë‹ˆë‹¤ë§Œ, 단순히 ``multicopy ì›ìžì„±'' ì´ë¼ê³  부르겠습니다.
+
+다ìŒì˜ 예가 multicopy ì›ìžì„±ì„ 보입니다:
CPU 1 CPU 2 CPU 3
======================= ======================= =======================
{ X = 0, Y = 0 }
- STORE X=1 LOAD X STORE Y=1
- <범용 배리어> <범용 배리어>
- LOAD Y LOAD X
-
-CPU 2 ì˜ X 로드가 1ì„ ë¦¬í„´í–ˆê³  Y 로드가 0ì„ ë¦¬í„´í–ˆë‹¤ê³  해봅시다. ì´ëŠ” CPU 2 ì˜
-X 로드가 CPU 1 ì˜ X 스토어 ë’¤ì— ì´ë£¨ì–´ì¡Œê³  CPU 2 ì˜ Y 로드는 CPU 3 ì˜ Y 스토어
-ì „ì— ì´ë£¨ì–´ì¡ŒìŒì„ ì˜ë¯¸í•©ë‹ˆë‹¤. 그럼 "CPU 3 ì˜ X 로드는 0ì„ ë¦¬í„´í•  수 있나요?"
-
-CPU 2 ì˜ X 로드는 CPU 1 ì˜ ìŠ¤í† ì–´ í›„ì— ì´ë£¨ì–´ì¡Œìœ¼ë‹ˆ, CPU 3 ì˜ X 로드는 1ì„
-리턴하는게 ìžì—°ìŠ¤ëŸ½ìŠµë‹ˆë‹¤. ì´ëŸ° ìƒê°ì´ ì´í–‰ì„±ì˜ í•œ 예입니다: CPU A ì—ì„œ 실행ëœ
-로드가 CPU B ì—ì„œì˜ ê°™ì€ ë³€ìˆ˜ì— ëŒ€í•œ 로드를 뒤따른다면, CPU A ì˜ ë¡œë“œëŠ” CPU B
-ì˜ ë¡œë“œê°€ ë‚´ë†“ì€ ê°’ê³¼ 같거나 ê·¸ í›„ì˜ ê°’ì„ ë‚´ë†“ì•„ì•¼ 합니다.
-
-리눅스 커ë„ì—ì„œ 범용 ë°°ë¦¬ì–´ì˜ ì‚¬ìš©ì€ ì´í–‰ì„±ì„ 보장합니다. ë”°ë¼ì„œ, ì•žì˜ ì˜ˆì—ì„œ
-CPU 2 ì˜ X 로드가 1ì„, Y 로드는 0ì„ ë¦¬í„´í–ˆë‹¤ë©´, CPU 3 ì˜ X 로드는 반드시 1ì„
-리턴합니다.
-
-하지만, ì½ê¸°ë‚˜ 쓰기 ë°°ë¦¬ì–´ì— ëŒ€í•´ì„œëŠ” ì´í–‰ì„±ì´ 보장ë˜ì§€ -않습니다-. 예를 들어,
-ì•žì˜ ì˜ˆì—ì„œ CPU 2 ì˜ ë²”ìš© 배리어가 아래처럼 ì½ê¸° 배리어로 ë°”ë€ ê²½ìš°ë¥¼ ìƒê°í•´
-봅시다:
+ STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1)
+ <범용 배리어> <ì½ê¸° 배리어>
+ STORE Y=r1 LOAD X
+
+CPU 2 ì˜ Y ë¡œì˜ ìŠ¤í† ì–´ì— ì‚¬ìš©ë˜ëŠ” X ë¡œë“œì˜ ê²°ê³¼ê°€ 1 ì´ì—ˆê³  CPU 3 ì˜ Y 로드가
+1ì„ ë¦¬í„´í–ˆë‹¤ê³  해봅시다. ì´ëŠ” CPU 1 ì˜ X ë¡œì˜ ìŠ¤í† ì–´ê°€ CPU 2 ì˜ X 로부터ì˜
+로드를 앞서고 CPU 2 ì˜ Y ë¡œì˜ ìŠ¤í† ì–´ê°€ CPU 3 ì˜ Y ë¡œë¶€í„°ì˜ ë¡œë“œë¥¼ 앞섬ì„
+ì˜ë¯¸í•©ë‹ˆë‹¤. ë˜í•œ, ì—¬ê¸°ì„œì˜ ë©”ëª¨ë¦¬ ë°°ë¦¬ì–´ë“¤ì€ CPU 2 ê°€ ìžì‹ ì˜ 로드를 ìžì‹ ì˜
+스토어 ì „ì— ìˆ˜í–‰í•˜ê³ , CPU 3 ê°€ Y ë¡œë¶€í„°ì˜ ë¡œë“œë¥¼ X ë¡œë¶€í„°ì˜ ë¡œë“œ ì „ì— ìˆ˜í–‰í•¨ì„
+보장합니다. 그럼 "CPU 3 ì˜ X ë¡œë¶€í„°ì˜ ë¡œë“œëŠ” 0 ì„ ë¦¬í„´í•  수 있ì„까요?"
+
+CPU 3 ì˜ X 로드가 CPU 2 ì˜ ë¡œë“œë³´ë‹¤ ë’¤ì— ì´ë£¨ì–´ì¡Œìœ¼ë¯€ë¡œ, CPU 3 ì˜ X 로부터ì˜
+로드는 1 ì„ ë¦¬í„´í•œë‹¤ê³  예ìƒí•˜ëŠ”게 당연합니다. ì´ëŸ° 예ìƒì€ multicopy
+ì›ìžì„±ìœ¼ë¡œë¶€í„° 나옵니다: CPU B ì—ì„œ ìˆ˜í–‰ëœ ë¡œë“œê°€ CPU A ì˜ ê°™ì€ ë³€ìˆ˜ë¡œë¶€í„°ì˜
+로드를 뒤따른다면 (그리고 CPU A ê°€ ìžì‹ ì´ ì½ì€ 값으로 먼저 해당 ë³€ìˆ˜ì— ìŠ¤í† ì–´
+하지 않았다면) multicopy ì›ìžì„±ì„ 제공하는 시스템ì—서는, CPU B ì˜ ë¡œë“œê°€ CPU A
+ì˜ ë¡œë“œì™€ ê°™ì€ ê°’ ë˜ëŠ” ê·¸ 나중 ê°’ì„ ë¦¬í„´í•´ì•¼ë§Œ 합니다. 하지만, 리눅스 커ë„ì€
+ì‹œìŠ¤í…œë“¤ì´ multicopy ì›ìžì„±ì„ 제공할 ê²ƒì„ ìš”êµ¬í•˜ì§€ 않습니다.
+
+ì•žì˜ ë²”ìš© 메모리 ë°°ë¦¬ì–´ì˜ ì‚¬ìš©ì€ ëª¨ë“  multicopy ì›ìžì„±ì˜ ë¶€ì¡±ì„ ë³´ìƒí•´ì¤ë‹ˆë‹¤.
+ì•žì˜ ì˜ˆì—ì„œ, CPU 2 ì˜ X ë¡œë¶€í„°ì˜ ë¡œë“œê°€ 1 ì„ ë¦¬í„´í–ˆê³  CPU 3 ì˜ Y 로부터ì˜
+로드가 1 ì„ ë¦¬í„´í–ˆë‹¤ë©´, CPU 3 ì˜ X ë¡œë¶€í„°ì˜ ë¡œë“œëŠ” 1ì„ ë¦¬í„´í•´ì•¼ë§Œ 합니다.
+
+하지만, ì˜ì¡´ì„±, ì½ê¸° 배리어, 쓰기 배리어는 í•­ìƒ non-multicopy ì›ìžì„±ì„ ë³´ìƒí•´
+주지는 않습니다. 예를 들어, CPU 2 ì˜ ë²”ìš© 배리어가 ì•žì˜ ì˜ˆì—ì„œ 사ë¼ì ¸ì„œ
+아래처럼 ë°ì´í„° ì˜ì¡´ì„±ë§Œ 남게 ë˜ì—ˆë‹¤ê³  해봅시다:
CPU 1 CPU 2 CPU 3
======================= ======================= =======================
{ X = 0, Y = 0 }
- STORE X=1 LOAD X STORE Y=1
- <ì½ê¸° 배리어> <범용 배리어>
- LOAD Y LOAD X
-
-ì´ ì½”ë“œëŠ” ì´í–‰ì„±ì„ 갖지 않습니다: ì´ ì˜ˆì—서는, CPU 2 ì˜ X 로드가 1ì„
-리턴하고, Y 로드는 0ì„ ë¦¬í„´í•˜ì§€ë§Œ CPU 3 ì˜ X 로드가 0ì„ ë¦¬í„´í•˜ëŠ” ê²ƒë„ ì™„ì „ížˆ
-합법ì ìž…니다.
-
-CPU 2 ì˜ ì½ê¸° 배리어가 ìžì‹ ì˜ ì½ê¸°ëŠ” 순서를 맞춰줘ë„, CPU 1 ì˜ ìŠ¤í† ì–´ì™€ì˜
-순서를 맞춰준다고는 보장할 수 없다는게 핵심입니다. ë”°ë¼ì„œ, CPU 1 ê³¼ CPU 2 ê°€
-버í¼ë‚˜ ìºì‹œë¥¼ 공유하는 시스템ì—ì„œ ì´ ì˜ˆì œ 코드가 실행ëœë‹¤ë©´, CPU 2 는 CPU 1 ì´
-ì“´ ê°’ì— ì¢€ 빨리 접근할 수 ìžˆì„ ê²ƒìž…ë‹ˆë‹¤. ë”°ë¼ì„œ CPU 1 ê³¼ CPU 2 ì˜ ì ‘ê·¼ìœ¼ë¡œ
-ì¡°í•©ëœ ìˆœì„œë¥¼ 모든 CPU ê°€ ë™ì˜í•  수 있ë„ë¡ í•˜ê¸° 위해 범용 배리어가 필요합니다.
-
-범용 배리어는 "글로벌 ì´í–‰ì„±"ì„ ì œê³µí•´ì„œ, 모든 CPU ë“¤ì´ ì˜¤í¼ë ˆì´ì…˜ë“¤ì˜ 순서ì—
-ë™ì˜í•˜ê²Œ í•  것입니다. 반면, release-acquire ì¡°í•©ì€ "로컬 ì´í–‰ì„±" 만ì„
-제공해서, 해당 ì¡°í•©ì´ ì‚¬ìš©ëœ CPU ë“¤ë§Œì´ í•´ë‹¹ ì•¡ì„¸ìŠ¤ë“¤ì˜ ì¡°í•©ëœ ìˆœì„œì— ë™ì˜í•¨ì´
-보장ë©ë‹ˆë‹¤. 예를 들어, 존경스런 Herman Hollerith ì˜ C 코드로 ë³´ë©´:
+ STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1)
+ <ë°ì´í„° ì˜ì¡´ì„±> <ì½ê¸° 배리어>
+ STORE Y=r1 LOAD X (reads 0)
+
+ì´ ë³€í™”ëŠ” non-multicopy ì›ìžì„±ì´ 만연하게 합니다: ì´ ì˜ˆì—ì„œ, CPU 2 ì˜ X
+ë¡œë¶€í„°ì˜ ë¡œë“œê°€ 1ì„ ë¦¬í„´í•˜ê³ , CPU 3 ì˜ Y ë¡œë¶€í„°ì˜ ë¡œë“œê°€ 1 ì„ ë¦¬í„´í•˜ëŠ”ë°, CPU 3
+ì˜ X ë¡œë¶€í„°ì˜ ë¡œë“œê°€ 0 ì„ ë¦¬í„´í•˜ëŠ”ê²Œ 완전히 합법ì ìž…니다.
+
+핵심ì€, CPU 2 ì˜ ë°ì´í„° ì˜ì¡´ì„±ì´ ìžì‹ ì˜ 로드와 스토어를 순서짓지만, CPU 1 ì˜
+ìŠ¤í† ì–´ì— ëŒ€í•œ 순서는 보장하지 않는다는 것입니다. ë”°ë¼ì„œ, ì´ ì˜ˆì œê°€ CPU 1 ê³¼
+CPU 2 ê°€ 스토어 버í¼ë‚˜ í•œ ìˆ˜ì¤€ì˜ ìºì‹œë¥¼ 공유하는, multicopy ì›ìžì„±ì„ 제공하지
+않는 시스템ì—ì„œ 수행ëœë‹¤ë©´ CPU 2 는 CPU 1 ì˜ ì“°ê¸°ì— ì´ë¥¸ ì ‘ê·¼ì„ í•  수ë„
+있습니다. ë”°ë¼ì„œ, 모든 CPU ë“¤ì´ ì—¬ëŸ¬ ì ‘ê·¼ë“¤ì˜ ì¡°í•©ëœ ìˆœì„œì— ëŒ€í•´ì„œ ë™ì˜í•˜ê²Œ
+하기 위해서는 범용 배리어가 필요합니다.
+
+범용 배리어는 non-multicopy ì›ìžì„±ë§Œ ë³´ìƒí•  수 있는게 아니ë¼, -모든- CPU 들ì´
+-모든- 오í¼ë ˆì´ì…˜ë“¤ì˜ 순서를 ë™ì¼í•˜ê²Œ ì¸ì‹í•˜ê²Œ 하는 추가ì ì¸ 순서 보장ì„
+만들어냅니다. 반대로, release-acquire ì§ì˜ ì—°ê²°ì€ ì´ëŸ° 추가ì ì¸ 순서는
+제공하지 않는ë°, 해당 ì—°ê²°ì— ë“¤ì–´ìžˆëŠ” CPU ë“¤ë§Œì´ ë©”ëª¨ë¦¬ ì ‘ê·¼ì˜ ì¡°í•©ëœ ìˆœì„œì—
+대해 ë™ì˜í•  것으로 보장ë¨ì„ ì˜ë¯¸í•©ë‹ˆë‹¤. 예를 들어, 존경스런 Herman Hollerith
+ì˜ ì½”ë“œë¥¼ C 코드로 변환하면:
int u, v, x, y, z;
@@ -1444,8 +1439,7 @@ CPU 2 ì˜ ì½ê¸° 배리어가 ìžì‹ ì˜ ì½ê¸°ëŠ” 순서를 맞춰줘ë„, CPU 1
}
cpu0(), cpu1(), 그리고 cpu2() 는 smp_store_release()/smp_load_acquire() ìŒì˜
-ì—°ê²°ì„ í†µí•œ 로컬 ì´í–‰ì„±ì— ë™ì°¸í•˜ê³  있으므로, 다ìŒê³¼ ê°™ì€ ê²°ê³¼ëŠ” 나오지 ì•Šì„
-ê²ë‹ˆë‹¤:
+ì—°ê²°ì— ì°¸ì—¬ë˜ì–´ 있으므로, 다ìŒê³¼ ê°™ì€ ê²°ê³¼ëŠ” 나오지 ì•Šì„ ê²ë‹ˆë‹¤:
r0 == 1 && r1 == 1 && r2 == 1
@@ -1454,8 +1448,9 @@ cpu0() ì˜ ì“°ê¸°ë¥¼ ë´ì•¼ë§Œ 하므로, 다ìŒê³¼ ê°™ì€ ê²°ê³¼ë„ ì—†ì„ ê²ë
r1 == 1 && r5 == 0
-하지만, release-acquire 타ë™ì„±ì€ ë™ì°¸í•œ CPU 들ì—만 ì ìš©ë˜ë¯€ë¡œ cpu3() ì—는
-ì ìš©ë˜ì§€ 않습니다. ë”°ë¼ì„œ, 다ìŒê³¼ ê°™ì€ ê²°ê³¼ê°€ 가능합니다:
+하지만, release-acquire ì— ì˜í•´ 제공ë˜ëŠ” 순서는 해당 ì—°ê²°ì— ë™ì°¸í•œ CPU 들ì—만
+ì ìš©ë˜ë¯€ë¡œ cpu3() ì—, ì ì–´ë„ 스토어들 외ì—는 ì ìš©ë˜ì§€ 않습니다. ë”°ë¼ì„œ, 다ìŒê³¼
+ê°™ì€ ê²°ê³¼ê°€ 가능합니다:
r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0
@@ -1482,8 +1477,8 @@ u ë¡œì˜ ìŠ¤í† ì–´ë¥¼ cpu1() ì˜ v ë¡œë¶€í„°ì˜ ë¡œë“œ ë’¤ì— ì¼ì–´ë‚œ 것으ë¡
ì´ëŸ° 결과는 ì–´ë–¤ ê²ƒë„ ìž¬ë°°ì¹˜ ë˜ì§€ 않는, ìˆœì°¨ì  ì¼ê´€ì„±ì„ 가진 ê°€ìƒì˜
시스템ì—ì„œë„ ì¼ì–´ë‚  수 있ìŒì„ 기억해 ë‘시기 ë°”ëžë‹ˆë‹¤.
-다시 ë§í•˜ì§€ë§Œ, ë‹¹ì‹ ì˜ ì½”ë“œê°€ 글로벌 ì´í–‰ì„±ì„ 필요로 한다면, 범용 배리어를
-사용하십시오.
+다시 ë§í•˜ì§€ë§Œ, ë‹¹ì‹ ì˜ ì½”ë“œê°€ 모든 오í¼ë ˆì´ì…˜ë“¤ì˜ 완전한 순서를 필요로 한다면,
+범용 배리어를 사용하십시오.
==================
@@ -3046,6 +3041,9 @@ AMD64 Architecture Programmer's Manual Volume 2: System Programming
Chapter 7.1: Memory-Access Ordering
Chapter 7.4: Buffering and Combining Memory Writes
+ARM Architecture Reference Manual (ARMv8, for ARMv8-A architecture profile)
+ Chapter B2: The AArch64 Application Level Memory Model
+
IA-32 Intel Architecture Software Developer's Manual, Volume 3:
System Programming Guide
Chapter 7.1: Locked Atomic Operations
@@ -3057,6 +3055,8 @@ The SPARC Architecture Manual, Version 9
Appendix D: Formal Specification of the Memory Models
Appendix J: Programming with the Memory Models
+Storage in the PowerPC (Stone and Fitzgerald)
+
UltraSPARC Programmer Reference Manual
Chapter 5: Memory Accesses and Cacheability
Chapter 15: Sparc-V9 Memory Models
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
index 8d5830eab26a..4f0c9fc40365 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
@@ -64,6 +64,8 @@ Groups:
-EINVAL: Inconsistent restored data
-EFAULT: Invalid guest ram access
-EBUSY: One or more VCPUS are running
+ -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
+ state is not available
KVM_DEV_ARM_VGIC_GRP_ITS_REGS
Attributes:
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
index fa46dcb347bc..ecb0d2dadfb7 100644
--- a/Documentation/x86/protection-keys.txt
+++ b/Documentation/x86/protection-keys.txt
@@ -1,5 +1,10 @@
-Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
-which will be found on future Intel CPUs.
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature
+which is found on Intel's Skylake "Scalable Processor" Server CPUs.
+It will be available in future non-server parts.
+
+For anyone wishing to test or use this feature, it is available in
+Amazon's EC2 C5 instances and is known to work there using an Ubuntu
+17.04 image.
Memory Protection Keys provides a mechanism for enforcing page-based
protections, but without requiring modification of the page tables
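For anyone experimenting with the feature, a minimal user-space sketch assuming a glibc new enough (2.27+) to expose the pkey_* wrappers; the values and the lack of error handling are purely illustrative.

#define _GNU_SOURCE
#include <sys/mman.h>

int main(void)
{
	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	int pkey = pkey_alloc(0, 0);

	if (page == MAP_FAILED || pkey < 0)
		return 1;

	/* tag the page with the key, then revoke access via the PKRU register only */
	pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);
	pkey_set(pkey, PKEY_DISABLE_ACCESS);	/* loads/stores to the page now fault */

	pkey_set(pkey, 0);			/* access restored, no page-table change */
	pkey_free(pkey);
	return 0;
}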
diff --git a/MAINTAINERS b/MAINTAINERS
index 290e13fa9b77..77d819b458a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1590,10 +1590,13 @@ F: drivers/rtc/rtc-armada38x.c
ARM/Mediatek RTC DRIVER
M: Eddie Huang <eddie.huang@mediatek.com>
+M: Sean Wang <sean.wang@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
F: drivers/rtc/rtc-mt6397.c
+F: drivers/rtc/rtc-mt7622.c
ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
@@ -7473,7 +7476,7 @@ JFS FILESYSTEM
M: Dave Kleikamp <shaggy@kernel.org>
L: jfs-discussion@lists.sourceforge.net
W: http://jfs.sourceforge.net/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
+T: git git://github.com/kleikamp/linux-shaggy.git
S: Maintained
F: Documentation/filesystems/jfs.txt
F: fs/jfs/
@@ -9328,9 +9331,9 @@ F: drivers/gpu/drm/mxsfb/
F: Documentation/devicetree/bindings/display/mxsfb-drm.txt
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Hyong-Youb Kim <hykim@myri.com>
+M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
-W: https://www.myricom.com/support/downloads/myri10ge.html
+W: https://www.cspi.com/ethernet-products/support/downloads/
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
diff --git a/Makefile b/Makefile
index efb942ad0b55..f761bf475ba5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
-PATCHLEVEL = 14
+PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -132,7 +132,7 @@ ifneq ($(KBUILD_OUTPUT),)
# check that the output directory actually exists
saved-output := $(KBUILD_OUTPUT)
KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
- && /bin/pwd)
+ && pwd)
$(if $(KBUILD_OUTPUT),, \
$(error failed to create output directory "$(saved-output)"))
@@ -474,6 +474,38 @@ ifneq ($(KBUILD_SRC),)
$(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
endif
+ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# Clang uses _MergedGlobals as an optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
ifeq ($(config-targets),1)
# ===========================================================================
# *config targets only - make sure prerequisites are updated, and descend
@@ -684,38 +716,6 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
KBUILD_CFLAGS += $(stackp-flag)
-ifeq ($(cc-name),clang)
-ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
-endif
-ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
-endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
-endif
-
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
@@ -1009,7 +1009,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
PHONY += $(vmlinux-dirs)
$(vmlinux-dirs): prepare scripts
- $(Q)$(MAKE) $(build)=$@
+ $(Q)$(MAKE) $(build)=$@ need-builtin=1
define filechk_kernel.release
echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
@@ -1337,8 +1337,9 @@ package-dir := scripts/package
$(Q)$(MAKE) $(build)=$(package-dir) $@
%pkg: include/config/kernel.release FORCE
$(Q)$(MAKE) $(build)=$(package-dir) $@
-rpm: include/config/kernel.release FORCE
- $(Q)$(MAKE) $(build)=$(package-dir) $@
+rpm: rpm-pkg
+ @echo " WARNING: \"rpm\" target will be removed after Linux 4.18"
+ @echo " Please use \"rpm-pkg\" instead."
# Brief documentation of the typical targets used
@@ -1546,9 +1547,9 @@ clean: $(clean-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '*.ko.*' -o -name '*.dtb' -o -name '*.dtb.S' \
- -o -name '*.dwo' \
+ -o -name '*.dwo' -o -name '*.lst' \
-o -name '*.su' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 5da0aec8ce90..438b10c44d73 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -65,9 +65,9 @@ srmcons_do_receive_chars(struct tty_port *port)
}
static void
-srmcons_receive_chars(unsigned long data)
+srmcons_receive_chars(struct timer_list *t)
{
- struct srmcons_private *srmconsp = (struct srmcons_private *)data;
+ struct srmcons_private *srmconsp = from_timer(srmconsp, t, timer);
struct tty_port *port = &srmconsp->port;
unsigned long flags;
int incr = 10;
@@ -206,8 +206,7 @@ static const struct tty_operations srmcons_ops = {
static int __init
srmcons_init(void)
{
- setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
- (unsigned long)&srmcons_singleton);
+ timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
if (srm_is_registered_console) {
struct tty_driver *driver;
int err;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5c7adf100a58..9d5fd00d9e91 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -39,7 +39,7 @@ config ARC
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
- select PERF_USE_VMALLOC
+ select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index e114000a84f5..74d070cd3c13 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -16,6 +16,12 @@
ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
interrupt-parent = <&mb_intc>;
+ creg_rst: reset-controller@11220 {
+ compatible = "snps,axs10x-reset";
+ #reset-cells = <1>;
+ reg = <0x11220 0x4>;
+ };
+
i2sclk: i2sclk@100a0 {
compatible = "snps,axs10x-i2s-pll-clock";
reg = <0x100a0 0x10>;
@@ -73,6 +79,8 @@
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;
+ resets = <&creg_rst 5>;
+ reset-names = "stmmaceth";
};
ehci@0x40000 {
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index b1c56d35f2a9..49bfbd879caa 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -11,12 +11,14 @@
/* Build Configuration Registers */
#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
+#define ARC_REG_ERP_CTRL 0x3F /* ARCv2 Error protection control */
#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
#define ARC_REG_CRC_BCR 0x62
#define ARC_REG_VECBASE_BCR 0x68
#define ARC_REG_PERIBASE_BCR 0x69
#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */
#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_ERP_BUILD 0xc7 /* ARCv2 Error protection Build: ECC/Parity */
#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
#define ARC_REG_SLC_BCR 0xce
#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
@@ -32,11 +34,14 @@
#define ARC_REG_D_UNCACH_BCR 0x6A
#define ARC_REG_BPU_BCR 0xc0
#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_LPB_BUILD 0xE9 /* ARCv2 Loop Buffer Build */
#define ARC_REG_RTT_BCR 0xF2
#define ARC_REG_IRQ_BCR 0xF3
+#define ARC_REG_MICRO_ARCH_BCR 0xF9 /* ARCv2 Product revision */
#define ARC_REG_SMART_BCR 0xFF
#define ARC_REG_CLUSTER_BCR 0xcf
#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
+#define ARC_REG_LPB_CTRL 0x488 /* ARCv2 Loop Buffer control */
/* Common for ARCompact and ARCv2 status register */
#define ARC_REG_STATUS32 0x0A
@@ -229,6 +234,32 @@ struct bcr_bpu_arcv2 {
#endif
};
+/* Error Protection Build: ECC/Parity */
+struct bcr_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
+#else
+ unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
+#endif
+};
+
+/* Error Protection Control */
+struct ctl_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1;
+#else
+ unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27;
+#endif
+};
+
+struct bcr_lpb {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, entries:8, ver:8;
+#else
+ unsigned int ver:8, entries:8, pad:16;
+#endif
+};
+
struct bcr_generic {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int info:24, ver:8;
@@ -270,7 +301,7 @@ struct cpuinfo_arc {
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
- fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
+ fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 2ce24e74f879..8aec462d90fb 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -336,15 +336,12 @@ static int arc_pmu_add(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
- idx = find_first_zero_bit(pmu_cpu->used_mask,
- arc_pmu->n_counters);
- if (idx == arc_pmu->n_counters)
- return -EAGAIN;
-
- __set_bit(idx, pmu_cpu->used_mask);
- hwc->idx = idx;
- }
+ idx = ffz(pmu_cpu->used_mask[0]);
+ if (idx == arc_pmu->n_counters)
+ return -EAGAIN;
+
+ __set_bit(idx, pmu_cpu->used_mask);
+ hwc->idx = idx;
write_aux_reg(ARC_REG_PCT_INDEX, idx);
@@ -377,21 +374,22 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
struct perf_sample_data data;
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
struct pt_regs *regs;
- int active_ints;
+ unsigned int active_ints;
int idx;
arc_pmu_disable(&arc_pmu->pmu);
active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
+ if (!active_ints)
+ goto done;
regs = get_irq_regs();
- for (idx = 0; idx < arc_pmu->n_counters; idx++) {
- struct perf_event *event = pmu_cpu->act_counter[idx];
+ do {
+ struct perf_event *event;
struct hw_perf_event *hwc;
- if (!(active_ints & (1 << idx)))
- continue;
+ idx = __ffs(active_ints);
/* Reset interrupt flag by writing of 1 */
write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
@@ -404,19 +402,22 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+ event = pmu_cpu->act_counter[idx];
hwc = &event->hw;
WARN_ON_ONCE(hwc->idx != idx);
arc_perf_event_update(event, &event->hw, event->hw.idx);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (!arc_pmu_event_set_period(event))
- continue;
+ if (arc_pmu_event_set_period(event)) {
+ if (perf_event_overflow(event, &data, regs))
+ arc_pmu_stop(event, 0);
+ }
- if (perf_event_overflow(event, &data, regs))
- arc_pmu_stop(event, 0);
- }
+ active_ints &= ~(1U << idx);
+ } while (active_ints);
+done:
arc_pmu_enable(&arc_pmu->pmu);
return IRQ_HANDLED;
@@ -461,6 +462,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
pr_err("This core does not have performance counters!\n");
return -ENODEV;
}
+ BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index fb83844daeea..7ef7d9a8ff89 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,8 +199,10 @@ static void read_arc_build_cfg_regs(void)
unsigned int exec_ctrl;
READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_iss_exist = 1;
- cpu->extn.dual_iss_enb = exec_ctrl & 1;
+ cpu->extn.dual_enb = exec_ctrl & 1;
+
+ /* dual issue always present for this core */
+ cpu->extn.dual = 1;
}
}
@@ -253,7 +255,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
cpu_id, cpu->name, cpu->details,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
- IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
+ IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -293,11 +295,26 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
- "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu->bpu.num_cache, cpu->bpu.num_pred);
+ if (is_isa_arcv2()) {
+ struct bcr_lpb lpb;
+
+ READ_BCR(ARC_REG_LPB_BUILD, lpb);
+ if (lpb.ver) {
+ unsigned int ctl;
+ ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+
+ n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+ lpb.entries,
+ IS_DISABLED_RUN(!ctl));
+ }
+ }
+
+ n += scnprintf(buf + n, len - n, "\n");
return buf;
}
@@ -326,6 +343,24 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+ if (is_isa_arcv2()) {
+
+ /* Error Protection: ECC/Parity */
+ struct bcr_erp erp;
+ READ_BCR(ARC_REG_ERP_BUILD, erp);
+
+ if (erp.ver) {
+ struct ctl_erp ctl;
+ READ_BCR(ARC_REG_ERP_CTRL, ctl);
+
+ /* inverted bits: 0 means enabled */
+ n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
+ IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
+ IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
+ IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
+ }
+ }
+
n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
EF_ARC_OSABI_CURRENT >> 8,
EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 8ceefbf72fb0..4097764fea23 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -762,21 +762,23 @@ void read_decode_mmu_bcr(void)
tmp = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (tmp >> 24);
- if (mmu->ver <= 2) {
- mmu2 = (struct bcr_mmu_1_2 *)&tmp;
- mmu->pg_sz_k = TO_KB(0x2000);
- mmu->sets = 1 << mmu2->sets;
- mmu->ways = 1 << mmu2->ways;
- mmu->u_dtlb = mmu2->u_dtlb;
- mmu->u_itlb = mmu2->u_itlb;
- } else if (mmu->ver == 3) {
- mmu3 = (struct bcr_mmu_3 *)&tmp;
- mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
- mmu->sets = 1 << mmu3->sets;
- mmu->ways = 1 << mmu3->ways;
- mmu->u_dtlb = mmu3->u_dtlb;
- mmu->u_itlb = mmu3->u_itlb;
- mmu->sasid = mmu3->sasid;
+ if (is_isa_arcompact()) {
+ if (mmu->ver <= 2) {
+ mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+ mmu->pg_sz_k = TO_KB(0x2000);
+ mmu->sets = 1 << mmu2->sets;
+ mmu->ways = 1 << mmu2->ways;
+ mmu->u_dtlb = mmu2->u_dtlb;
+ mmu->u_itlb = mmu2->u_itlb;
+ } else {
+ mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ mmu->u_dtlb = mmu3->u_dtlb;
+ mmu->u_itlb = mmu3->u_itlb;
+ mmu->sasid = mmu3->sasid;
+ }
} else {
mmu4 = (struct bcr_mmu_4 *)&tmp;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -818,8 +820,9 @@ int pae40_exist_but_not_enab(void)
void arc_mmu_init(void)
{
- char str[256];
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ char str[256];
+ int compat = 0;
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
@@ -834,15 +837,21 @@ void arc_mmu_init(void)
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
- /* For efficiency sake, kernel is compile time built for a MMU ver
- * This must match the hardware it is running on.
- * Linux built for MMU V2, if run on MMU V1 will break down because V1
- * hardware doesn't understand cmds such as WriteNI, or IVUTLB
- * On the other hand, Linux built for V1 if run on MMU V2 will do
- * un-needed workarounds to prevent memcpy thrashing.
- * Similarly MMU V3 has new features which won't work on older MMU
+ /*
+ * Ensure that MMU features assumed by kernel exist in hardware.
+ * For older ARC700 cpus, it has to be exact match, since the MMU
+ * revisions were not backwards compatible (MMUv3 TLB layout changed
+ * so even if kernel for v2 didn't use any new cmds of v3, it would
+ * still not work).
+ * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
+ * (will run older software).
*/
- if (mmu->ver != CONFIG_ARC_MMU_VER) {
+ if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
+ compat = 1;
+ else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
+ compat = 1;
+
+ if (!compat) {
panic("MMU ver %d doesn't match kernel built for %d...\n",
mmu->ver, CONFIG_ARC_MMU_VER);
}
diff --git a/arch/arc/plat-axs10x/Kconfig b/arch/arc/plat-axs10x/Kconfig
index c54d1ae57fe0..4e0df7b7a248 100644
--- a/arch/arc/plat-axs10x/Kconfig
+++ b/arch/arc/plat-axs10x/Kconfig
@@ -14,6 +14,8 @@ menuconfig ARC_PLAT_AXS10X
select MIGHT_HAVE_PCI
select GENERIC_IRQ_CHIP
select GPIOLIB
+ select AXS101 if ISA_ARCOMPACT
+ select AXS103 if ISA_ARCV2
help
Support for the ARC AXS10x Software Development Platforms.
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index cf14ebc36916..f1ac6790da5f 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -111,13 +111,6 @@ static void __init axs10x_early_init(void)
axs10x_enable_gpio_intc_wire();
- /*
- * Reset ethernet IP core.
- * TODO: get rid of this quirk after axs10x reset driver (or simple
- * reset driver) will be available in upstream.
- */
- iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
-
scnprintf(mb, 32, "MainBoard v%d", mb_rev);
axs10x_print_board_ver(CREG_MB_VER, mb);
}
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 12b8c8f8ec07..17685e19aed8 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1776,9 +1776,9 @@ config DEBUG_UART_8250_FLOW_CONTROL
default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC
config DEBUG_UNCOMPRESS
- bool
+ bool "Enable decompressor debugging via DEBUG_LL output"
depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
- default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
+ depends on DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
(!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
!DEBUG_BRCMSTB_UART
help
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index ad301f107dd2..bc8d4bbd82e2 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -518,4 +518,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1: .inst 0xde02
+#else
+1: .inst 0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ .pushsection .rodata.str, "aMS", %progbits, 1
+2: .asciz "\msg"
+ .popsection
+ .pushsection __bug_table, "aw"
+ .align 2
+ .word 1b, 2b
+ .hword \line
+ .popsection
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 2a029bceaf2f..1a7a17b2a1ba 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -221,7 +221,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
}
#define __HAVE_ARCH_PTE_SPECIAL
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 1c462381c225..150ece66ddf3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -232,6 +232,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_valid_user(pte) \
(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+ pteval_t needed = mask;
+
+ if (write)
+ mask |= L_PTE_RDONLY;
+
+ return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index d523cd8439a3..7f4d80c2db6b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -300,6 +300,8 @@
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
+ tst r1, #0xcf
+ bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -314,6 +316,7 @@
@ after ldm {}^
add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@ Note that we don't need to do clrex here as clearing the local
@@ -329,6 +332,8 @@
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
+ tst r1, #0xcf
+ bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -341,6 +346,7 @@
.endif
add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index f24628db5409..e2bd35b6780c 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -4,6 +4,7 @@
#
source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -23,6 +24,8 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select ARM_GIC
+ select ARM_GIC_V3
+ select ARM_GIC_V3_ITS
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
@@ -36,6 +39,8 @@ config KVM
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
+ select IRQ_BYPASS_MANAGER
+ select HAVE_KVM_IRQ_BYPASS
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f550abd64a25..48de846f2246 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -32,6 +32,7 @@ obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-v3.o
+obj-y += $(KVM)/arm/vgic/vgic-v4.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index c1cd80ecc219..3b73813c6b04 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -305,7 +305,7 @@ static void n2100_restart(enum reboot_mode mode, const char *cmd)
static struct timer_list power_button_poll_timer;
-static void power_button_poll(unsigned long dummy)
+static void power_button_poll(struct timer_list *unused)
{
if (gpio_get_value(N2100_POWER_BUTTON) == 0) {
ctrl_alt_del();
@@ -336,8 +336,7 @@ static int __init n2100_request_gpios(void)
pr_err("could not set power GPIO as input\n");
}
/* Set up power button poll timer */
- init_timer(&power_button_poll_timer);
- power_button_poll_timer.function = power_button_poll;
+ timer_setup(&power_button_poll_timer, power_button_poll, 0);
power_button_poll_timer.expires = jiffies + (HZ / 10);
add_timer(&power_button_poll_timer);
return 0;
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index ac97a4599034..0f5c99941a7d 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -179,10 +179,10 @@ static int power_button_countdown;
/* Must hold the button down for at least this many counts to be processed */
#define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
-static void dsmg600_power_handler(unsigned long data);
+static void dsmg600_power_handler(struct timer_list *unused);
static DEFINE_TIMER(dsmg600_power_timer, dsmg600_power_handler);
-static void dsmg600_power_handler(unsigned long data)
+static void dsmg600_power_handler(struct timer_list *unused)
{
/* This routine is called twice per second to check the
* state of the power button.
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 435602085408..76dfff03cb71 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -202,10 +202,10 @@ static int power_button_countdown;
/* Must hold the button down for at least this many counts to be processed */
#define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
-static void nas100d_power_handler(unsigned long data);
+static void nas100d_power_handler(struct timer_list *unused);
static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler);
-static void nas100d_power_handler(unsigned long data)
+static void nas100d_power_handler(struct timer_list *unused)
{
/* This routine is called twice per second to check the
* state of the power button.
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index 3f5863de766a..39eae10ac8de 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -172,7 +172,7 @@ static struct platform_device db88f5281_nand_flash = {
static void __iomem *db88f5281_7seg;
static struct timer_list db88f5281_timer;
-static void db88f5281_7seg_event(unsigned long data)
+static void db88f5281_7seg_event(struct timer_list *unused)
{
static int count = 0;
writel(0, db88f5281_7seg + (count << 4));
@@ -189,7 +189,7 @@ static int __init db88f5281_7seg_init(void)
printk(KERN_ERR "Failed to ioremap db88f5281_7seg\n");
return -EIO;
}
- setup_timer(&db88f5281_timer, db88f5281_7seg_event, 0);
+ timer_setup(&db88f5281_timer, db88f5281_7seg_event, 0);
mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
}
diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c
index b592f79a1742..fa8e7dd4d898 100644
--- a/arch/arm/mach-pxa/cm-x255.c
+++ b/arch/arm/mach-pxa/cm-x255.c
@@ -14,7 +14,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand-gpio.h>
-
+#include <linux/gpio/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -176,6 +176,17 @@ static inline void cmx255_init_nor(void) {}
#endif
#if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE)
+
+static struct gpiod_lookup_table cmx255_nand_gpiod_table = {
+ .dev_id = "gpio-nand",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH),
+ },
+};
+
static struct resource cmx255_nand_resource[] = {
[0] = {
.start = PXA_CS1_PHYS,
@@ -198,11 +209,6 @@ static struct mtd_partition cmx255_nand_parts[] = {
};
static struct gpio_nand_platdata cmx255_nand_platdata = {
- .gpio_nce = GPIO_NAND_CS,
- .gpio_cle = GPIO_NAND_CLE,
- .gpio_ale = GPIO_NAND_ALE,
- .gpio_rdy = GPIO_NAND_RB,
- .gpio_nwp = -1,
.parts = cmx255_nand_parts,
.num_parts = ARRAY_SIZE(cmx255_nand_parts),
.chip_delay = 25,
@@ -220,6 +226,7 @@ static struct platform_device cmx255_nand = {
static void __init cmx255_init_nand(void)
{
+ gpiod_add_lookup_table(&cmx255_nand_gpiod_table);
platform_device_register(&cmx255_nand);
}
#else
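
The cm-x255 change above replaces raw GPIO numbers in the gpio-nand platform data with a gpiod_lookup_table registered by the board code, so the driver can request named GPIO descriptors instead. A rough sketch of what the consumer side looks like under this scheme (the probe function and connection IDs below are illustrative assumptions, not code from the gpio-nand driver):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_nand_probe(struct platform_device *pdev)
{
        struct gpio_desc *nce, *rdy;

        /* con_ids are matched against the "nce"/"rdy" entries in the board's lookup table */
        nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
        if (IS_ERR(nce))
                return PTR_ERR(nce);

        rdy = devm_gpiod_get(&pdev->dev, "rdy", GPIOD_IN);
        if (IS_ERR(rdy))
                return PTR_ERR(rdy);

        gpiod_set_value(nce, 0);                /* deassert chip enable */
        if (gpiod_get_value(rdy))
                dev_dbg(&pdev->dev, "NAND ready\n");

        return 0;
}
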
diff --git a/arch/arm/mach-uniphier/Makefile b/arch/arm/mach-uniphier/Makefile
index 6bea3d3a2dd7..e69de29bb2d1 100644
--- a/arch/arm/mach-uniphier/Makefile
+++ b/arch/arm/mach-uniphier/Makefile
@@ -1 +0,0 @@
-obj- += dummy.o
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 35ff45470dbf..fc3b44028cfb 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
.val = PMD_SECT_USER,
.set = "USR",
}, {
- .mask = L_PMD_SECT_RDONLY,
- .val = L_PMD_SECT_RDONLY,
+ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
#elif __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 81d4482b6861..a1f11a7ee81b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -629,8 +629,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~L_PMD_SECT_RDONLY,
- .prot = L_PMD_SECT_RDONLY,
+ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c9530b5b5ca8..149d05fb9421 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -345,7 +345,6 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 13f81f971390..2257dfcc44cc 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -4,6 +4,7 @@
#
source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -36,6 +37,8 @@ config KVM
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
+ select IRQ_BYPASS_MANAGER
+ select HAVE_KVM_IRQ_BYPASS
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 861acbbac385..87c4f7ae24de 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -27,6 +27,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 1e714329fe8a..8a211d95821f 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -166,7 +166,7 @@ int check_nmi_wdt_touched(void)
return 1;
}
-static void nmi_wdt_timer(unsigned long data)
+static void nmi_wdt_timer(struct timer_list *unused)
{
if (check_nmi_wdt_touched())
nmi_wdt_keepalive();
@@ -180,8 +180,7 @@ static int __init init_nmi_wdt(void)
nmi_wdt_start();
nmi_active = true;
- init_timer(&ntimer);
- ntimer.function = nmi_wdt_timer;
+ timer_setup(&ntimer, nmi_wdt_timer, 0);
ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
add_timer(&ntimer);
diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c
index a23f48181fd6..442bdeee6bd7 100644
--- a/arch/m68k/amiga/amisound.c
+++ b/arch/m68k/amiga/amisound.c
@@ -65,7 +65,7 @@ void __init amiga_init_sound(void)
#endif
}
-static void nosound( unsigned long ignored );
+static void nosound(struct timer_list *unused);
static DEFINE_TIMER(sound_timer, nosound);
void amiga_mksound( unsigned int hz, unsigned int ticks )
@@ -107,7 +107,7 @@ void amiga_mksound( unsigned int hz, unsigned int ticks )
}
-static void nosound( unsigned long ignored )
+static void nosound(struct timer_list *unused)
{
/* turn off DMA for audio channel 2 */
custom.dmacon = DMAF_AUD2;
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index d17668649641..388780797f7d 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -48,9 +48,9 @@ static unsigned long mac_bell_phasepersample;
* some function protos
*/
static void mac_init_asc( void );
-static void mac_nosound( unsigned long );
+static void mac_nosound(struct timer_list *);
static void mac_quadra_start_bell( unsigned int, unsigned int, unsigned int );
-static void mac_quadra_ring_bell( unsigned long );
+static void mac_quadra_ring_bell(struct timer_list *);
static void mac_av_start_bell( unsigned int, unsigned int, unsigned int );
static void ( *mac_special_bell )( unsigned int, unsigned int, unsigned int );
@@ -216,7 +216,7 @@ void mac_mksound( unsigned int freq, unsigned int length )
/*
* regular ASC: stop whining ..
*/
-static void mac_nosound( unsigned long ignored )
+static void mac_nosound(struct timer_list *unused)
{
mac_asc_regs[ ASC_ENABLE ] = 0;
}
@@ -270,7 +270,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
* already load the wave table, or at least call this one...
* This piece keeps reloading the wave table until done.
*/
-static void mac_quadra_ring_bell( unsigned long ignored )
+static void mac_quadra_ring_bell(struct timer_list *unused)
{
int i, count = mac_asc_samplespersec / HZ;
unsigned long flags;
diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h
index 99472d2ca340..97559fe0b953 100644
--- a/arch/microblaze/include/asm/mmu_context_mm.h
+++ b/arch/microblaze/include/asm/mmu_context_mm.h
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/mm_types.h>
+#include <linux/sched.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index 09ba7e894bad..d8787c9a499e 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -35,6 +35,3 @@ dtb-$(CONFIG_DT_NONE) += \
bcm97435svmb.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/cavium-octeon/Makefile b/arch/mips/boot/dts/cavium-octeon/Makefile
index f5d01b31df50..24a8efcd7b03 100644
--- a/arch/mips/boot/dts/cavium-octeon/Makefile
+++ b/arch/mips/boot/dts/cavium-octeon/Makefile
@@ -2,6 +2,3 @@
dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/img/Makefile b/arch/mips/boot/dts/img/Makefile
index 3eb2597a4d6c..441a3c16efb0 100644
--- a/arch/mips/boot/dts/img/Makefile
+++ b/arch/mips/boot/dts/img/Makefile
@@ -3,6 +3,3 @@ dtb-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += boston.dtb
dtb-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb
obj-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb.o
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/ingenic/Makefile b/arch/mips/boot/dts/ingenic/Makefile
index 035769269cbc..6a31759839b4 100644
--- a/arch/mips/boot/dts/ingenic/Makefile
+++ b/arch/mips/boot/dts/ingenic/Makefile
@@ -3,6 +3,3 @@ dtb-$(CONFIG_JZ4740_QI_LB60) += qi_lb60.dtb
dtb-$(CONFIG_JZ4780_CI20) += ci20.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile
index 00e2e540ed3f..51ab9c1dff42 100644
--- a/arch/mips/boot/dts/lantiq/Makefile
+++ b/arch/mips/boot/dts/lantiq/Makefile
@@ -2,6 +2,3 @@
dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
index 480af498a9dd..3508720cb6d9 100644
--- a/arch/mips/boot/dts/mti/Makefile
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -3,6 +3,3 @@ dtb-$(CONFIG_MIPS_MALTA) += malta.dtb
dtb-$(CONFIG_LEGACY_BOARD_SEAD3) += sead3.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/netlogic/Makefile b/arch/mips/boot/dts/netlogic/Makefile
index 2b99450d7433..d630b27950f0 100644
--- a/arch/mips/boot/dts/netlogic/Makefile
+++ b/arch/mips/boot/dts/netlogic/Makefile
@@ -6,6 +6,3 @@ dtb-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb
dtb-$(CONFIG_DT_XLP_RVP) += xlp_rvp.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/ni/Makefile b/arch/mips/boot/dts/ni/Makefile
index 6cd9c606f025..9e2c9faede47 100644
--- a/arch/mips/boot/dts/ni/Makefile
+++ b/arch/mips/boot/dts/ni/Makefile
@@ -1,4 +1 @@
dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445) += 169445.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/pic32/Makefile b/arch/mips/boot/dts/pic32/Makefile
index a139a0fbd7b7..ba9bcef8fde9 100644
--- a/arch/mips/boot/dts/pic32/Makefile
+++ b/arch/mips/boot/dts/pic32/Makefile
@@ -5,6 +5,3 @@ dtb-$(CONFIG_DTB_PIC32_NONE) += \
pic32mzda_sk.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
index 639adeac90af..4451cf45b0ad 100644
--- a/arch/mips/boot/dts/qca/Makefile
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -5,6 +5,3 @@ dtb-$(CONFIG_ATH79) += ar9331_dpt_module.dtb
dtb-$(CONFIG_ATH79) += ar9331_dragino_ms14.dtb
dtb-$(CONFIG_ATH79) += ar9331_omega.dtb
dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile
index 323c8bcfb602..94bee5b38b53 100644
--- a/arch/mips/boot/dts/ralink/Makefile
+++ b/arch/mips/boot/dts/ralink/Makefile
@@ -7,6 +7,3 @@ dtb-$(CONFIG_DTB_OMEGA2P) += omega2p.dtb
dtb-$(CONFIG_DTB_VOCORE2) += vocore2.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile
index 616322405ade..9987e0e378c5 100644
--- a/arch/mips/boot/dts/xilfpga/Makefile
+++ b/arch/mips/boot/dts/xilfpga/Makefile
@@ -2,6 +2,3 @@
dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9e9e94415d08..1a508a74d48d 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -552,7 +552,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_WRITE);
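
Several architectures in this series drop __HAVE_ARCH_PMD_WRITE in favour of defining pmd_write to itself, so the generic header can test the function name directly with the preprocessor instead of a separate feature macro. The shape of that idiom is roughly the sketch below (simplified from the asm-generic fallback pattern, not copied from this patch):

/* arch header, e.g. arch/<arch>/include/asm/pgtable.h */
#define pmd_write pmd_write                     /* make the override visible to #ifndef */
static inline int pmd_write(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_WRITE);  /* arch-specific RW bit */
}

/* generic header fallback, built only when no architecture override exists */
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
        BUG();                                  /* callers must only reach this with an override */
        return 0;
}
#endif
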
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index a8103f6972cd..5d89e1ec5fcc 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -156,7 +156,7 @@ static const struct file_operations pvc_scroll_proc_fops = {
.write = pvc_scroll_proc_write,
};
-void pvc_proc_timerfunc(unsigned long data)
+void pvc_proc_timerfunc(struct timer_list *unused)
{
if (scroll_dir < 0)
pvc_move(DISPLAY|RIGHT);
@@ -197,7 +197,7 @@ static int __init pvc_proc_init(void)
if (proc_entry == NULL)
goto error;
- setup_timer(&timer, pvc_proc_timerfunc, 0UL);
+ timer_setup(&timer, pvc_proc_timerfunc, 0);
return 0;
error:
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 063de44675ce..ee0bd50f754b 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -36,10 +36,10 @@ void mips_display_message(const char *str)
}
}
-static void scroll_display_message(unsigned long unused);
+static void scroll_display_message(struct timer_list *unused);
static DEFINE_TIMER(mips_scroll_timer, scroll_display_message);
-static void scroll_display_message(unsigned long unused)
+static void scroll_display_message(struct timer_list *unused)
{
mips_display_message(&display_string[display_count++]);
if (display_count == max_display_count)
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 27a2dd616a7d..c46bf29ae412 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -91,7 +91,7 @@ static int pdc_console_setup(struct console *co, char *options)
#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
-static void pdc_console_poll(unsigned long unused);
+static void pdc_console_poll(struct timer_list *unused);
static DEFINE_TIMER(pdc_console_timer, pdc_console_poll);
static struct tty_port tty_port;
@@ -135,7 +135,7 @@ static const struct tty_operations pdc_console_tty_ops = {
.chars_in_buffer = pdc_console_tty_chars_in_buffer,
};
-static void pdc_console_poll(unsigned long unused)
+static void pdc_console_poll(struct timer_list *unused)
{
int data, count = 0;
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 9a677cd5997f..44697817ccc6 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1005,7 +1005,6 @@ static inline int pmd_protnone(pmd_t pmd)
}
#endif /* CONFIG_NUMA_BALANCING */
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 7f74c282710f..fad0e6ff460f 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -21,11 +21,6 @@
#include <asm/opal.h>
/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS 32
-
-/*
* Compatibility macros for IMC devices
*/
#define IMC_DTB_COMPAT "ibm,opal-in-memory-counters"
@@ -125,4 +120,5 @@ enum {
extern int init_imc_pmu(struct device_node *parent,
struct imc_pmu *pmu_ptr, int pmu_id);
extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
#endif /* __ASM_POWERPC_IMC_PMU_H */
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 602e0fde19b4..8bdc2f96c5d6 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -735,8 +735,8 @@ static __init void cpufeatures_cpu_quirks(void)
*/
if ((version & 0xffffff00) == 0x004e0100)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
- else if ((version & 0xffffefff) == 0x004e0200)
- cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
+ else if ((version & 0xffffefff) == 0x004e0201)
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
static void __init cpufeatures_setup_finished(void)
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index e3c5f75d137c..8cdd852aedd1 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -188,7 +188,7 @@ static void tau_timeout(void * info)
local_irq_restore(flags);
}
-static void tau_timeout_smp(unsigned long unused)
+static void tau_timeout_smp(struct timer_list *unused)
{
/* schedule ourselves to be run again */
@@ -230,7 +230,7 @@ int __init TAU_init(void)
/* first, set up the window shrinking timer */
- setup_timer(&tau_timer, tau_timeout_smp, 0UL);
+ timer_setup(&tau_timer, tau_timeout_smp, 0);
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 071b87ee682f..83b485810aea 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -599,9 +599,9 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
-void kvmppc_watchdog_func(unsigned long data)
+void kvmppc_watchdog_func(struct timer_list *t)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+ struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
u32 tsr, new_tsr;
int final;
@@ -1412,8 +1412,7 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
/* setup watchdog timer once */
spin_lock_init(&vcpu->arch.wdt_lock);
- setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
- (unsigned long)vcpu);
+ timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
/*
* Clear DBSR.MRR to avoid guest debug interrupt as
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c9de03e0c1f1..d469224c4ada 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -21,6 +21,7 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
+#include <asm/setup.h>
static int __patch_instruction(unsigned int *addr, unsigned int instr)
{
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
* During early early boot patch_instruction is called
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
- * We use slab_is_available and per cpu read * via this_cpu_read
- * of text_poke_area. Per-CPU areas might not be up early
- * this can create problems with just using this_cpu_read()
*/
- if (!slab_is_available() || !this_cpu_read(text_poke_area))
+ if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
return __patch_instruction(addr, instr);
local_irq_save(flags);
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 564fff06f5c1..23ec2c5e3b78 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
return !slice_area_is_free(mm, start, end - start);
}
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned long i;
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+ if (high_limit <= SLICE_LOW_TOP)
return;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices);
}
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned char *hpsizes;
int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
if (((lpsizes >> (i * 4)) & 0xf) == psize)
ret->low_slices |= 1u << i;
+ if (high_limit <= SLICE_LOW_TOP)
+ return;
+
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
struct slice_mask mask, struct slice_mask available)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+ /*
+ * Make sure we just do bit compare only to the max
+ * addr limit and not the full bit map size.
+ */
unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* First make up a "good" mask of slices that have the right size
* already
*/
- slice_mask_for_size(mm, psize, &good_mask);
+ slice_mask_for_size(mm, psize, &good_mask, high_limit);
slice_print_mask(" good_mask", good_mask);
/*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
#ifdef CONFIG_PPC_64K_PAGES
/* If we support combo pages, we can allow 64k pages in 4k slices */
if (psize == MMU_PAGE_64K) {
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
if (fixed)
slice_or_mask(&good_mask, &compat_mask);
}
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
return newaddr;
}
}
-
- /* We don't fit in the good mask, check what other slices are
+ /*
+ * We don't fit in the good mask, check what other slices are
* empty and thus can be converted
*/
- slice_mask_for_free(mm, &potential_mask);
+ slice_mask_for_free(mm, &potential_mask, high_limit);
slice_or_mask(&potential_mask, &good_mask);
slice_print_mask(" potential", potential_mask);
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
{
struct slice_mask mask, available;
unsigned int psize = mm->context.user_psize;
+ unsigned long high_limit = mm->context.slb_addr_limit;
if (radix_enabled())
return 0;
slice_range_to_mask(addr, len, &mask);
- slice_mask_for_size(mm, psize, &available);
+ slice_mask_for_size(mm, psize, &available, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
/* We need to account for 4k slices too */
if (psize == MMU_PAGE_64K) {
struct slice_mask compat_mask;
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
slice_or_mask(&available, &compat_mask);
}
#endif
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 264b6ab11978..b90a21bc2f3f 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -451,7 +451,7 @@ static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
* This routine will alternate loading the virtual counters for
* virtual CPUs
*/
-static void cell_virtual_cntr(unsigned long data)
+static void cell_virtual_cntr(struct timer_list *unused)
{
int i, prev_hdw_thread, next_hdw_thread;
u32 cpu;
@@ -555,7 +555,7 @@ static void cell_virtual_cntr(unsigned long data)
static void start_virt_cntrs(void)
{
- setup_timer(&timer_virt_cntr, cell_virtual_cntr, 0UL);
+ timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
timer_virt_cntr.expires = jiffies + HZ / 10;
add_timer(&timer_virt_cntr);
}
@@ -587,7 +587,7 @@ static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
* periodically based on kernel timer to switch which SPU is
* being monitored in a round robbin fashion.
*/
-static void spu_evnt_swap(unsigned long data)
+static void spu_evnt_swap(struct timer_list *unused)
{
int node;
int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
@@ -677,7 +677,7 @@ static void spu_evnt_swap(unsigned long data)
static void start_spu_event_swap(void)
{
- setup_timer(&timer_spu_event_swap, spu_evnt_swap, 0UL);
+ timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
timer_spu_event_swap.expires = jiffies + HZ / 25;
add_timer(&timer_spu_event_swap);
}
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 36344117c680..0ead3cd73caa 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -26,7 +26,7 @@
*/
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;
@@ -286,13 +286,14 @@ static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
struct imc_pmu **pn = per_nest_pmu_arr;
- int i;
if (old_cpu < 0 || new_cpu < 0)
return;
- for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+ while (*pn) {
perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+ pn++;
+ }
}
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
@@ -467,7 +468,7 @@ static int nest_imc_event_init(struct perf_event *event)
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
* Get the base memory addresss for this cpu.
*/
- chip_id = topology_physical_package_id(event->cpu);
+ chip_id = cpu_to_chip_id(event->cpu);
pcni = pmu->mem_info;
do {
if (pcni->id == chip_id) {
@@ -524,19 +525,19 @@ static int nest_imc_event_init(struct perf_event *event)
*/
static int core_imc_mem_init(int cpu, int size)
{
- int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+ int nid, rc = 0, core_id = (cpu / threads_per_core);
struct imc_mem_info *mem_info;
/*
* alloc_pages_node() will allocate memory for core in the
* local node only.
*/
- phys_id = topology_physical_package_id(cpu);
+ nid = cpu_to_node(cpu);
mem_info = &core_imc_pmu->mem_info[core_id];
mem_info->id = core_id;
/* We need only vbase for core counters */
- mem_info->vbase = page_address(alloc_pages_node(phys_id,
+ mem_info->vbase = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
@@ -797,14 +798,14 @@ static int core_imc_event_init(struct perf_event *event)
static int thread_imc_mem_alloc(int cpu_id, int size)
{
u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
- int phys_id = topology_physical_package_id(cpu_id);
+ int nid = cpu_to_node(cpu_id);
if (!local_mem) {
/*
* This case could happen only once at start, since we dont
* free the memory in cpu offline path.
*/
- local_mem = page_address(alloc_pages_node(phys_id,
+ local_mem = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!local_mem)
@@ -1194,6 +1195,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
kfree(pmu_ptr);
+ kfree(per_nest_pmu_arr);
return;
}
@@ -1218,6 +1220,13 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
return -ENOMEM;
/* Needed for hotplug/migration */
+ if (!per_nest_pmu_arr) {
+ per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+ sizeof(struct imc_pmu *),
+ GFP_KERNEL);
+ if (!per_nest_pmu_arr)
+ return -ENOMEM;
+ }
per_nest_pmu_arr[pmu_index] = pmu_ptr;
break;
case IMC_DOMAIN_CORE:
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index e47761cdcb98..9033c8194eda 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -992,13 +992,13 @@ static void spu_calc_load(void)
CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
-static void spusched_wake(unsigned long data)
+static void spusched_wake(struct timer_list *unused)
{
mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
wake_up_process(spusched_task);
}
-static void spuloadavg_wake(unsigned long data)
+static void spuloadavg_wake(struct timer_list *unused)
{
mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
spu_calc_load();
@@ -1124,8 +1124,8 @@ int __init spu_sched_init(void)
}
spin_lock_init(&spu_prio->runq_lock);
- setup_timer(&spusched_timer, spusched_wake, 0);
- setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
+ timer_setup(&spusched_timer, spusched_wake, 0);
+ timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);
spusched_task = kthread_run(spusched_thread, NULL, "spusched");
if (IS_ERR(spusched_task)) {
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 39a1d4225e0f..3408f315ef48 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -361,9 +361,9 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void kw_i2c_timeout(unsigned long data)
+static void kw_i2c_timeout(struct timer_list *t)
{
- struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
+ struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
@@ -513,7 +513,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
mutex_init(&host->mutex);
init_completion(&host->complete);
spin_lock_init(&host->lock);
- setup_timer(&host->timeout_timer, kw_i2c_timeout, (unsigned long)host);
+ timer_setup(&host->timeout_timer, kw_i2c_timeout, 0);
psteps = of_get_property(np, "AAPL,address-step", NULL);
steps = psteps ? (*psteps) : 0x10;
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 21f6531fae20..465ea105b771 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -153,6 +153,22 @@ static void disable_core_pmu_counters(void)
put_online_cpus();
}
+int get_max_nest_dev(void)
+{
+ struct device_node *node;
+ u32 pmu_units = 0, type;
+
+ for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+ if (of_property_read_u32(node, "type", &type))
+ continue;
+
+ if (type == IMC_TYPE_CHIP)
+ pmu_units++;
+ }
+
+ return pmu_units;
+}
+
static int opal_imc_counters_probe(struct platform_device *pdev)
{
struct device_node *imc_dev = pdev->dev.of_node;
@@ -191,8 +207,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
break;
}
- if (!imc_pmu_create(imc_dev, pmu_count, domain))
- pmu_count++;
+ if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+ if (domain == IMC_DOMAIN_NEST)
+ pmu_count++;
+ }
}
return 0;
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
index c488621dbec3..aebbe95c9230 100644
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -135,6 +135,7 @@ int chip_to_vas_id(int chipid)
}
return -1;
}
+EXPORT_SYMBOL(chip_to_vas_id);
static int vas_probe(struct platform_device *pdev)
{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index d7fe9838084d..57d7bc92e0b8 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
@@ -1264,6 +1264,12 @@ static inline pud_t pud_mkwrite(pud_t pud)
return pud;
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
static inline pud_t pud_mkclean(pud_t pud)
{
if (pud_large(pud)) {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 44c012c866d2..cf561160ea88 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -524,7 +524,7 @@ static void __init stp_reset(void)
}
}
-static void stp_timeout(unsigned long dummy)
+static void stp_timeout(struct timer_list *unused)
{
queue_work(time_sync_wq, &stp_work);
}
@@ -533,7 +533,7 @@ static int __init stp_init(void)
{
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return 0;
- setup_timer(&stp_timer, stp_timeout, 0UL);
+ timer_setup(&stp_timer, stp_timeout, 0);
time_init_wq();
if (!stp_online)
return 0;
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 3596ef91cf68..6cf024eb2085 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -57,10 +57,10 @@ static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL);
-static void cmm_timer_fn(unsigned long);
+static void cmm_timer_fn(struct timer_list *);
static void cmm_set_timer(void);
+static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
static long cmm_alloc_pages(long nr, long *counter,
struct cmm_page_array **list)
@@ -195,13 +195,11 @@ static void cmm_set_timer(void)
if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
return;
}
- cmm_timer.function = cmm_timer_fn;
- cmm_timer.data = 0;
cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
add_timer(&cmm_timer);
}
-static void cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(struct timer_list *unused)
{
long nr;
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c
index c6d96049a0bb..e8af2ff29bc3 100644
--- a/arch/sh/drivers/heartbeat.c
+++ b/arch/sh/drivers/heartbeat.c
@@ -59,9 +59,9 @@ static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
}
}
-static void heartbeat_timer(unsigned long data)
+static void heartbeat_timer(struct timer_list *t)
{
- struct heartbeat_data *hd = (struct heartbeat_data *)data;
+ struct heartbeat_data *hd = from_timer(hd, t, timer);
static unsigned bit = 0, up = 1;
heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);
@@ -133,7 +133,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
}
}
- setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
+ timer_setup(&hd->timer, heartbeat_timer, 0);
platform_set_drvdata(pdev, hd);
return mod_timer(&hd->timer, jiffies + 1);
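
Where the old callbacks cast their unsigned long argument back to a context pointer, converted code such as the heartbeat driver above uses from_timer(), a container_of() wrapper that recovers the structure embedding the timer. A small sketch of that pattern with a made-up structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct blink_state {
        int counter;
        struct timer_list timer;        /* embedded timer; no .data cookie needed */
};

static void blink_timer_fn(struct timer_list *t)
{
        /* from_timer(var, t, field) expands to container_of(t, typeof(*var), field) */
        struct blink_state *bs = from_timer(bs, t, timer);

        bs->counter++;
        mod_timer(&bs->timer, jiffies + HZ);
}

static void blink_start(struct blink_state *bs)
{
        timer_setup(&bs->timer, blink_timer_fn, 0);
        mod_timer(&bs->timer, jiffies + HZ);
}
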
diff --git a/arch/sh/drivers/pci/common.c b/arch/sh/drivers/pci/common.c
index cae707f3472d..fe163ecd0719 100644
--- a/arch/sh/drivers/pci/common.c
+++ b/arch/sh/drivers/pci/common.c
@@ -85,18 +85,18 @@ int __init pci_is_66mhz_capable(struct pci_channel *hose,
return cap66 > 0;
}
-static void pcibios_enable_err(unsigned long __data)
+static void pcibios_enable_err(struct timer_list *t)
{
- struct pci_channel *hose = (struct pci_channel *)__data;
+ struct pci_channel *hose = from_timer(hose, t, err_timer);
del_timer(&hose->err_timer);
printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
enable_irq(hose->err_irq);
}
-static void pcibios_enable_serr(unsigned long __data)
+static void pcibios_enable_serr(struct timer_list *t)
{
- struct pci_channel *hose = (struct pci_channel *)__data;
+ struct pci_channel *hose = from_timer(hose, t, serr_timer);
del_timer(&hose->serr_timer);
printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
@@ -106,15 +106,11 @@ static void pcibios_enable_serr(unsigned long __data)
void pcibios_enable_timers(struct pci_channel *hose)
{
if (hose->err_irq) {
- init_timer(&hose->err_timer);
- hose->err_timer.data = (unsigned long)hose;
- hose->err_timer.function = pcibios_enable_err;
+ timer_setup(&hose->err_timer, pcibios_enable_err, 0);
}
if (hose->serr_irq) {
- init_timer(&hose->serr_timer);
- hose->serr_timer.data = (unsigned long)hose;
- hose->serr_timer.function = pcibios_enable_serr;
+ timer_setup(&hose->serr_timer, pcibios_enable_serr, 0);
}
}
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index 5bfb341cc5c4..a17181160233 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -26,9 +26,9 @@ static ssize_t switch_show(struct device *dev,
}
static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
-static void switch_timer(unsigned long data)
+static void switch_timer(struct timer_list *t)
{
- struct push_switch *psw = (struct push_switch *)data;
+ struct push_switch *psw = from_timer(psw, t, debounce);
schedule_work(&psw->work);
}
@@ -78,10 +78,7 @@ static int switch_drv_probe(struct platform_device *pdev)
}
INIT_WORK(&psw->work, switch_work_handler);
- init_timer(&psw->debounce);
-
- psw->debounce.function = switch_timer;
- psw->debounce.data = (unsigned long)psw;
+ timer_setup(&psw->debounce, switch_timer, 0);
/* Workqueue API brain-damage */
psw->pdev = pdev;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 5a9e96be1665..9937c5ff94a9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -715,7 +715,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return pte_pfn(pte);
}
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 0f0f76b4f6cd..063556fe2cb1 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -19,7 +19,7 @@ lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += fls.o
lib-$(CONFIG_SPARC64) += fls64.o
-obj-$(CONFIG_SPARC64) += NG4fls.o
+lib-$(CONFIG_SPARC64) += NG4fls.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 5335ba3c850e..33c0f8bb0f33 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
if (!(pmd_val(pmd) & _PAGE_VALID))
return 0;
- if (write && !pmd_write(pmd))
+ if (!pmd_access_permitted(pmd, write))
return 0;
refs = 0;
@@ -114,7 +114,7 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
if (!(pud_val(pud) & _PAGE_VALID))
return 0;
- if (write && !pud_write(pud))
+ if (!pud_access_permitted(pud, write))
return 0;
refs = 0;
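
The sparc gup path above switches from open-coded pmd_write()/pud_write() checks to pmd_access_permitted()/pud_access_permitted(), which lets architectures fold in extra policy such as protection keys. Where an architecture adds nothing special, the default semantics reduce to roughly the following (a simplified sketch with hypothetical names, not the exact asm-generic text):

/* Default check: the entry is present and, for a write, writable. */
static inline bool sketch_pmd_access_permitted(pmd_t pmd, bool write)
{
        return pmd_present(pmd) && (!write || pmd_write(pmd));
}

static inline bool sketch_pud_access_permitted(pud_t pud, bool write)
{
        return pud_present(pud) && (!write || pud_write(pud));
}
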
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 2a26cc4fefc2..adfa21b18488 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -475,7 +475,6 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define __HAVE_ARCH_PMD_WRITE
#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index d9280482a2f8..c68add8df3ae 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -10,7 +10,6 @@ config UML
select HAVE_DEBUG_KMEMLEAK
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
- select GENERIC_IO
select GENERIC_CLOCKEVENTS
select HAVE_GCC_PLUGINS
select TTY # Needed for line.c
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index df3276d6bfe3..8eed3f94bfc7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1804,14 +1804,20 @@ config X86_SMAP
If unsure, say Y.
config X86_INTEL_UMIP
- def_bool n
+ def_bool y
depends on CPU_SUP_INTEL
prompt "Intel User Mode Instruction Prevention" if EXPERT
---help---
The User Mode Instruction Prevention (UMIP) is a security
feature in newer Intel processors. If enabled, a general
- protection fault is issued if the instructions SGDT, SLDT,
- SIDT, SMSW and STR are executed in user mode.
+ protection fault is issued if the SGDT, SLDT, SIDT, SMSW
+ or STR instructions are executed in user mode. These instructions
+ unnecessarily expose information about the hardware state.
+
+ The vast majority of applications do not use these instructions.
+ For the very few that do, software emulation is provided in
+ specific cases in protected and virtual-8086 modes. Emulated
+ results are dummy.
config X86_INTEL_MPX
prompt "Intel MPX (Memory Protection Extensions)"
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index a63fbc25ce84..8199a6187251 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -171,7 +171,6 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
static void mem_avoid_memmap(char *str)
{
static int i;
- int rc;
if (i >= MAX_MEMMAP_REGIONS)
return;
@@ -219,7 +218,7 @@ static int handle_mem_memmap(void)
return 0;
tmp_cmdline = malloc(len + 1);
- if (!tmp_cmdline )
+ if (!tmp_cmdline)
error("Failed to allocate space for tmp_cmdline");
memcpy(tmp_cmdline, args, len);
@@ -363,7 +362,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
cmd_line |= boot_params->hdr.cmd_line_ptr;
/* Calculate size of cmd_line. */
ptr = (char *)(unsigned long)cmd_line;
- for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+ for (cmd_line_size = 0; ptr[cmd_line_size++];)
;
mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a2b30ec69497..f81d50d7ceac 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_IRETQ
+.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ bt $9, \flags /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
#endif
.endm
+.macro TRACE_IRQS_IRETQ
+ TRACE_IRQS_FLAGS EFLAGS(%rsp)
+.endm
+
/*
* When dynamic function tracer is enabled it will add a breakpoint
* to all locations that it is about to modify, sync CPUs, update
@@ -148,8 +152,6 @@ ENTRY(entry_SYSCALL_64)
movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- TRACE_IRQS_OFF
-
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
@@ -170,6 +172,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
UNWIND_HINT_REGS extra=0
+ TRACE_IRQS_OFF
+
/*
* If we need to do entry work or if we guess we'll need to do
* exit work, go straight to the slow path.
@@ -943,11 +947,13 @@ ENTRY(native_load_gs_index)
FRAME_BEGIN
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+ TRACE_IRQS_OFF
SWAPGS
.Lgs_change:
movl %edi, %gs
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
SWAPGS
+ TRACE_IRQS_FLAGS (%rsp)
popfq
FRAME_END
ret
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 43445da30cea..09c26a4f139c 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3734,6 +3734,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_hsw),
+ EVENT_PTR(td_slots_issued),
+ EVENT_PTR(td_slots_retired),
+ EVENT_PTR(td_fetch_bubbles),
+ EVENT_PTR(td_total_slots),
+ EVENT_PTR(td_total_slots_scale),
+ EVENT_PTR(td_recovery_bubbles),
+ EVENT_PTR(td_recovery_bubbles_scale),
+ NULL
+};
+
+static struct attribute *hsw_tsx_events_attrs[] = {
EVENT_PTR(tx_start),
EVENT_PTR(tx_commit),
EVENT_PTR(tx_abort),
@@ -3746,18 +3759,16 @@ static struct attribute *hsw_events_attrs[] = {
EVENT_PTR(el_conflict),
EVENT_PTR(cycles_t),
EVENT_PTR(cycles_ct),
- EVENT_PTR(mem_ld_hsw),
- EVENT_PTR(mem_st_hsw),
- EVENT_PTR(td_slots_issued),
- EVENT_PTR(td_slots_retired),
- EVENT_PTR(td_fetch_bubbles),
- EVENT_PTR(td_total_slots),
- EVENT_PTR(td_total_slots_scale),
- EVENT_PTR(td_recovery_bubbles),
- EVENT_PTR(td_recovery_bubbles_scale),
NULL
};
+static __init struct attribute **get_hsw_events_attrs(void)
+{
+ return boot_cpu_has(X86_FEATURE_RTM) ?
+ merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+ hsw_events_attrs;
+}
+
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
@@ -4186,7 +4197,7 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
x86_pmu.lbr_double_abort = true;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
@@ -4225,7 +4236,7 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
x86_pmu.limit_period = bdw_limit_period;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
@@ -4283,7 +4294,7 @@ __init int intel_pmu_init(void)
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
extra_attr = merge_attr(extra_attr, skl_format_attr);
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
pr_cont("Skylake events, ");
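
The Haswell/Skylake PMU change above splits the TSX-specific sysfs events into their own array and merges them back in at init time only when the CPU has RTM, via the file-local merge_attr() helper. The underlying technique, concatenating two NULL-terminated struct attribute * arrays into a fresh allocation, looks roughly like this sketch (the helper name and error handling are illustrative, not the file's exact implementation):

#include <linux/slab.h>
#include <linux/sysfs.h>

/* Concatenate two NULL-terminated attribute arrays into a new allocation. */
static struct attribute **example_merge_attr(struct attribute **a,
                                             struct attribute **b)
{
        struct attribute **new;
        int i, j, k = 0;

        for (i = 0; a[i]; i++)
                ;
        for (j = 0; b[j]; j++)
                ;

        new = kmalloc_array(i + j + 1, sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        for (i = 0; a[i]; i++)
                new[k++] = a[i];
        for (j = 0; b[j]; j++)
                new[k++] = b[j];
        new[k] = NULL;                          /* keep the array NULL-terminated */

        return new;
}
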
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d45e06346f14..7874c980d569 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -975,10 +975,10 @@ static void uncore_pci_remove(struct pci_dev *pdev)
int i, phys_id, pkg;
phys_id = uncore_pcibus_to_physid(pdev->bus);
- pkg = topology_phys_to_logical_pkg(phys_id);
box = pci_get_drvdata(pdev);
if (!box) {
+ pkg = topology_phys_to_logical_pkg(phys_id);
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
uncore_extra_pci_dev[pkg].dev[i] = NULL;
@@ -994,7 +994,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
return;
pci_set_drvdata(pdev, NULL);
- pmu->boxes[pkg] = NULL;
+ pmu->boxes[box->pkgid] = NULL;
if (atomic_dec_return(&pmu->activeboxes) == 0)
uncore_pmu_unregister(pmu);
uncore_box_exit(box);
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 4364191e7c6b..414dc7e7c950 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -100,7 +100,7 @@ struct intel_uncore_extra_reg {
struct intel_uncore_box {
int pci_phys_id;
- int pkgid;
+ int pkgid; /* Logical package ID */
int n_active; /* number of active events */
int n_events;
int cpu; /* cpu to collect events */
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 95cb19f4e06f..6d8044ab1060 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1057,7 +1057,7 @@ static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_eve
if (reg1->idx != EXTRA_REG_NONE) {
int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
- int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
+ int pkg = box->pkgid;
struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
if (filter_pdev) {
@@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = {
NULL,
};
+/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
+static struct event_constraint bdx_uncore_pcu_constraints[] = {
+ EVENT_CONSTRAINT(0x80, 0xe, 0x80),
+ EVENT_CONSTRAINT_END
+};
+
void bdx_uncore_cpu_init(void)
{
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
+
+ hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 3a091cea36c5..0d157d2a1e2a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -309,6 +309,7 @@ static inline int mmap_is_ia32(void)
extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
+extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b80e46733909..2851077b6051 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -99,14 +99,6 @@ struct irq_alloc_info {
void *dmar_data;
};
#endif
-#ifdef CONFIG_HT_IRQ
- struct {
- int ht_pos;
- int ht_idx;
- struct pci_dev *ht_dev;
- void *ht_update;
- };
-#endif
#ifdef CONFIG_X86_UV
struct {
int uv_limit;
diff --git a/arch/x86/include/asm/hypertransport.h b/arch/x86/include/asm/hypertransport.h
deleted file mode 100644
index 5d55df352879..000000000000
--- a/arch/x86/include/asm/hypertransport.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_HYPERTRANSPORT_H
-#define _ASM_X86_HYPERTRANSPORT_H
-
-/*
- * Constants for x86 Hypertransport Interrupts.
- */
-
-#define HT_IRQ_LOW_BASE 0xf8000000
-
-#define HT_IRQ_LOW_VECTOR_SHIFT 16
-#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
-#define HT_IRQ_LOW_VECTOR(v) \
- (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
-
-#define HT_IRQ_LOW_DEST_ID_SHIFT 8
-#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
-#define HT_IRQ_LOW_DEST_ID(v) \
- (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
-
-#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
-#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
-
-#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
-#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
-
-
-#define HT_IRQ_LOW_MT_FIXED 0x0000000
-#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
-#define HT_IRQ_LOW_MT_SMI 0x0000008
-#define HT_IRQ_LOW_MT_NMI 0x000000c
-#define HT_IRQ_LOW_MT_INIT 0x0000010
-#define HT_IRQ_LOW_MT_STARTUP 0x0000014
-#define HT_IRQ_LOW_MT_EXTINT 0x0000018
-#define HT_IRQ_LOW_MT_LINT1 0x000008c
-#define HT_IRQ_LOW_MT_LINT0 0x0000098
-
-#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
-
-
-#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
-#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
-#define HT_IRQ_HIGH_DEST_ID(v) \
- ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
-
-#endif /* _ASM_X86_HYPERTRANSPORT_H */
diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
index e1d3b4ce8a92..2b6ccf2c49f1 100644
--- a/arch/x86/include/asm/insn-eval.h
+++ b/arch/x86/include/asm/insn-eval.h
@@ -18,6 +18,6 @@
void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
-char insn_get_code_seg_params(struct pt_regs *regs);
+int insn_get_code_seg_params(struct pt_regs *regs);
#endif /* _ASM_X86_INSN_EVAL_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 93ae8aee1780..95e948627fd0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -111,6 +111,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
#endif
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index f695cc6b8e1f..139feef467f7 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -56,10 +56,4 @@ extern void arch_init_msi_domain(struct irq_domain *domain);
static inline void arch_init_msi_domain(struct irq_domain *domain) { }
#endif
-#ifdef CONFIG_HT_IRQ
-extern void arch_init_htirq_domain(struct irq_domain *domain);
-#else
-static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
-#endif
-
#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 09f9e1e00e3b..95e2dfd75521 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1061,7 +1061,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_RW;
@@ -1088,6 +1088,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_RW;
+}
+
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2db7cf720b04..cc16fa882e3e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,7 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
+ unsigned initialized : 1;
} __randomize_layout;
struct cpuid_regs {
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ef9e02e614d0..f4c463df8b08 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -342,13 +342,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
#ifdef CONFIG_X86_IO_APIC
#define MP_ISA_BUS 0
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+ u8 trigger, u32 gsi);
+
static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
u32 gsi)
{
- int ioapic;
- int pin;
- struct mpc_intsrc mp_irq;
-
/*
* Check bus_irq boundary.
*/
@@ -358,14 +357,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
}
/*
- * Convert 'gsi' to 'ioapic.pin'.
- */
- ioapic = mp_find_ioapic(gsi);
- if (ioapic < 0)
- return;
- pin = mp_find_ioapic_pin(ioapic, gsi);
-
- /*
* TBD: This check is for faulty timer entries, where the override
* erroneously sets the trigger to level, resulting in a HUGE
* increase of timer interrupts!
@@ -373,16 +364,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
if ((bus_irq == 0) && (trigger == 3))
trigger = 1;
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = (trigger << 2) | polarity;
- mp_irq.srcbus = MP_ISA_BUS;
- mp_irq.srcbusirq = bus_irq; /* IRQ */
- mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */
- mp_irq.dstirq = pin; /* INTIN# */
-
- mp_save_irq(&mp_irq);
-
+ if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
+ return;
/*
* Reset default identity mapping if gsi is also an legacy IRQ,
* otherwise there will be more than one entry with the same GSI
@@ -429,6 +412,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
return 0;
}
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+ u8 trigger, u32 gsi)
+{
+ struct mpc_intsrc mp_irq;
+ int ioapic, pin;
+
+ /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0) {
+ pr_warn("Failed to find ioapic for gsi : %u\n", gsi);
+ return ioapic;
+ }
+
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.irqflag = (trigger << 2) | polarity;
+ mp_irq.srcbus = MP_ISA_BUS;
+ mp_irq.srcbusirq = bus_irq;
+ mp_irq.dstapic = mpc_ioapic_id(ioapic);
+ mp_irq.dstirq = pin;
+
+ mp_save_irq(&mp_irq);
+
+ return 0;
+}
+
static int __init
acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
@@ -473,7 +484,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
- mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ if (bus_irq < NR_IRQS_LEGACY)
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ else
+ mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
+
acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
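The refactor above splits the I/O APIC routing setup out of mp_override_legacy_irq() so that acpi_sci_ioapic_setup() can register an SCI above the legacy IRQ range without touching the legacy-override bookkeeping. For reference, the irqflag packing used by mp_register_ioapic_irq() follows the MP specification: polarity in bits 1:0 and trigger mode in bits 3:2 (a short illustration, not code from the patch):

	u8 trigger = 3, polarity = 3;			/* level triggered, active low */
	u16 irqflag = (trigger << 2) | polarity;	/* == 0x0f, as stored in mp_irq.irqflag */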
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index a9e08924927e..a6fcaf16cdbf 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -12,7 +12,6 @@ obj-y += hw_nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_PCI_MSI) += msi.o
-obj-$(CONFIG_HT_IRQ) += htirq.o
obj-$(CONFIG_SMP) += ipi.o
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
deleted file mode 100644
index b07075dce8b7..000000000000
--- a/arch/x86/kernel/apic/htirq.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Support Hypertransport IRQ
- *
- * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
- * Moved from arch/x86/kernel/apic/io_apic.c.
- * Jiang Liu <jiang.liu@linux.intel.com>
- * Add support of hierarchical irqdomain
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/htirq.h>
-#include <asm/irqdomain.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/hypertransport.h>
-
-static struct irq_domain *htirq_domain;
-
-/*
- * Hypertransport interrupt support
- */
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
- struct irq_data *parent = data->parent_data;
- int ret;
-
- ret = parent->chip->irq_set_affinity(parent, mask, force);
- if (ret >= 0) {
- struct ht_irq_msg msg;
- struct irq_cfg *cfg = irqd_cfg(data);
-
- fetch_ht_irq_msg(data->irq, &msg);
- msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK |
- HT_IRQ_LOW_DEST_ID_MASK);
- msg.address_lo |= HT_IRQ_LOW_VECTOR(cfg->vector) |
- HT_IRQ_LOW_DEST_ID(cfg->dest_apicid);
- msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
- msg.address_hi |= HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
- write_ht_irq_msg(data->irq, &msg);
- }
-
- return ret;
-}
-
-static struct irq_chip ht_irq_chip = {
- .name = "PCI-HT",
- .irq_mask = mask_ht_irq,
- .irq_unmask = unmask_ht_irq,
- .irq_ack = irq_chip_ack_parent,
- .irq_set_affinity = ht_set_affinity,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *arg)
-{
- struct ht_irq_cfg *ht_cfg;
- struct irq_alloc_info *info = arg;
- struct pci_dev *dev;
- irq_hw_number_t hwirq;
- int ret;
-
- if (nr_irqs > 1 || !info)
- return -EINVAL;
-
- dev = info->ht_dev;
- hwirq = (info->ht_idx & 0xFF) |
- PCI_DEVID(dev->bus->number, dev->devfn) << 8 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 24;
- if (irq_find_mapping(domain, hwirq) > 0)
- return -EEXIST;
-
- ht_cfg = kmalloc(sizeof(*ht_cfg), GFP_KERNEL);
- if (!ht_cfg)
- return -ENOMEM;
-
- ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
- if (ret < 0) {
- kfree(ht_cfg);
- return ret;
- }
-
- /* Initialize msg to a value that will never match the first write. */
- ht_cfg->msg.address_lo = 0xffffffff;
- ht_cfg->msg.address_hi = 0xffffffff;
- ht_cfg->dev = info->ht_dev;
- ht_cfg->update = info->ht_update;
- ht_cfg->pos = info->ht_pos;
- ht_cfg->idx = 0x10 + (info->ht_idx * 2);
- irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg,
- handle_edge_irq, ht_cfg, "edge");
-
- return 0;
-}
-
-static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
-
- BUG_ON(nr_irqs != 1);
- kfree(irq_data->chip_data);
- irq_domain_free_irqs_top(domain, virq, nr_irqs);
-}
-
-static int htirq_domain_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
-{
- struct ht_irq_msg msg;
- struct irq_cfg *cfg = irqd_cfg(irq_data);
-
- msg.address_hi = HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
- msg.address_lo =
- HT_IRQ_LOW_BASE |
- HT_IRQ_LOW_DEST_ID(cfg->dest_apicid) |
- HT_IRQ_LOW_VECTOR(cfg->vector) |
- ((apic->irq_dest_mode == 0) ?
- HT_IRQ_LOW_DM_PHYSICAL :
- HT_IRQ_LOW_DM_LOGICAL) |
- HT_IRQ_LOW_RQEOI_EDGE |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- HT_IRQ_LOW_MT_FIXED :
- HT_IRQ_LOW_MT_ARBITRATED) |
- HT_IRQ_LOW_IRQ_MASKED;
- write_ht_irq_msg(irq_data->irq, &msg);
- return 0;
-}
-
-static void htirq_domain_deactivate(struct irq_domain *domain,
- struct irq_data *irq_data)
-{
- struct ht_irq_msg msg;
-
- memset(&msg, 0, sizeof(msg));
- write_ht_irq_msg(irq_data->irq, &msg);
-}
-
-static const struct irq_domain_ops htirq_domain_ops = {
- .alloc = htirq_domain_alloc,
- .free = htirq_domain_free,
- .activate = htirq_domain_activate,
- .deactivate = htirq_domain_deactivate,
-};
-
-void __init arch_init_htirq_domain(struct irq_domain *parent)
-{
- struct fwnode_handle *fn;
-
- if (disable_apic)
- return;
-
- fn = irq_domain_alloc_named_fwnode("PCI-HT");
- if (!fn)
- goto warn;
-
- htirq_domain = irq_domain_create_tree(fn, &htirq_domain_ops, NULL);
- irq_domain_free_fwnode(fn);
- if (!htirq_domain)
- goto warn;
-
- htirq_domain->parent = parent;
- return;
-
-warn:
- pr_warn("Failed to initialize irqdomain for HTIRQ.\n");
-}
-
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
- ht_irq_update_t *update)
-{
- struct irq_alloc_info info;
-
- if (!htirq_domain)
- return -ENOSYS;
-
- init_irq_alloc_info(&info, NULL);
- info.ht_idx = idx;
- info.ht_pos = pos;
- info.ht_dev = dev;
- info.ht_update = update;
-
- return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev),
- &info);
-}
-
-void arch_teardown_ht_irq(unsigned int irq)
-{
- irq_domain_free_irqs(irq, 1);
-}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 05c85e693a5d..6a823a25eaff 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -1,5 +1,5 @@
/*
- * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ * Local APIC related interfaces to support IOAPIC, MSI, etc.
*
* Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
* Moved from arch/x86/kernel/apic/io_apic.c.
@@ -601,7 +601,7 @@ int __init arch_probe_nr_irqs(void)
nr_irqs = NR_VECTORS * nr_cpu_ids;
nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+#if defined(CONFIG_PCI_MSI)
/*
* for MSI and HT dyn irq
*/
@@ -663,7 +663,6 @@ int __init arch_early_irq_init(void)
irq_set_default_host(x86_vector_domain);
arch_init_msi_domain(x86_vector_domain);
- arch_init_htirq_domain(x86_vector_domain);
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 13ae9e5eec2f..fa998ca8aa5a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -341,6 +341,8 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
cr4_set_bits(X86_CR4_UMIP);
+ pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
+
return;
out:
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 410c5dadcee3..3a4b12809ab5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
}
static unsigned long mpf_base;
+static bool mpf_found;
static unsigned long __init get_mpc_size(unsigned long physptr)
{
@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early)
if (!smp_found_config)
return;
- if (!mpf_base)
+ if (!mpf_found)
return;
if (acpi_lapic && early)
@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
smp_found_config = 1;
#endif
mpf_base = base;
+ mpf_found = true;
pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
base, base + sizeof(*mpf) - 1, mpf);
@@ -858,7 +860,7 @@ static int __init update_mp_table(void)
if (!enable_update_mptable)
return 0;
- if (!mpf_base)
+ if (!mpf_found)
return 0;
mpf = early_memremap(mpf_base, sizeof(*mpf));
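The new mpf_found flag exists because mpf_base holds a physical address, and physical address 0 is a legitimate location for the MP floating pointer; testing the address for truth conflates "not found" with "found at 0". A minimal sketch of the distinction (condensed from the hunks above, purely illustrative):

	static unsigned long mpf_base;	/* physical address; 0 is a valid value */
	static bool mpf_found;		/* set only when the scan actually succeeded */

	if (!mpf_found)			/* correct: "if (!mpf_base)" would misread a table at 0 */
		return 0;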
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5f59e6bee123..3d01df7d7cf6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -101,9 +101,6 @@ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Logical package management. We might want to allocate that dynamically */
-static int *physical_to_logical_pkg __read_mostly;
-static unsigned long *physical_package_map __read_mostly;;
-static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
@@ -281,108 +278,48 @@ static void notrace start_secondary(void *unused)
}
/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
+ *
+ * Returns logical package id or -1 if not found
+ */
+int topology_phys_to_logical_pkg(unsigned int phys_pkg)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && c->phys_proc_id == phys_pkg)
+ return c->logical_proc_id;
+ }
+ return -1;
+}
+EXPORT_SYMBOL(topology_phys_to_logical_pkg);
+
+/**
* topology_update_package_map - Update the physical to logical package map
* @pkg: The physical package id as retrieved via CPUID
* @cpu: The cpu for which this is updated
*/
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
- unsigned int new;
+ int new;
- /* Called from early boot ? */
- if (!physical_package_map)
- return 0;
-
- if (pkg >= max_physical_pkg_id)
- return -EINVAL;
-
- /* Set the logical package id */
- if (test_and_set_bit(pkg, physical_package_map))
+ /* Already available somewhere? */
+ new = topology_phys_to_logical_pkg(pkg);
+ if (new >= 0)
goto found;
- if (logical_packages >= __max_logical_packages) {
- pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n",
- logical_packages, cpu, __max_logical_packages);
- return -ENOSPC;
- }
-
new = logical_packages++;
if (new != pkg) {
pr_info("CPU %u Converting physical %u to logical package %u\n",
cpu, pkg, new);
}
- physical_to_logical_pkg[pkg] = new;
-
found:
- cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
+ cpu_data(cpu).logical_proc_id = new;
return 0;
}
-/**
- * topology_phys_to_logical_pkg - Map a physical package id to a logical
- *
- * Returns logical package id or -1 if not found
- */
-int topology_phys_to_logical_pkg(unsigned int phys_pkg)
-{
- if (phys_pkg >= max_physical_pkg_id)
- return -1;
- return physical_to_logical_pkg[phys_pkg];
-}
-EXPORT_SYMBOL(topology_phys_to_logical_pkg);
-
-static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
-{
- unsigned int ncpus;
- size_t size;
-
- /*
- * Today neither Intel nor AMD support heterogenous systems. That
- * might change in the future....
- *
- * While ideally we'd want '* smp_num_siblings' in the below @ncpus
- * computation, this won't actually work since some Intel BIOSes
- * report inconsistent HT data when they disable HT.
- *
- * In particular, they reduce the APIC-IDs to only include the cores,
- * but leave the CPUID topology to say there are (2) siblings.
- * This means we don't know how many threads there will be until
- * after the APIC enumeration.
- *
- * By not including this we'll sometimes over-estimate the number of
- * logical packages by the amount of !present siblings, but this is
- * still better than MAX_LOCAL_APIC.
- *
- * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
- * on the command line leading to a similar issue as the HT disable
- * problem because the hyperthreads are usually enumerated after the
- * primary cores.
- */
- ncpus = boot_cpu_data.x86_max_cores;
- if (!ncpus) {
- pr_warn("x86_max_cores == zero !?!?");
- ncpus = 1;
- }
-
- __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
- logical_packages = 0;
-
- /*
- * Possibly larger than what we need as the number of apic ids per
- * package can be smaller than the actual used apic ids.
- */
- max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
- size = max_physical_pkg_id * sizeof(unsigned int);
- physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
- memset(physical_to_logical_pkg, 0xff, size);
- size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
- physical_package_map = kzalloc(size, GFP_KERNEL);
-
- pr_info("Max logical packages: %u\n", __max_logical_packages);
-
- topology_update_package_map(c->phys_proc_id, cpu);
-}
-
void __init smp_store_boot_cpu_info(void)
{
int id = 0; /* CPU 0 */
@@ -390,7 +327,8 @@ void __init smp_store_boot_cpu_info(void)
*c = boot_cpu_data;
c->cpu_index = id;
- smp_init_package_map(c, id);
+ topology_update_package_map(c->phys_proc_id, id);
+ c->initialized = true;
}
/*
@@ -401,13 +339,16 @@ void smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = &cpu_data(id);
- *c = boot_cpu_data;
+ /* Copy boot_cpu_data only on the first bringup */
+ if (!c->initialized)
+ *c = boot_cpu_data;
c->cpu_index = id;
/*
* During boot time, CPU0 has this setup already. Save the info when
* bringing up AP or offlined CPU0.
*/
identify_secondary_cpu(c);
+ c->initialized = true;
}
static bool
@@ -1356,7 +1297,16 @@ void __init native_smp_prepare_boot_cpu(void)
void __init native_smp_cpus_done(unsigned int max_cpus)
{
+ int ncpus;
+
pr_debug("Boot done\n");
+ /*
+ * Today neither Intel nor AMD support heterogeneous systems so
+ * extrapolate the boot cpu's data to all packages.
+ */
+ ncpus = cpu_data(0).booted_cores * smp_num_siblings;
+ __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
if (x86_has_numa_in_package)
set_sched_topology(x86_numa_in_package_topology);
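With the per-package bookkeeping gone, __max_logical_packages is now derived once all CPUs have been brought up. A worked example of the arithmetic above (the numbers are illustrative, not from the patch):

	int booted_cores = 4, smp_num_siblings = 2, nr_cpu_ids = 16;	/* hypothetical box */
	int ncpus = booted_cores * smp_num_siblings;			/* 8 */
	int max_pkgs = DIV_ROUND_UP(nr_cpu_ids, ncpus);			/* 2 logical packages */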
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a63fe77b3217..676774b9bb8d 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -188,6 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (len > TASK_SIZE)
return -ENOMEM;
+ /* No address checking. See comment at mmap_address_hint_valid() */
if (flags & MAP_FIXED)
return addr;
@@ -197,12 +198,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */
if (addr) {
- addr = PAGE_ALIGN(addr);
+ addr &= PAGE_MASK;
+ if (!mmap_address_hint_valid(addr, len))
+ goto get_unmapped_area;
+
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
}
+get_unmapped_area:
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 6ba82be68cff..f44ce0fb3583 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -78,7 +78,60 @@
#define UMIP_INST_SGDT 0 /* 0F 01 /0 */
#define UMIP_INST_SIDT 1 /* 0F 01 /1 */
-#define UMIP_INST_SMSW 3 /* 0F 01 /4 */
+#define UMIP_INST_SMSW 2 /* 0F 01 /4 */
+#define UMIP_INST_SLDT 3 /* 0F 00 /0 */
+#define UMIP_INST_STR 4 /* 0F 00 /1 */
+
+const char * const umip_insns[5] = {
+ [UMIP_INST_SGDT] = "SGDT",
+ [UMIP_INST_SIDT] = "SIDT",
+ [UMIP_INST_SMSW] = "SMSW",
+ [UMIP_INST_SLDT] = "SLDT",
+ [UMIP_INST_STR] = "STR",
+};
+
+#define umip_pr_err(regs, fmt, ...) \
+ umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__)
+#define umip_pr_warning(regs, fmt, ...) \
+ umip_printk(regs, KERN_WARNING, fmt, ##__VA_ARGS__)
+
+/**
+ * umip_printk() - Print a rate-limited message
+ * @regs: Register set with the context in which the warning is printed
+ * @log_level: Kernel log level to print the message
+ * @fmt: The text string to print
+ *
+ * Print the text contained in @fmt. The print rate is limited to bursts of 5
+ * messages every two minutes. The purpose of this customized version of
+ * printk() is to print messages when user space processes use any of the
+ * UMIP-protected instructions. Thus, the printed text is prepended with the
+ * task name and process ID number of the current task as well as the
+ * instruction and stack pointers in @regs as seen when entering kernel mode.
+ *
+ * Returns:
+ *
+ * None.
+ */
+static __printf(3, 4)
+void umip_printk(const struct pt_regs *regs, const char *log_level,
+ const char *fmt, ...)
+{
+ /* Bursts of 5 messages every two minutes */
+ static DEFINE_RATELIMIT_STATE(ratelimit, 2 * 60 * HZ, 5);
+ struct task_struct *tsk = current;
+ struct va_format vaf;
+ va_list args;
+
+ if (!__ratelimit(&ratelimit))
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm,
+ task_pid_nr(tsk), regs->ip, regs->sp, &vaf);
+ va_end(args);
+}
/**
* identify_insn() - Identify a UMIP-protected instruction
@@ -118,10 +171,16 @@ static int identify_insn(struct insn *insn)
default:
return -EINVAL;
}
+ } else if (insn->opcode.bytes[1] == 0x0) {
+ if (X86_MODRM_REG(insn->modrm.value) == 0)
+ return UMIP_INST_SLDT;
+ else if (X86_MODRM_REG(insn->modrm.value) == 1)
+ return UMIP_INST_STR;
+ else
+ return -EINVAL;
+ } else {
+ return -EINVAL;
}
-
- /* SLDT AND STR are not emulated */
- return -EINVAL;
}
/**
@@ -228,10 +287,8 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
return;
- pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n",
- tsk->comm, task_pid_nr(tsk), regs->ip,
- regs->sp, X86_PF_USER | X86_PF_WRITE,
- regs->ip);
+ umip_pr_err(regs, "segfault in emulation. error%x\n",
+ X86_PF_USER | X86_PF_WRITE);
}
/**
@@ -262,15 +319,11 @@ bool fixup_umip_exception(struct pt_regs *regs)
unsigned char buf[MAX_INSN_SIZE];
void __user *uaddr;
struct insn insn;
- char seg_defs;
+ int seg_defs;
if (!regs)
return false;
- /* Do not emulate 64-bit processes. */
- if (user_64bit_mode(regs))
- return false;
-
/*
* If not in user-space long mode, a custom code segment could be in
* use. This is true in protected mode (if the process defined a local
@@ -322,6 +375,15 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (umip_inst < 0)
return false;
+ umip_pr_warning(regs, "%s instruction cannot be used by applications.\n",
+ umip_insns[umip_inst]);
+
+ /* Do not emulate SLDT, STR or user long mode processes. */
+ if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs))
+ return false;
+
+ umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n");
+
if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size))
return false;
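The identify_insn() additions dispatch on the ModRM reg field of the two-byte 0F 00 opcode to tell SLDT (reg == 0) from STR (reg == 1). A minimal decode illustration (the encoding is the standard one; the snippet is not from the patch):

	/* SLDT %ax encodes as 0F 00 C0: ModRM mod=11, reg=000, rm=000. */
	unsigned char modrm = 0xc0;
	int reg = (modrm >> 3) & 0x7;	/* what X86_MODRM_REG() extracts */
	/* reg == 0 -> UMIP_INST_SLDT, reg == 1 -> UMIP_INST_STR, else -EINVAL */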
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b71daed3cca2..59e13a79c2e3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3671,6 +3671,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u32 ecx = msr->index;
u64 data = msr->data;
switch (ecx) {
+ case MSR_IA32_CR_PAT:
+ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ return 1;
+ vcpu->arch.pat = data;
+ svm->vmcb->save.g_pat = data;
+ mark_dirty(svm->vmcb, VMCB_NPT);
+ break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7c3522a989d0..714a0673ec3c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -70,6 +70,9 @@ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
@@ -202,6 +205,10 @@ struct loaded_vmcs {
bool nmi_known_unmasked;
unsigned long vmcs_host_cr3; /* May not match real cr3 */
unsigned long vmcs_host_cr4; /* May not match real cr4 */
+ /* Support for vnmi-less CPUs */
+ int soft_vnmi_blocked;
+ ktime_t entry_time;
+ s64 vnmi_blocked_time;
struct list_head loaded_vmcss_on_cpu_link;
};
@@ -1291,6 +1298,11 @@ static inline bool cpu_has_vmx_invpcid(void)
SECONDARY_EXEC_ENABLE_INVPCID;
}
+static inline bool cpu_has_virtual_nmis(void)
+{
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+}
+
static inline bool cpu_has_vmx_wbinvd_exit(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -1348,11 +1360,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
(vmcs12->secondary_vm_exec_control & bit);
}
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
-{
- return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
-}
-
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control &
@@ -3712,9 +3719,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
&_vmexit_control) < 0)
return -EIO;
- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
- PIN_BASED_VIRTUAL_NMIS;
- opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
+ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0)
return -EIO;
@@ -5232,6 +5239,10 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
if (!kvm_vcpu_apicv_active(&vmx->vcpu))
pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+ if (!enable_vnmi)
+ pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
/* Enable the preemption timer dynamically */
pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
return pin_based_exec_ctrl;
@@ -5666,7 +5677,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
- if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+ if (!enable_vnmi ||
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
enable_irq_window(vcpu);
return;
}
@@ -5706,6 +5718,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (!enable_vnmi) {
+ /*
+ * Tracking the NMI-blocked state in software is built upon
+ * finding the next open IRQ window. This, in turn, depends on
+ * well-behaving guests: They have to keep IRQs disabled at
+ * least as long as the NMI handler runs. Otherwise we may
+ * cause NMI nesting, maybe breaking the guest. But as this is
+ * highly unlikely, we can live with the residual risk.
+ */
+ vmx->loaded_vmcs->soft_vnmi_blocked = 1;
+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
+ }
+
++vcpu->stat.nmi_injections;
vmx->loaded_vmcs->nmi_known_unmasked = false;
@@ -5724,6 +5749,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool masked;
+ if (!enable_vnmi)
+ return vmx->loaded_vmcs->soft_vnmi_blocked;
if (vmx->loaded_vmcs->nmi_known_unmasked)
return false;
masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -5735,13 +5762,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
- if (masked)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
+ if (!enable_vnmi) {
+ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
+ vmx->loaded_vmcs->soft_vnmi_blocked = masked;
+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
+ }
+ } else {
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+ if (masked)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
}
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -5749,6 +5783,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
if (to_vmx(vcpu)->nested.nested_run_pending)
return 0;
+ if (!enable_vnmi &&
+ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+ return 0;
+
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_NMI));
@@ -6476,6 +6514,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* AAK134, BY25.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ enable_vnmi &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
@@ -6535,6 +6574,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
+ WARN_ON_ONCE(!enable_vnmi);
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_VIRTUAL_NMI_PENDING);
++vcpu->stat.nmi_window_exits;
@@ -6758,6 +6798,9 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
+ if (!cpu_has_virtual_nmis())
+ enable_vnmi = 0;
+
/*
* set_apic_access_page_addr() is used to reload apic access
* page upon invalidation. No need to do anything if not
@@ -6962,7 +7005,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
}
/* Create a new VMCS */
- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+ item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
if (!item)
return NULL;
item->vmcs02.vmcs = alloc_vmcs();
@@ -7979,6 +8022,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
* "blocked by NMI" bit has to be set before next VM entry.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ enable_vnmi &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
@@ -8823,6 +8867,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
return 0;
}
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked)) {
+ if (vmx_interrupt_allowed(vcpu)) {
+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+ vcpu->arch.nmi_pending) {
+ /*
+ * This CPU doesn't support us in finding the end of an
+ * NMI-blocked window if the guest runs with IRQs
+ * disabled. So we pull the trigger after 1 s of
+ * futile waiting, but inform the user about this.
+ */
+ printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+ "state on VCPU %d after 1 s timeout\n",
+ __func__, vcpu->vcpu_id);
+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+ }
+ }
+
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -9105,33 +9168,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
- if (vmx->loaded_vmcs->nmi_known_unmasked)
- return;
- /*
- * Can't use vmx->exit_intr_info since we're not sure what
- * the exit reason is.
- */
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
- /*
- * SDM 3: 27.7.1.2 (September 2008)
- * Re-set bit "block by NMI" before VM entry if vmexit caused by
- * a guest IRET fault.
- * SDM 3: 23.2.2 (September 2008)
- * Bit 12 is undefined in any of the following cases:
- * If the VM exit sets the valid bit in the IDT-vectoring
- * information field.
- * If the VM exit is due to a double fault.
- */
- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
- vector != DF_VECTOR && !idtv_info_valid)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmx->loaded_vmcs->nmi_known_unmasked =
- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
- & GUEST_INTR_STATE_NMI);
+ if (enable_vnmi) {
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
+ return;
+ /*
+ * Can't use vmx->exit_intr_info since we're not sure what
+ * the exit reason is.
+ */
+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+ /*
+ * SDM 3: 27.7.1.2 (September 2008)
+ * Re-set bit "block by NMI" before VM entry if vmexit caused by
+ * a guest IRET fault.
+ * SDM 3: 23.2.2 (September 2008)
+ * Bit 12 is undefined in any of the following cases:
+ * If the VM exit sets the valid bit in the IDT-vectoring
+ * information field.
+ * If the VM exit is due to a double fault.
+ */
+ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+ vector != DF_VECTOR && !idtv_info_valid)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+ & GUEST_INTR_STATE_NMI);
+ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->vnmi_blocked_time +=
+ ktime_to_ns(ktime_sub(ktime_get(),
+ vmx->loaded_vmcs->entry_time));
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -9248,6 +9316,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long debugctlmsr, cr3, cr4;
+ /* Record the guest's net vcpu time for enforced NMI injections. */
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->entry_time = ktime_get();
+
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
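For CPUs without virtual-NMI support, the soft-vNMI path above tracks how long an injected NMI has been considered blocked: entry_time is sampled before VM entry and the elapsed guest time is accumulated on exit. A condensed sketch of the accounting (names follow the patch, the flow is abbreviated):

	/* Before VM entry (vmx_vcpu_run): remember when the blocked window began. */
	loaded_vmcs->entry_time = ktime_get();

	/* After VM exit (vmx_recover_nmi_blocking): accumulate the elapsed time. */
	loaded_vmcs->vnmi_blocked_time +=
		ktime_to_ns(ktime_sub(ktime_get(), loaded_vmcs->entry_time));

	/* vmx_handle_exit() then gives up after one second of blocking
	 * (1000000000LL ns in the patch) and warns. */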
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 35625d279458..9119d8e41f1f 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -733,11 +733,11 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
*
* Returns:
*
- * A signed 8-bit value containing the default parameters on success.
+ * An int containing ORed-in default parameters on success.
*
* -EINVAL on error.
*/
-char insn_get_code_seg_params(struct pt_regs *regs)
+int insn_get_code_seg_params(struct pt_regs *regs)
{
struct desc_struct *desc;
short sel;
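The return-type change above matters because plain char has implementation-defined signedness: on targets where it is unsigned, neither the negative error code nor high-bit flag values survive the narrowing. A minimal illustration of the bug class (not the original call site):

	char ret = -EINVAL;	/* -22 becomes 234 where plain char is unsigned */
	if (ret < 0)		/* never true there, so the error is silently lost */
		return -EINVAL;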
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..c4d55919fac1 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -896,7 +896,7 @@ EndTable
GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8ae0000cbdb3..00b296617ca4 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -158,6 +158,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (len > TASK_SIZE)
return -ENOMEM;
+ /* No address checking. See comment at mmap_address_hint_valid() */
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
@@ -165,12 +166,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
+ addr &= huge_page_mask(h);
+ if (!mmap_address_hint_valid(addr, len))
+ goto get_unmapped_area;
+
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
}
+
+get_unmapped_area:
if (mm->get_unmapped_area == arch_get_unmapped_area)
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index a99679826846..155ecbac9e28 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -33,6 +33,8 @@
#include <linux/compat.h>
#include <asm/elf.h>
+#include "physaddr.h"
+
struct va_alignment __read_mostly va_align = {
.flags = -1,
};
@@ -174,3 +176,63 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
+
+/**
+ * mmap_address_hint_valid - Validate the address hint of mmap
+ * @addr: Address hint
+ * @len: Mapping length
+ *
+ * Check whether @addr and @addr + @len result in a valid mapping.
+ *
+ * On 32bit this only checks whether @addr + @len is <= TASK_SIZE.
+ *
+ * On 64bit with 5-level page tables another sanity check is required
+ * because mappings requested by mmap(@addr, 0) which cross the 47-bit
+ * virtual address boundary can cause the following theoretical issue:
+ *
+ * An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr
+ * is below the border of the 47-bit address space and @addr + @len is
+ * above the border.
+ *
+ * With 4-level paging this request succeeds, but the resulting mapping
+ * address will always be within the 47-bit virtual address space, because
+ * the hint address does not result in a valid mapping and is
+ * ignored. Hence applications which are not prepared to handle virtual
+ * addresses above 47-bit work correctly.
+ *
+ * With 5-level paging this request would be granted and result in a
+ * mapping which crosses the border of the 47-bit virtual address
+ * space. If the application cannot handle addresses above 47-bit this
+ * will lead to misbehaviour and hard to diagnose failures.
+ *
+ * Therefore ignore address hints which would result in a mapping crossing
+ * the 47-bit virtual address boundary.
+ *
+ * Note, that in the same scenario with MAP_FIXED the behaviour is
+ * different. The request with @addr < 47-bit and @addr + @len > 47-bit
+ * fails on a 4-level paging machine but succeeds on a 5-level paging
+ * machine. It is reasonable to expect that an application does not rely on
+ * the failure of such a fixed mapping request, so the restriction is not
+ * applied.
+ */
+bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
+{
+ if (TASK_SIZE - len < addr)
+ return false;
+
+ return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
+}
+
+/* Can we access it for direct reading/writing? Must be RAM: */
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+ return addr + count <= __pa(high_memory);
+}
+
+/* Can we access it through mmap? Must be a valid physical address: */
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+ phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+ return phys_addr_valid(addr + count - 1);
+}
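A short worked example of mmap_address_hint_valid() above: a hint whose mapping would straddle the 47-bit boundary is rejected, while hints entirely below or entirely above it are honoured. The constants here are assumptions for illustration (4 KiB pages, the usual x86-64 DEFAULT_MAP_WINDOW, and a TASK_SIZE large enough to pass the first check):

	unsigned long win = (1UL << 47) - 4096;		/* assumed DEFAULT_MAP_WINDOW */

	mmap_address_hint_valid(win - 8192,  4096);	/* true:  stays below the window */
	mmap_address_hint_valid(win - 4096, 16384);	/* false: mapping would cross it */
	mmap_address_hint_valid(win + 4096,  4096);	/* true:  entirely above, both sides agree */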
diff --git a/block/blk-core.c b/block/blk-core.c
index 1038706edd87..b8881750a3ac 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -863,9 +863,9 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-static void blk_rq_timed_out_timer(unsigned long data)
+static void blk_rq_timed_out_timer(struct timer_list *t)
{
- struct request_queue *q = (struct request_queue *)data;
+ struct request_queue *q = from_timer(q, t, timeout);
kblockd_schedule_work(&q->timeout_work);
}
@@ -901,9 +901,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info->name = "block";
q->node = node_id;
- setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
- laptop_mode_timer_fn, (unsigned long) q);
- setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
+ laptop_mode_timer_fn, 0);
+ timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
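The blk-core hunk above is one instance of the timer-API conversion that recurs through the rest of this series (blk-stat, blk-throttle, the ATM drivers, DAC960, ataflop): callbacks now receive the struct timer_list pointer and recover their container with from_timer() instead of casting an unsigned long cookie. A generic before/after sketch with illustrative names:

	struct mydev {
		struct timer_list poll_timer;
		/* ... */
	};

	/* Old style: opaque cookie, cast back in the handler. */
	static void poll_fn_old(unsigned long data)
	{
		struct mydev *dev = (struct mydev *)data;
		/* ... */
	}
	/* setup_timer(&dev->poll_timer, poll_fn_old, (unsigned long)dev); */

	/* New style: container_of() via from_timer(), no cast, no cookie. */
	static void poll_fn(struct timer_list *t)
	{
		struct mydev *dev = from_timer(dev, t, poll_timer);
		/* ... */
	}
	/* timer_setup(&dev->poll_timer, poll_fn, 0); */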
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 3a2f3c96f367..28003bf9941c 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -79,9 +79,9 @@ void blk_stat_add(struct request *rq)
rcu_read_unlock();
}
-static void blk_stat_timer_fn(unsigned long data)
+static void blk_stat_timer_fn(struct timer_list *t)
{
- struct blk_stat_callback *cb = (void *)data;
+ struct blk_stat_callback *cb = from_timer(cb, t, timer);
unsigned int bucket;
int cpu;
@@ -130,7 +130,7 @@ blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
cb->bucket_fn = bucket_fn;
cb->data = data;
cb->buckets = buckets;
- setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);
+ timer_setup(&cb->timer, blk_stat_timer_fn, 0);
return cb;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 96ad32623427..825bc29767e6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -225,7 +225,7 @@ struct throtl_data
bool track_bio_latency;
};
-static void throtl_pending_timer_fn(unsigned long arg);
+static void throtl_pending_timer_fn(struct timer_list *t);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
@@ -478,8 +478,7 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq)
INIT_LIST_HEAD(&sq->queued[0]);
INIT_LIST_HEAD(&sq->queued[1]);
sq->pending_tree = RB_ROOT;
- setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
- (unsigned long)sq);
+ timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}
static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
@@ -1249,9 +1248,9 @@ static bool throtl_can_upgrade(struct throtl_data *td,
* the top-level service_tree is reached, throtl_data->dispatch_work is
* kicked so that the ready bio's are issued.
*/
-static void throtl_pending_timer_fn(unsigned long arg)
+static void throtl_pending_timer_fn(struct timer_list *t)
{
- struct throtl_service_queue *sq = (void *)arg;
+ struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
struct throtl_grp *tg = sq_to_tg(sq);
struct throtl_data *td = sq_to_td(sq);
struct request_queue *q = td->queue;
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 85cea9de324a..358749c38894 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1021,6 +1021,18 @@ unlock:
EXPORT_SYMBOL_GPL(af_alg_sendpage);
/**
+ * af_alg_free_resources - release resources required for crypto request
+ */
+void af_alg_free_resources(struct af_alg_async_req *areq)
+{
+ struct sock *sk = areq->sk;
+
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
+}
+EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
+/**
* af_alg_async_cb - AIO callback handler
*
* This handler cleans up the struct af_alg_async_req upon completion of the
@@ -1036,18 +1048,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
struct kiocb *iocb = areq->iocb;
unsigned int resultlen;
- lock_sock(sk);
-
/* Buffer size written by crypto operation. */
resultlen = areq->outlen;
- af_alg_free_areq_sgls(areq);
- sock_kfree_s(sk, areq, areq->areqlen);
- __sock_put(sk);
+ af_alg_free_resources(areq);
+ sock_put(sk);
iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
- release_sock(sk);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
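The af_alg change above pairs with the algif_aead and algif_skcipher hunks that follow: the socket reference is taken before the cipher call, kept only while the request is genuinely in flight, and the SGL/request release is centralised in af_alg_free_resources() so the async callback no longer needs the socket lock. A condensed sketch of the resulting lifetime (error handling abbreviated, not a verbatim excerpt):

	/* Submission (recvmsg): */
	sock_hold(sk);				/* reference for the in-flight request */
	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		areq->outlen = outlen;
		return -EIOCBQUEUED;		/* the callback now owns the reference */
	}
	sock_put(sk);				/* completed synchronously */
	/* ... */
	af_alg_free_resources(areq);		/* SGLs and areq released in one place */

	/* Completion (af_alg_async_cb): */
	af_alg_free_resources(areq);
	sock_put(sk);				/* drop the submission reference */
	iocb->ki_complete(iocb, err ? err : resultlen, 0);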
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index aacae0837aff..805f485ddf1b 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
- unsigned int as = crypto_aead_authsize(tfm);
+ unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
- struct af_alg_tsgl *tsgl;
- struct scatterlist *src;
+ struct af_alg_tsgl *tsgl, *tmp;
+ struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
size_t outlen = 0; /* [out] RX bufs produced by kernel */
@@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
}
processed = used + ctx->aead_assoclen;
- tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+ list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+ for (i = 0; i < tsgl->cur; i++) {
+ struct scatterlist *process_sg = tsgl->sg + i;
+
+ if (!(process_sg->length) || !sg_page(process_sg))
+ continue;
+ tsgl_src = process_sg;
+ break;
+ }
+ if (tsgl_src)
+ break;
+ }
+ if (processed && !tsgl_src) {
+ err = -EFAULT;
+ goto free;
+ }
/*
* Copy of AAD from source to destination
@@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
*/
/* Use the RX SGL as source (and destination) for crypto op. */
- src = areq->first_rsgl.sgl.sg;
+ rsgl_src = areq->first_rsgl.sgl.sg;
if (ctx->enc) {
/*
@@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* v v
* RX SGL: AAD || PT || Tag
*/
- err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sg, processed);
if (err)
goto free;
@@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
*/
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sg, outlen);
if (err)
goto free;
@@ -257,23 +272,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
- src = areq->tsgl;
+ rsgl_src = areq->tsgl;
}
/* Initialize the crypto operation */
- aead_request_set_crypt(&areq->cra_u.aead_req, src,
+ aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
areq->first_rsgl.sgl.sg, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
+ sock_hold(sk);
areq->iocb = msg->msg_iocb;
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS || err == -EBUSY) {
+ /* Remember output size that will be generated. */
+ areq->outlen = outlen;
+
+ return -EIOCBQUEUED;
+ }
+
+ sock_put(sk);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
@@ -285,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
&ctx->wait);
}
- /* AIO operation in progress */
- if (err == -EINPROGRESS) {
- sock_hold(sk);
-
- /* Remember output size that will be generated. */
- areq->outlen = outlen;
-
- return -EIOCBQUEUED;
- }
free:
- af_alg_free_areq_sgls(areq);
- sock_kfree_s(sk, areq, areq->areqlen);
+ af_alg_free_resources(areq);
return err ? err : outlen;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 9954b078f0b9..30cff827dd8f 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
+ sock_hold(sk);
areq->iocb = msg->msg_iocb;
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
err = ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS || err == -EBUSY) {
+ /* Remember output size that will be generated. */
+ areq->outlen = len;
+
+ return -EIOCBQUEUED;
+ }
+
+ sock_put(sk);
} else {
/* Synchronous operation */
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
@@ -136,19 +147,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
&ctx->wait);
}
- /* AIO operation in progress */
- if (err == -EINPROGRESS) {
- sock_hold(sk);
-
- /* Remember output size that will be generated. */
- areq->outlen = len;
-
- return -EIOCBQUEUED;
- }
free:
- af_alg_free_areq_sgls(areq);
- sock_kfree_s(sk, areq, areq->areqlen);
+ af_alg_free_resources(areq);
return err ? err : len;
}
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index 1063b644efcd..e284d9cb9237 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -19,6 +19,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PKCS#7 testing key type");
+MODULE_AUTHOR("Red Hat, Inc.");
static unsigned pkcs7_usage;
module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO);
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index d140d8bb2c96..c1ca1e86f5c4 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -19,6 +20,10 @@
#include "pkcs7_parser.h"
#include "pkcs7-asn1.h"
+MODULE_DESCRIPTION("PKCS#7 parser");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
struct pkcs7_parse_context {
struct pkcs7_message *msg; /* Message being constructed */
struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d916235d6cf5..bc3035ef27a2 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -22,6 +22,8 @@
#include <crypto/public_key.h>
#include <crypto/akcipher.h>
+MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
+MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
/*
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index eea71dc9686c..c9013582c026 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -275,4 +275,5 @@ module_init(x509_key_init);
module_exit(x509_key_exit);
MODULE_DESCRIPTION("X.509 certificate parser");
+MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index d5692e35fab1..778e0ff42bfa 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+ scatterwalk_done(&walk->in, 0, walk->total);
+ scatterwalk_done(&walk->out, 0, walk->total);
+
walk->iv = req->iv;
walk->oiv = req->iv;
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index acf16c323e38..9287ec958b70 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -293,7 +293,7 @@ static inline void __init show_version (void) {
*/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
/********** globals **********/
static unsigned short debug = 0;
@@ -1493,8 +1493,8 @@ static const struct atmdev_ops amb_ops = {
};
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
- amb_dev * dev = (amb_dev *) arg;
+static void do_housekeeping (struct timer_list *t) {
+ amb_dev * dev = from_timer(dev, t, housekeeping);
// could collect device-specific (not driver/atm-linux) stats here
@@ -2258,7 +2258,7 @@ static int amb_probe(struct pci_dev *pci_dev,
PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
+ dev->atm_dev->dev_data = (void *) dev;
// register our address
amb_esi (dev, dev->atm_dev->esi);
@@ -2267,8 +2267,7 @@ static int amb_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
- setup_timer(&dev->housekeeping, do_housekeeping,
- (unsigned long)dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
// enable host interrupts
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 6b6368a56526..d97c05690faa 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1656,9 +1656,9 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
#ifdef FS_POLL_FREQ
-static void fs_poll (unsigned long data)
+static void fs_poll (struct timer_list *t)
{
- struct fs_dev *dev = (struct fs_dev *) data;
+ struct fs_dev *dev = from_timer(dev, t, timer);
fs_irq (0, dev);
dev->timer.expires = jiffies + FS_POLL_FREQ;
@@ -1885,9 +1885,7 @@ static int fs_init(struct fs_dev *dev)
}
#ifdef FS_POLL_FREQ
- init_timer (&dev->timer);
- dev->timer.data = (unsigned long) dev;
- dev->timer.function = fs_poll;
+ timer_setup(&dev->timer, fs_poll, 0);
dev->timer.expires = jiffies + FS_POLL_FREQ;
add_timer (&dev->timer);
#endif
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 126855e6cb7d..6ebc4e4820fc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -3083,8 +3083,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
ASSERT(fore200e_vcc);
len = sprintf(page,
- " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
- (u32)(unsigned long)vcc,
+ " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
+ vcc,
vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
fore200e_vcc->tx_pdu,
fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index e121b8485731..5ddc203206b8 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -357,7 +357,7 @@ static inline void __init show_version (void) {
/********** globals **********/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
static unsigned short debug = 0;
static unsigned short vpi_bits = 0;
@@ -1418,9 +1418,9 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
+static void do_housekeeping (struct timer_list *t) {
// just stats at the moment
- hrz_dev * dev = (hrz_dev *) arg;
+ hrz_dev * dev = from_timer(dev, t, housekeeping);
// collect device-specific (not driver/atm-linux) stats here
dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
@@ -2796,7 +2796,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = vpi_bits;
dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
- setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
out:
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 909744eb7bab..0a67487c0b1d 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -45,8 +45,8 @@ static DEFINE_SPINLOCK(idt77105_priv_lock);
#define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg)
#define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg)
-static void idt77105_stats_timer_func(unsigned long);
-static void idt77105_restart_timer_func(unsigned long);
+static void idt77105_stats_timer_func(struct timer_list *);
+static void idt77105_restart_timer_func(struct timer_list *);
static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func);
@@ -80,7 +80,7 @@ static u16 get_counter(struct atm_dev *dev, int counter)
* a separate copy of the stats allows implementation of
* an ioctl which gathers the stats *without* zero'ing them.
*/
-static void idt77105_stats_timer_func(unsigned long dummy)
+static void idt77105_stats_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
@@ -109,7 +109,7 @@ static void idt77105_stats_timer_func(unsigned long dummy)
* interrupts need to be disabled when the cable is pulled out
* to avoid lots of spurious cell error interrupts.
*/
-static void idt77105_restart_timer_func(unsigned long dummy)
+static void idt77105_restart_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 0e3b9c44c808..0277f36be85b 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1528,9 +1528,9 @@ idt77252_tx(struct idt77252_dev *card)
static void
-tst_timer(unsigned long data)
+tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = (struct idt77252_dev *)data;
+ struct idt77252_dev *card = from_timer(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
@@ -3634,7 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
spin_lock_init(&card->cmd_lock);
spin_lock_init(&card->tst_lock);
- setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
+ timer_setup(&card->tst_timer, tst_timer, 0);
/* Do the I/O remapping... */
card->membase = ioremap(membase, 1024);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 12f646760b68..98a3a43484c8 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -75,7 +75,7 @@ static void desc_dbg(IADEV *iadev);
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
-static void ia_led_timer(unsigned long arg);
+static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
@@ -2432,7 +2432,7 @@ static void ia_update_stats(IADEV *iadev) {
return;
}
-static void ia_led_timer(unsigned long arg) {
+static void ia_led_timer(struct timer_list *unused) {
unsigned long flags;
static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
u_char i;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 2351dad78ff5..5f8e009b2da1 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1586,8 +1586,8 @@ static int service_buffer_allocate(struct lanai_dev *lanai)
lanai->pci);
if (unlikely(lanai->service.start == NULL))
return -ENOMEM;
- DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
- (unsigned long) lanai->service.start,
+ DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+ lanai->service.start,
lanai_buf_size(&lanai->service),
lanai_buf_size_cardorder(&lanai->service));
/* Clear ServWrite register to be safe */
@@ -1761,9 +1761,9 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
}
#endif /* !DEBUG_RW */
-static void lanai_timed_poll(unsigned long arg)
+static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = (struct lanai_dev *) arg;
+ struct lanai_dev *lanai = from_timer(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
@@ -1790,10 +1790,8 @@ static void lanai_timed_poll(unsigned long arg)
static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
{
- init_timer(&lanai->timer);
+ timer_setup(&lanai->timer, lanai_timed_poll, 0);
lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
- lanai->timer.data = (unsigned long) lanai;
- lanai->timer.function = lanai_timed_poll;
add_timer(&lanai->timer);
}
@@ -2220,9 +2218,9 @@ static int lanai_dev_open(struct atm_dev *atmdev)
#endif
memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
lanai_timed_poll_start(lanai);
- printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+ printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
"(%pMF)\n", lanai->number, (int) lanai->pci->revision,
- (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+ lanai->base, lanai->pci->irq, atmdev->esi);
printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
"board_rev=%d\n", lanai->number,
lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index a9702836cbae..cbec9adc01c7 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -145,7 +145,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
-static void ns_poll(unsigned long arg);
+static void ns_poll(struct timer_list *unused);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -284,10 +284,8 @@ static int __init nicstar_init(void)
XPRINTK("nicstar: nicstar_init() returned.\n");
if (!error) {
- init_timer(&ns_timer);
+ timer_setup(&ns_timer, ns_poll, 0);
ns_timer.expires = jiffies + NS_POLL_PERIOD;
- ns_timer.data = 0UL;
- ns_timer.function = ns_poll;
add_timer(&ns_timer);
}
@@ -2681,7 +2679,7 @@ static void which_list(ns_dev * card, struct sk_buff *skb)
}
#endif /* EXTRA_DEBUG */
-static void ns_poll(unsigned long arg)
+static void ns_poll(struct timer_list *unused)
{
int i;
ns_dev *card;
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index b8825f2d79e0..4b044710a8cf 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -177,7 +177,7 @@ static int set_loopback(struct atm_dev *dev,int mode)
default:
return -EINVAL;
}
- dev->ops->phy_put(dev, control, reg);
+ dev->ops->phy_put(dev, control, reg);
PRIV(dev)->loop_mode = mode;
return 0;
}
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index d7d21118d3e0..2c2ed9cf8796 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -136,6 +136,7 @@ config CFAG12864B_RATE
config IMG_ASCII_LCD
tristate "Imagination Technologies ASCII LCD Display"
+ depends on HAS_IOMEM
default y if MIPS_MALTA || MIPS_SEAD3
select SYSCON
help
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 680ee1d36ac9..38559f04db2c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -481,7 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
- return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
+ return ws->timer.function != pm_wakeup_timer_fn;
}
/*
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 255591ab3716..442e777bdfb2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3079,11 +3079,10 @@ DAC960_InitializeController(DAC960_Controller_T *Controller)
/*
Initialize the Monitoring Timer.
*/
- init_timer(&Controller->MonitoringTimer);
+ timer_setup(&Controller->MonitoringTimer,
+ DAC960_MonitoringTimerFunction, 0);
Controller->MonitoringTimer.expires =
jiffies + DAC960_MonitoringTimerInterval;
- Controller->MonitoringTimer.data = (unsigned long) Controller;
- Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
add_timer(&Controller->MonitoringTimer);
Controller->ControllerInitialized = true;
return true;
@@ -5620,9 +5619,9 @@ static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
the status of DAC960 Controllers.
*/
-static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+static void DAC960_MonitoringTimerFunction(struct timer_list *t)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+ DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
DAC960_Command_T *Command;
unsigned long flags;
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
index 85fa9bb63759..6a6226a2b932 100644
--- a/drivers/block/DAC960.h
+++ b/drivers/block/DAC960.h
@@ -4406,7 +4406,7 @@ static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
static irqreturn_t DAC960_P_InterruptHandler(int, void *);
static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_MonitoringTimerFunction(struct timer_list *);
static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
DAC960_Controller_T *, ...);
static void DAC960_CreateProcEntries(DAC960_Controller_T *);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 55ab25f79a08..812fed069708 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1429,7 +1429,7 @@ aoecmd_ata_id(struct aoedev *d)
d->rttavg = RTTAVG_INIT;
d->rttdev = RTTDEV_INIT;
- d->timer.function = (TIMER_FUNC_TYPE)rexmit_timer;
+ d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb) {
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index ae596e55bcb6..8bc3b9fd8dd2 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -342,8 +342,8 @@ static int NeedSeek = 0;
static void fd_select_side( int side );
static void fd_select_drive( int drive );
static void fd_deselect( void );
-static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void fd_motor_off_timer(struct timer_list *unused);
+static void check_change(struct timer_list *unused);
static irqreturn_t floppy_irq (int irq, void *dummy);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
@@ -353,12 +353,12 @@ static void fd_calibrate_done( int status );
static void fd_seek( void );
static void fd_seek_done( int status );
static void fd_rwsec( void );
-static void fd_readtrack_check( unsigned long dummy );
+static void fd_readtrack_check(struct timer_list *unused);
static void fd_rwsec_done( int status );
static void fd_rwsec_done1(int status);
static void fd_writetrack( void );
static void fd_writetrack_done( int status );
-static void fd_times_out( unsigned long dummy );
+static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
@@ -479,7 +479,7 @@ static void fd_deselect( void )
* counts the index signals, which arrive only if one drive is selected.
*/
-static void fd_motor_off_timer( unsigned long dummy )
+static void fd_motor_off_timer(struct timer_list *unused)
{
unsigned char status;
@@ -515,7 +515,7 @@ static void fd_motor_off_timer( unsigned long dummy )
* as possible) and keep track of the current state of the write protection.
*/
-static void check_change( unsigned long dummy )
+static void check_change(struct timer_list *unused)
{
static int drive = 0;
@@ -966,7 +966,7 @@ static void fd_rwsec( void )
}
-static void fd_readtrack_check( unsigned long dummy )
+static void fd_readtrack_check(struct timer_list *unused)
{
unsigned long flags, addr, addr2;
@@ -1237,7 +1237,7 @@ static void fd_writetrack_done( int status )
fd_error();
}
-static void fd_times_out( unsigned long dummy )
+static void fd_times_out(struct timer_list *unused)
{
atari_disable_irq( IRQ_MFP_FDC );
if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index adc877dfef5c..38fc5f397fde 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -348,7 +348,6 @@ struct rbd_client_id {
struct rbd_mapping {
u64 size;
u64 features;
- bool read_only;
};
/*
@@ -450,12 +449,11 @@ static DEFINE_IDA(rbd_dev_id_ida);
static struct workqueue_struct *rbd_wq;
/*
- * Default to false for now, as single-major requires >= 0.75 version of
- * userspace rbd utility.
+ * single-major requires >= 0.75 version of userspace rbd utility.
*/
-static bool single_major = false;
+static bool single_major = true;
module_param(single_major, bool, S_IRUGO);
-MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
+MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);
@@ -608,9 +606,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
bool removing = false;
- if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
- return -EROFS;
-
spin_lock_irq(&rbd_dev->lock);
if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
removing = true;
@@ -640,46 +635,24 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
- int ret = 0;
- int val;
- bool ro;
- bool ro_changed = false;
+ int ro;
- /* get_user() may sleep, so call it before taking rbd_dev->lock */
- if (get_user(val, (int __user *)(arg)))
+ if (get_user(ro, (int __user *)arg))
return -EFAULT;
- ro = val ? true : false;
- /* Snapshot doesn't allow to write*/
+ /* Snapshots can't be marked read-write */
if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
return -EROFS;
- spin_lock_irq(&rbd_dev->lock);
- /* prevent others open this device */
- if (rbd_dev->open_count > 1) {
- ret = -EBUSY;
- goto out;
- }
-
- if (rbd_dev->mapping.read_only != ro) {
- rbd_dev->mapping.read_only = ro;
- ro_changed = true;
- }
-
-out:
- spin_unlock_irq(&rbd_dev->lock);
- /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
- if (ret == 0 && ro_changed)
- set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
-
- return ret;
+ /* Let blkdev_roset() handle it */
+ return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
- int ret = 0;
+ int ret;
switch (cmd) {
case BLKROSET:
@@ -4050,15 +4023,8 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- /* Only reads are allowed to a read-only device */
-
- if (op_type != OBJ_OP_READ) {
- if (rbd_dev->mapping.read_only) {
- result = -EROFS;
- goto err_rq;
- }
- rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
- }
+ rbd_assert(op_type == OBJ_OP_READ ||
+ rbd_dev->spec->snap_id == CEPH_NOSNAP);
/*
* Quit early if the mapped snapshot no longer exists. It's
@@ -4423,7 +4389,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
/* enable the discard support */
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = segment_size;
- q->limits.discard_alignment = segment_size;
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
@@ -5994,7 +5959,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
goto err_out_disk;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
- set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+ set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
@@ -6145,7 +6110,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_options *rbd_opts = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
- bool read_only;
int rc;
if (!try_module_get(THIS_MODULE))
@@ -6194,11 +6158,8 @@ static ssize_t do_rbd_add(struct bus_type *bus,
}
/* If we are mapping a snapshot it must be marked read-only */
-
- read_only = rbd_dev->opts->read_only;
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
- read_only = true;
- rbd_dev->mapping.read_only = read_only;
+ rbd_dev->opts->read_only = true;
rc = rbd_dev_device_setup(rbd_dev);
if (rc)
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 926dce9c452f..c148e83e4ed7 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -203,9 +203,9 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
return 0;
}
-static void creg_cmd_timed_out(unsigned long data)
+static void creg_cmd_timed_out(struct timer_list *t)
{
- struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+ struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
struct creg_cmd *cmd;
spin_lock(&card->creg_ctrl.lock);
@@ -745,8 +745,7 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
mutex_init(&card->creg_ctrl.reset_lock);
INIT_LIST_HEAD(&card->creg_ctrl.queue);
spin_lock_init(&card->creg_ctrl.lock);
- setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
- (unsigned long) card);
+ timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
return 0;
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 6a1b2177951c..beaccf197a5a 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -354,9 +354,9 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
rsxx_complete_dma(ctrl, dma, status);
}
-static void dma_engine_stalled(unsigned long data)
+static void dma_engine_stalled(struct timer_list *t)
{
- struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+ struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
int cnt;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
@@ -838,8 +838,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
mutex_init(&ctrl->work_lock);
INIT_LIST_HEAD(&ctrl->queue);
- setup_timer(&ctrl->activity_timer, dma_engine_stalled,
- (unsigned long)ctrl);
+ timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
if (!ctrl->issue_wq)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 2819f23e8bf2..de0d08133c7e 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -707,9 +707,9 @@ static void skd_start_queue(struct work_struct *work)
blk_mq_start_hw_queues(skdev->queue);
}
-static void skd_timer_tick(ulong arg)
+static void skd_timer_tick(struct timer_list *t)
{
- struct skd_device *skdev = (struct skd_device *)arg;
+ struct skd_device *skdev = from_timer(skdev, t, timer);
unsigned long reqflags;
u32 state;
@@ -857,7 +857,7 @@ static int skd_start_timer(struct skd_device *skdev)
{
int rc;
- setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+ timer_setup(&skdev->timer, skd_timer_tick, 0);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index ad9749463d4f..5ca56bfae63c 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -81,7 +81,7 @@ struct vdc_port {
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(unsigned long _arg);
+static void vdc_ldc_reset_timer(struct timer_list *t);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
@@ -974,8 +974,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
- setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
- (unsigned long)port);
+ timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1087,9 +1086,9 @@ static void vdc_queue_drain(struct vdc_port *port)
__blk_end_request_all(req, BLK_STS_IOERR);
}
-static void vdc_ldc_reset_timer(unsigned long _arg)
+static void vdc_ldc_reset_timer(struct timer_list *t)
{
- struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
struct vio_driver_state *vio = &port->vio;
unsigned long flags;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index e620e423102b..af51015d056e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -397,7 +397,7 @@ static void set_timeout(struct floppy_state *fs, int nticks,
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
- fs->timeout.function = (TIMER_FUNC_TYPE)proc;
+ fs->timeout.function = proc;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 0677d2514665..8077123678ad 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -718,7 +718,7 @@ static void check_batteries(struct cardinfo *card)
set_fault_to_battery_status(card);
}
-static void check_all_batteries(unsigned long ptr)
+static void check_all_batteries(struct timer_list *unused)
{
int i;
@@ -738,8 +738,7 @@ static void check_all_batteries(unsigned long ptr)
static void init_battery_timer(void)
{
- init_timer(&battery_timer);
- battery_timer.function = check_all_batteries;
+ timer_setup(&battery_timer, check_all_batteries, 0);
battery_timer.expires = jiffies + (HZ * 60);
add_timer(&battery_timer);
}
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 14459d66ef0c..c24589414c75 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -770,9 +770,9 @@ static void ace_fsm_tasklet(unsigned long data)
spin_unlock_irqrestore(&ace->lock, flags);
}
-static void ace_stall_timer(unsigned long data)
+static void ace_stall_timer(struct timer_list *t)
{
- struct ace_device *ace = (void *)data;
+ struct ace_device *ace = from_timer(ace, t, stall_timer);
unsigned long flags;
dev_warn(ace->dev,
@@ -984,7 +984,7 @@ static int ace_setup(struct ace_device *ace)
* Initialize the state machine tasklet and stall timer
*/
tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
- setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+ timer_setup(&ace->stall_timer, ace_stall_timer, 0);
/*
* Initialize the request queue
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 1a0385ed6417..839ee61d352a 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -74,7 +74,7 @@
#endif /* TRACING */
static DEFINE_MUTEX(dtlk_mutex);
-static void dtlk_timer_tick(unsigned long data);
+static void dtlk_timer_tick(struct timer_list *unused);
static int dtlk_major;
static int dtlk_port_lpc;
@@ -259,7 +259,7 @@ static unsigned int dtlk_poll(struct file *file, poll_table * wait)
return mask;
}
-static void dtlk_timer_tick(unsigned long data)
+static void dtlk_timer_tick(struct timer_list *unused)
{
TRACE_TEXT(" dtlk_timer_tick");
wake_up_interruptible(&dtlk_process_list);
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 5b8db2ed844d..7700280717f2 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -122,11 +122,11 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
/* Last time scheduled */
static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
-static void hangcheck_fire(unsigned long);
+static void hangcheck_fire(struct timer_list *);
static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
-static void hangcheck_fire(unsigned long data)
+static void hangcheck_fire(struct timer_list *unused)
{
unsigned long long cur_tsc, tsc_diff;
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index c4ef73c6f455..6edfaa72b98b 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -367,9 +367,9 @@ static const struct file_operations bt_bmc_fops = {
.unlocked_ioctl = bt_bmc_ioctl,
};
-static void poll_timer(unsigned long data)
+static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = (void *)data;
+ struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
@@ -487,8 +487,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
- setup_timer(&bt_bmc->poll_timer, poll_timer,
- (unsigned long)bt_bmc);
+ timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
add_timer(&bt_bmc->poll_timer);
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9de189db2cc3..f45732a2cb3e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4766,7 +4766,7 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(unsigned long data)
+static void ipmi_timeout(struct timer_list *unused)
{
ipmi_smi_t intf;
int nt = 0;
@@ -5172,7 +5172,7 @@ static int ipmi_init_msghandler(void)
#endif /* CONFIG_IPMI_PROC_INTERFACE */
- setup_timer(&ipmi_timer, ipmi_timeout, 0);
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 71d33a1807e4..779869ed32b1 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1091,9 +1091,9 @@ static void set_need_watch(void *send_info, bool enable)
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
-static void smi_timeout(unsigned long data)
+static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = (struct smi_info *) data;
+ struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
@@ -1166,7 +1166,7 @@ static int smi_start_processing(void *send_info,
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
- setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ timer_setup(&new_smi->si_timer, smi_timeout, 0);
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 466b3a1c0adf..3cfaec728604 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -551,9 +551,9 @@ static void start_get(struct ssif_info *ssif_info)
}
}
-static void retry_timeout(unsigned long data)
+static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = (void *) data;
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
bool waiting;
@@ -1691,8 +1691,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
spin_lock_init(&ssif_info->lock);
ssif_info->ssif_state = SSIF_NORMAL;
- setup_timer(&ssif_info->retry_timer, retry_timeout,
- (unsigned long)ssif_info);
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
for (i = 0; i < SSIF_NUM_STATS; i++)
atomic_set(&ssif_info->stats[i], 0);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 970e1242a282..6aefe5370e5b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,6 +343,10 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ /* Does it even fit in phys_addr_t? */
+ if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ return -EINVAL;
+
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 44006ed9558f..a7113b78251a 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -23,7 +23,7 @@
#define __NWBUTTON_C /* Tell the header file who we are */
#include "nwbutton.h"
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static int button_press_count; /* The count of button presses */
/* Times for the end of a sequence */
@@ -127,7 +127,7 @@ static void button_consume_callbacks (int bpcount)
* any matching registered function callbacks, initiate reboot, etc.).
*/
-static void button_sequence_finished (unsigned long parameters)
+static void button_sequence_finished(struct timer_list *unused)
{
if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
button_press_count == reboot_count)
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
index abee3ca74801..9dedfd7adc0e 100644
--- a/drivers/char/nwbutton.h
+++ b/drivers/char/nwbutton.h
@@ -25,7 +25,7 @@ struct button_callback {
/* Function prototypes: */
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static irqreturn_t button_handler (int irq, void *dev_id);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 616871e68e09..5542a438bbd0 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -135,7 +135,7 @@ static struct fasync_struct *rtc_async_queue;
static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
#ifdef RTC_IRQ
-static void rtc_dropped_irq(unsigned long data);
+static void rtc_dropped_irq(struct timer_list *unused);
static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
#endif
@@ -1171,7 +1171,7 @@ module_exit(rtc_exit);
* for something that requires a steady > 1KHz signal anyways.)
*/
-static void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(struct timer_list *unused)
{
unsigned long freq;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 461bf0b8a094..230b99288024 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -22,9 +22,9 @@
#include "tpm.h"
#include "tpm-dev.h"
-static void user_reader_timeout(unsigned long ptr)
+static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = (struct file_priv *)ptr;
+ struct file_priv *priv = from_timer(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
@@ -48,8 +48,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
priv->chip = chip;
atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
- setup_timer(&priv->user_read_timer, user_reader_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 7c64a5c1bfc1..a31990408153 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -177,7 +177,14 @@ out_fail:
return ret;
}
-void timer_of_exit(struct timer_of *to)
+/**
+ * timer_of_cleanup - release timer_of resources
+ * @to: timer_of structure
+ *
+ * Release the resources that have been used in timer_of_init().
+ * This function should be called on init error paths.
+ */
+void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
timer_irq_exit(&to->of_irq);
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 43f5ba3f8979..3f708f1be43d 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -68,6 +68,6 @@ static inline unsigned long timer_of_period(struct timer_of *to)
extern int __init timer_of_init(struct device_node *np,
struct timer_of *to);
-extern void timer_of_exit(struct timer_of *to);
+extern void __init timer_of_cleanup(struct timer_of *to);
#endif
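The rename from timer_of_exit() to timer_of_cleanup(), together with the new kernel-doc and the __init annotation, pins down when the helper may be used: only on init error paths after a successful timer_of_init(). A rough usage sketch under that assumption; the driver name, the flags chosen, and the my_register_clockevent() follow-up step are hypothetical.

	#include <linux/init.h>
	#include <linux/of.h>
	#include "timer-of.h"

	static struct timer_of my_to = {
		.flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
		/* .of_irq / .of_clk / .of_base details omitted */
	};

	static int __init my_timer_init(struct device_node *np)
	{
		int ret;

		ret = timer_of_init(np, &my_to);
		if (ret)
			return ret;

		ret = my_register_clockevent(&my_to);	/* hypothetical follow-up step */
		if (ret) {
			/* undo timer_of_init() on the init error path */
			timer_of_cleanup(&my_to);
			return ret;
		}

		return 0;
	}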
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 6833ada237ab..7b0bf825c4e7 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -428,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct file *filp = vma->vm_file;
+ struct dev_dax *dev_dax = filp->private_data;
+ struct dax_region *dax_region = dev_dax->region;
+
+ if (!IS_ALIGNED(addr, dax_region->align))
+ return -EINVAL;
+ return 0;
+}
+
static const struct vm_operations_struct dax_vm_ops = {
.fault = dev_dax_fault,
.huge_fault = dev_dax_huge_fault,
+ .split = dev_dax_split,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 56cf825ed779..f3f4f810e5df 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -220,7 +220,7 @@ out_free_cpus:
return err;
}
-static void dummy_callback(unsigned long ignored) {}
+static void dummy_callback(struct timer_list *unused) {}
static int suspend_cpu(int index, bool broadcast)
{
@@ -287,7 +287,7 @@ static int suspend_test_thread(void *arg)
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
- setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+ timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index c21adf60a7f2..057e1ecd83ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
- return false;
- }
-
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6c78623e1386..a57cec737c18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1495,8 +1495,11 @@ out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = array[first]->error;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2d792cdc094c..2c574374d9b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1837,6 +1837,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_fini_bo(adev);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
continue;
@@ -3261,9 +3264,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3337,9 +3340,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3687,12 +3690,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
@@ -3737,14 +3740,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
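The debugfs hunks above replace open-coded shift-and-mask decoding of the 64-bit file offset with GENMASK_ULL(), which names each bit range explicitly and keeps the mask in 64-bit arithmetic. The two forms are equivalent; a small sketch for one of the fields, with an illustrative helper name:

	#include <linux/bitops.h>
	#include <linux/types.h>

	/*
	 * Extract the 10-bit se_bank field stored in bits [33:24] of the
	 * debugfs offset.  GENMASK_ULL(33, 24) builds the same mask that
	 * "(pos >> 24) & 0x3FF" applied implicitly, but as an explicit
	 * 64-bit constant.
	 */
	static inline u32 decode_se_bank(u64 pos)
	{
		return (pos & GENMASK_ULL(33, 24)) >> 24;
	}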
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index bd5b8065c32e..2fa95aef74d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -268,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
amdgpu_fence_process(ring);
}
@@ -422,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a418df1b9422..e87eedcc0da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -63,6 +63,11 @@ retry:
flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
@@ -323,7 +328,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
@@ -348,9 +353,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
free_pages:
release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
-unlock_mmap_sem:
- up_read(&current->mm->mmap_sem);
-
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -556,9 +558,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ "va_address 0x%LX is in reserved area 0x%LX\n",
+ args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 33535d347734..00e0ce10862f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d6df5728df7f..6c570d4e4516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -946,6 +946,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* no skipping for powerplay */
+ if (adev->powerplay.cgs_device)
+ return effective_mode;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 5f5aa5fddc16..033fba2def6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -164,9 +164,6 @@ static int amdgpu_pp_hw_fini(void *handle)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_fini_bo(adev);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 90af8e82b16a..ae9c106979d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -169,10 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(dev, gobj, flags);
+ if (!IS_ERR(buf))
+ buf->file->f_mapping = dev->anon_inode->i_mapping;
+ return buf;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 447d446b5015..7714f4a6c8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -442,8 +442,6 @@ static int psp_hw_fini(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
- amdgpu_ucode_fini_bo(adev);
-
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index aa914256b4bc..bae77353447b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 26e900627971..4acca92f6a52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 00868764a0dd..5c8a7a48a4ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4670,6 +4670,14 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->gfx.rlc.cp_table_size) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v7_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8002ac3e536..9ecdf621a74a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2118,6 +2118,15 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v8_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f15bb2c5233..da43813d67a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -207,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] =
SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
+static const u32 golden_settings_gc_9_x_common[] =
+{
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -242,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+
+ amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -988,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1449,6 +1468,14 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v9_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 621699331e09..c8f1aebeac7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -392,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 0, 1: idle
+ * Engine 2, 3: firmware
+ * Engine 4~13: amdgpu ring, subject to change when ring number changes
+ * Engine 14~15: idle
+ * Engine 16: kfd tlb invalidation
+ * Engine 17: Gart flushes
+ */
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
@@ -405,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle)
ring->funcs->vmhub);
}
- /* Engine 17 is used for GART flushes */
+ /* Engine 16 is used for KFD and 17 for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ BUG_ON(vm_inv_eng[i] > 16);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index a129bc5b1844..c6febbf0bf69 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1486,7 +1486,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
if (vddci_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (efuse_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
break;
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index d1af1483c69b..a651ebcf44fd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -830,9 +830,9 @@ static int init_over_drive_limits(
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
hwmgr->platform_descriptor.minOverdriveVDDC = 0;
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4466469cf8ab..e33ec7fc5d09 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3778,7 +3778,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 4f79c21f27ed..f8d838c2c8ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -753,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct cgs_system_info sys_info = {0};
+ uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
@@ -859,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ reg = soc15_get_register_offset(DF_HWID, 0,
+ mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+ mmDF_CS_AON0_DramBaseAddress0);
+ data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -EINVAL);
+
return result;
}
@@ -1777,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
- uint32_t i, j, reg, mem_channels;
+ uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
@@ -1801,20 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
i++;
}
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- mem_channels = (cgs_read_register(hwmgr->device, reg) &
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
- "Mem Channel Index Exceeded maximum!",
- return -1);
-
- pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+ pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
- cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
- channel_number[mem_channels]);
+ (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+ channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index b4b461c3b8ee..8f7358cc3327 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -389,6 +389,7 @@ struct vega10_hwmgr {
uint32_t config_telemetry;
uint32_t smu_version;
uint32_t acg_loop_state;
+ uint32_t mem_channels;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 704fc8934616..25f4b2e9a44f 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -234,6 +234,10 @@ int drm_connector_init(struct drm_device *dev,
config->link_status_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->non_desktop_property,
+ 0);
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
@@ -763,6 +767,10 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* value of link-status is "GOOD". If something fails during or after modeset,
* the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
* should update this value using drm_mode_connector_set_link_status_property().
+ * non_desktop:
+ * Indicates the output should be ignored for purposes of displaying a
+ * standard desktop environment or console. This is most likely because
+ * the output device is not rectilinear.
*
* Connectors also have one standardized atomic property:
*
@@ -811,6 +819,11 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.link_status_property = prop;
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.non_desktop_property = prop;
+
return 0;
}
@@ -1194,6 +1207,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
size,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 00ddabfbf980..5dfe14763871 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -82,6 +82,8 @@
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
+/* Non desktop display (i.e. HMD) */
+#define EDID_QUIRK_NON_DESKTOP (1 << 12)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -157,6 +159,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+
+ /* HTC Vive VR Headset */
+ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -4393,7 +4398,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid, u32 quirks)
{
struct drm_display_info *info = &connector->display_info;
@@ -4407,6 +4412,8 @@ static void drm_add_display_info(struct drm_connector *connector,
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+
if (edid->revision < 3)
return;
@@ -4627,7 +4634,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid);
+ drm_add_display_info(connector, edid, quirks);
/*
* EDID spec says modes should be preferred in this order:
@@ -4824,7 +4831,8 @@ void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable)
+ bool rgb_quant_range_selectable,
+ bool is_hdmi2_sink)
{
/*
* CEA-861:
@@ -4848,8 +4856,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* YQ-field to match the RGB Quantization Range being transmitted
* (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
* set YQ=1) and the Sink shall ignore the YQ-field."
+ *
+ * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
+ * by non-zero YQ when receiving RGB. There doesn't seem to be any
+ * good way to tell which version of CEA-861 the sink supports, so
+ * we limit non-zero YQ to HDMI 2.0 sinks only, as HDMI 2.0 is based
+ * on CEA-861-F.
*/
- if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+ if (!is_hdmi2_sink ||
+ rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
else
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 116d1f1337c7..07374008f146 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2033,6 +2033,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
+ if (connector->display_info.non_desktop)
+ return false;
+
if (strict)
enable = connector->status == connector_status_connected;
else
@@ -2052,7 +2055,8 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
+ connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
any_enabled |= enabled[i];
}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 19404e34cd59..37a93cdffb4a 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1030,6 +1030,7 @@ retry:
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
e->event.vbl.user_data = page_flip->user_data;
+ e->event.vbl.crtc_id = crtc->base.id;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 09c1c4ff93ca..3717b3df34a4 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -367,9 +367,9 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
@@ -436,8 +436,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
vblank->dev = dev;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
- setup_timer(&vblank->disable_timer, vblank_disable_fn,
- (unsigned long)vblank);
+ timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
}
@@ -1019,7 +1018,7 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (drm_vblank_offdelay == 0)
return;
else if (drm_vblank_offdelay < 0)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
@@ -1650,7 +1649,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (disable_irq)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
return true;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 53e03f8af3d5..e6b0940b1ac2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -161,9 +161,9 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.atomic_flush = exynos_crtc_handle_event,
};
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = (void *)arg;
+ struct vidi_context *ctx = from_timer(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
@@ -449,7 +449,7 @@ static int vidi_probe(struct platform_device *pdev)
ctx->pdev = pdev;
- setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+ timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 58e9e0601a61..faf17b83b910 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
return PTR_ERR(fsl_dev->state);
}
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_disable_unprepare(fsl_dev->clk);
return 0;
@@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock();
@@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm);
- enable_irq(fsl_dev->irq);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index edd7d8127d19..c54806d08dd7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
{
struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
- struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
int ret;
fsl_dev->connector.encoder = encoder;
@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_sysfs;
- drm_object_property_set_value(&connector->base,
- mode_config->dpms_property,
- DRM_MODE_DPMS_OFF);
-
ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 4d1f45acf2cd..127815253a84 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -601,9 +601,9 @@ tda998x_reset(struct tda998x_priv *priv)
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = (struct tda998x_priv *)data;
+ struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1492,8 +1492,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
- setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
- (unsigned long)priv);
+ timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
/* wake up the device: */
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 701a3c6f1669..85d4c57870fb 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
- uint32_t bb_size;
+ int bb_size;
void *dst = NULL;
int ret = 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 960d3d8b95b8..2cf10d17acfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1714,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_guc_resume(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
@@ -2618,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
+ intel_uncore_runtime_resume(dev_priv);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 135fc750a837..382a77a1097e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -172,7 +172,9 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
- mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+ mn->wq = alloc_workqueue("i915-userptr-release",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
@@ -827,7 +829,7 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
dev_priv->mm.userptr_wq =
alloc_workqueue("i915-userptr-acquire",
- WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ WQ_HIGHPRI | WQ_UNBOUND,
0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
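The two workqueue hunks above adjust alloc_workqueue() flags: the release queue gains WQ_MEM_RECLAIM so it keeps a rescuer and can make progress under memory pressure, while the acquire queue trades WQ_MEM_RECLAIM for WQ_UNBOUND. A hedged sketch of the resulting allocation calls, with hypothetical queue names:

#include <linux/workqueue.h>

static struct workqueue_struct *release_wq;
static struct workqueue_struct *acquire_wq;

static int example_wq_init(void)
{
	/* Flushed on release/reclaim paths, so it needs a rescuer. */
	release_wq = alloc_workqueue("example-release",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!release_wq)
		return -ENOMEM;

	/* High priority, not tied to the submitting CPU. */
	acquire_wq = alloc_workqueue("example-acquire",
				     WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!acquire_wq) {
		destroy_workqueue(release_wq);
		return -ENOMEM;
	}

	return 0;
}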
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 48e1ba01ccf8..5f8b9f1f40f1 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -517,6 +517,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
rb_erase(&wait->node, &b->waiters);
+ RB_CLEAR_NODE(&wait->node);
out:
GEM_BUG_ON(b->irq_wait == wait);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5132dc814788..4dea833f9d1b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -487,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable);
+ intel_hdmi->rgb_quant_range_selectable,
+ is_hdmi2_sink);
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 20e3c65c0999..8c2ce81f01c2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
}
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+}
+
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
i915_modparams.enable_rc6 =
@@ -1240,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* bus, which will be busy after this notification, leading to:
* "render: timed out waiting for forcewake ack request."
* errors.
+ *
+ * The notifier is unregistered during intel_runtime_suspend(),
+ * so it's ok to access the HW here without holding an RPM
+ * wake reference -> disable wakeref asserts for the time of
+ * the access.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 582771251b57..9ce079b5dd0d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 3790fdf44a1a..b26f07b55d86 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -49,9 +49,9 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
i915_sw_fence_fini(fence);
}
-static void timed_fence_wake(unsigned long data)
+static void timed_fence_wake(struct timer_list *t)
{
- struct timed_fence *tf = (struct timed_fence *)data;
+ struct timed_fence *tf = from_timer(tf, t, timer);
i915_sw_fence_commit(&tf->fence);
}
@@ -60,7 +60,7 @@ void timed_fence_init(struct timed_fence *tf, unsigned long expires)
{
onstack_fence_init(&tf->fence);
- setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+ timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
if (time_after(expires, jiffies))
mod_timer(&tf->timer, expires);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 53e0b24beda6..9a9961802f5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
if (crtc->state) {
if (crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_blob_put(crtc->state->mode_blob);
state = to_imx_crtc_state(crtc->state);
memset(state, 0, sizeof(*state));
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8def97d75030..aedecda9728a 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
&imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
}
if (imxpd->panel)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 40f4840ef98e..970c7963ae29 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -82,9 +82,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
return NULL;
}
-static void a5xx_preempt_timer(unsigned long data)
+static void a5xx_preempt_timer(struct timer_list *t)
{
- struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -300,6 +300,5 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
- setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
- (unsigned long) a5xx_gpu);
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8d4477818ec2..232201403439 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -353,9 +353,9 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
@@ -703,8 +703,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_WORK(&gpu->recover_work, recover_worker);
- setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
spin_lock_init(&gpu->perf_lock);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b56a05730314..c2cf6d98e577 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4095,7 +4095,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5449,9 +5449,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 2fcf805d3a16..33b821d6d018 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
- info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index a553e182ff53..3acfd576b7df 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -101,9 +101,9 @@ static void psr_set_state(struct psr_drv *psr, enum psr_state state)
spin_unlock_irqrestore(&psr->lock, flags);
}
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
{
- struct psr_drv *psr = (struct psr_drv *)data;
+ struct psr_drv *psr = from_timer(psr, t, flush_timer);
unsigned long flags;
/* If the state has changed since we initiated the flush, do nothing */
@@ -232,7 +232,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
if (!psr)
return -ENOMEM;
- setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+ timer_setup(&psr->flush_timer, psr_flush_handler, 0);
spin_lock_init(&psr->lock);
psr->active = true;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 4bcacd3f4861..b0a1dedac802 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -174,9 +174,9 @@ struct tegra_sor {
struct reset_control *rst;
struct clk *clk_parent;
- struct clk *clk_brick;
struct clk *clk_safe;
- struct clk *clk_src;
+ struct clk *clk_out;
+ struct clk *clk_pad;
struct clk *clk_dp;
struct clk *clk;
@@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
clk_disable_unprepare(sor->clk);
- err = clk_set_parent(sor->clk, parent);
+ err = clk_set_parent(sor->clk_out, parent);
if (err < 0)
return err;
@@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
return 0;
}
-struct tegra_clk_sor_brick {
+struct tegra_clk_sor_pad {
struct clk_hw hw;
struct tegra_sor *sor;
};
-static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
{
- return container_of(hw, struct tegra_clk_sor_brick, hw);
+ return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_brick_parents[] = {
+static const char * const tegra_clk_sor_pad_parents[] = {
"pll_d2_out0", "pll_dp"
};
-static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
@@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u8 parent = U8_MAX;
u32 value;
@@ -328,33 +328,33 @@ static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
return parent;
}
-static const struct clk_ops tegra_clk_sor_brick_ops = {
- .set_parent = tegra_clk_sor_brick_set_parent,
- .get_parent = tegra_clk_sor_brick_get_parent,
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .set_parent = tegra_clk_sor_pad_set_parent,
+ .get_parent = tegra_clk_sor_pad_get_parent,
};
-static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
- const char *name)
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+ const char *name)
{
- struct tegra_clk_sor_brick *brick;
+ struct tegra_clk_sor_pad *pad;
struct clk_init_data init;
struct clk *clk;
- brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
- if (!brick)
+ pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+ if (!pad)
return ERR_PTR(-ENOMEM);
- brick->sor = sor;
+ pad->sor = sor;
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_brick_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
- init.ops = &tegra_clk_sor_brick_ops;
+ init.parent_names = tegra_clk_sor_pad_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.ops = &tegra_clk_sor_pad_ops;
- brick->hw.init = &init;
+ pad->hw.init = &init;
- clk = devm_clk_register(sor->dev, &brick->hw);
+ clk = devm_clk_register(sor->dev, &pad->hw);
return clk;
}
@@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return err;
+ }
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
@@ -2007,8 +2009,10 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return;
+ }
div = clk_get_rate(sor->clk) / 1000000 * 4;
@@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/* switch to parent clock */
- err = clk_set_parent(sor->clk_src, sor->clk_parent);
- if (err < 0)
- dev_err(sor->dev, "failed to set source clock: %d\n", err);
-
- err = tegra_sor_set_parent_clock(sor, sor->clk_src);
- if (err < 0)
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ return;
+ }
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ return;
+ }
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
@@ -2628,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
- sor->clk_src = devm_clk_get(&pdev->dev, "source");
- if (IS_ERR(sor->clk_src)) {
- err = PTR_ERR(sor->clk_src);
- dev_err(sor->dev, "failed to get source clock: %d\n",
- err);
+ struct device_node *np = pdev->dev.of_node;
+ const char *name;
+
+ /*
+ * For backwards compatibility with Tegra210 device trees,
+ * fall back to the old clock name "source" if the new "out"
+ * clock is not available.
+ */
+ if (of_property_match_string(np, "clock-names", "out") < 0)
+ name = "source";
+ else
+ name = "out";
+
+ sor->clk_out = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(sor->clk_out)) {
+ err = PTR_ERR(sor->clk_out);
+ dev_err(sor->dev, "failed to get %s clock: %d\n",
+ name, err);
goto remove;
}
}
@@ -2658,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ /*
+ * Starting with Tegra186, the BPMP provides an implementation for
+ * the pad output clock, so we have to look it up from device tree.
+ */
+ sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+ if (IS_ERR(sor->clk_pad)) {
+ if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+ err = PTR_ERR(sor->clk_pad);
+ goto remove;
+ }
+
+ /*
+ * If the pad output clock is not available, then we assume
+ * we're on Tegra210 or earlier and have to provide our own
+ * implementation.
+ */
+ sor->clk_pad = NULL;
+ }
+
+ /*
+ * The bootloader may have set up the SOR such that its module clock
+ * is sourced by one of the display PLLs. However, that doesn't work
+ * without properly having set up other bits of the SOR.
+ */
+ err = clk_set_parent(sor->clk_out, sor->clk_safe);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+ goto remove;
+ }
+
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
- sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
- pm_runtime_put(&pdev->dev);
+ /*
+ * On Tegra210 and earlier, provide our own implementation for the
+ * pad output clock.
+ */
+ if (!sor->clk_pad) {
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
+ err);
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor,
+ "sor1_pad_clkout");
+ pm_runtime_put(&pdev->dev);
+ }
- if (IS_ERR(sor->clk_brick)) {
- err = PTR_ERR(sor->clk_brick);
- dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ if (IS_ERR(sor->clk_pad)) {
+ err = PTR_ERR(sor->clk_pad);
+ dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+ err);
goto remove;
}
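The probe changes above keep older Tegra210 device trees working by preferring the new "out" clock name and falling back to the legacy "source" name. A sketch of that fallback in a hypothetical probe helper:

#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical helper: prefer the new clock name, fall back to the
 * legacy one when the device tree predates the binding change. */
static struct clk *example_get_out_clock(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const char *name = "out";

	if (of_property_match_string(np, "clock-names", "out") < 0)
		name = "source";

	return devm_clk_get(&pdev->dev, name);
}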
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 28fed7e206d0..81ac82455ce4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -12,14 +12,3 @@ config DRM_TILCDC
controller, for example AM33xx in beagle-bone, DA8xx, or
OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
-config DRM_TILCDC_SLAVE_COMPAT
- bool "Support device tree blobs using TI LCDC Slave binding"
- depends on DRM_TILCDC
- default y
- select OF_RESOLVE
- select OF_OVERLAY
- help
- Choose this option if you need a kernel that is compatible
- with device tree blobs using the obsolete "ti,tilcdc,slave"
- binding. If you find "ti,tilcdc,slave"-string from your DTB,
- you probably need this. Otherwise you do not.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index b9e1108e5b4e..87f9480e43b0 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -3,9 +3,6 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
ccflags-y += -Werror
endif
-obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
- tilcdc_slave_compat.dtb.o
-
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
deleted file mode 100644
index d2b9e5f04724..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-/*
- * To support the old "ti,tilcdc,slave" binding the binding has to be
- * transformed to the new external encoder binding.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_fdt.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tilcdc_slave_compat.h"
-
-struct kfree_table {
- int total;
- int num;
- void **table;
-};
-
-static int __init kfree_table_init(struct kfree_table *kft)
-{
- kft->total = 32;
- kft->num = 0;
- kft->table = kmalloc(kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __init kfree_table_add(struct kfree_table *kft, void *p)
-{
- if (kft->num == kft->total) {
- void **old = kft->table;
-
- kft->total *= 2;
- kft->table = krealloc(old, kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table) {
- kft->table = old;
- kfree(p);
- return -ENOMEM;
- }
- }
- kft->table[kft->num++] = p;
- return 0;
-}
-
-static void __init kfree_table_free(struct kfree_table *kft)
-{
- int i;
-
- for (i = 0; i < kft->num; i++)
- kfree(kft->table[i]);
-
- kfree(kft->table);
-}
-
-static
-struct property * __init tilcdc_prop_dup(const struct property *prop,
- struct kfree_table *kft)
-{
- struct property *nprop;
-
- nprop = kzalloc(sizeof(*nprop), GFP_KERNEL);
- if (!nprop || kfree_table_add(kft, nprop))
- return NULL;
-
- nprop->name = kstrdup(prop->name, GFP_KERNEL);
- if (!nprop->name || kfree_table_add(kft, nprop->name))
- return NULL;
-
- nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!nprop->value || kfree_table_add(kft, nprop->value))
- return NULL;
-
- nprop->length = prop->length;
-
- return nprop;
-}
-
-static void __init tilcdc_copy_props(struct device_node *from,
- struct device_node *to,
- const char * const props[],
- struct kfree_table *kft)
-{
- struct property *prop;
- int i;
-
- for (i = 0; props[i]; i++) {
- prop = of_find_property(from, props[i], NULL);
- if (!prop)
- continue;
-
- prop = tilcdc_prop_dup(prop, kft);
- if (!prop)
- continue;
-
- prop->next = to->properties;
- to->properties = prop;
- }
-}
-
-static int __init tilcdc_prop_str_update(struct property *prop,
- const char *str,
- struct kfree_table *kft)
-{
- prop->value = kstrdup(str, GFP_KERNEL);
- if (kfree_table_add(kft, prop->value) || !prop->value)
- return -ENOMEM;
- prop->length = strlen(str)+1;
- return 0;
-}
-
-static void __init tilcdc_node_disable(struct device_node *node)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "status";
- prop->value = "disabled";
- prop->length = strlen((char *)prop->value)+1;
-
- of_update_property(node, prop);
-}
-
-static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
-{
- const int size = __dtb_tilcdc_slave_compat_end -
- __dtb_tilcdc_slave_compat_begin;
- static void *overlay_data;
- struct device_node *overlay;
-
- if (!size) {
- pr_warn("%s: No overlay data\n", __func__);
- return NULL;
- }
-
- overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin,
- size, GFP_KERNEL);
- if (!overlay_data || kfree_table_add(kft, overlay_data))
- return NULL;
-
- of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
- if (!overlay) {
- pr_warn("%s: Unfattening overlay tree failed\n", __func__);
- return NULL;
- }
-
- return overlay;
-}
-
-static const struct of_device_id tilcdc_slave_of_match[] __initconst = {
- { .compatible = "ti,tilcdc,slave", },
- {},
-};
-
-static const struct of_device_id tilcdc_of_match[] __initconst = {
- { .compatible = "ti,am33xx-tilcdc", },
- {},
-};
-
-static const struct of_device_id tilcdc_tda998x_of_match[] __initconst = {
- { .compatible = "nxp,tda998x", },
- {},
-};
-
-static const char * const tilcdc_slave_props[] __initconst = {
- "pinctrl-names",
- "pinctrl-0",
- "pinctrl-1",
- NULL
-};
-
-static void __init tilcdc_convert_slave_node(void)
-{
- struct device_node *slave = NULL, *lcdc = NULL;
- struct device_node *i2c = NULL, *fragment = NULL;
- struct device_node *overlay, *encoder;
- struct property *prop;
- /* For all memory needed for the overlay tree. This memory can
- be freed after the overlay has been applied. */
- struct kfree_table kft;
- int ovcs_id, ret;
-
- if (kfree_table_init(&kft))
- return;
-
- lcdc = of_find_matching_node(NULL, tilcdc_of_match);
- slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
-
- if (!slave || !of_device_is_available(lcdc))
- goto out;
-
- i2c = of_parse_phandle(slave, "i2c", 0);
- if (!i2c) {
- pr_err("%s: Can't find i2c node trough phandle\n", __func__);
- goto out;
- }
-
- overlay = tilcdc_get_overlay(&kft);
- if (!overlay)
- goto out;
-
- encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match);
- if (!encoder) {
- pr_err("%s: Failed to find tda998x node\n", __func__);
- goto out;
- }
-
- tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft);
-
- for_each_child_of_node(overlay, fragment) {
- prop = of_find_property(fragment, "target-path", NULL);
- if (!prop)
- continue;
- if (!strncmp("i2c", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, i2c->full_name, &kft))
- goto out;
- if (!strncmp("lcdc", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft))
- goto out;
- }
-
- tilcdc_node_disable(slave);
-
- ovcs_id = 0;
- ret = of_overlay_apply(overlay, &ovcs_id);
- if (ret)
- pr_err("%s: Applying overlay changeset failed: %d\n",
- __func__, ret);
- else
- pr_info("%s: ti,tilcdc,slave node successfully converted\n",
- __func__);
-out:
- kfree_table_free(&kft);
- of_node_put(i2c);
- of_node_put(slave);
- of_node_put(lcdc);
- of_node_put(fragment);
-}
-
-static int __init tilcdc_slave_compat_init(void)
-{
- tilcdc_convert_slave_node();
- return 0;
-}
-
-subsys_initcall(tilcdc_slave_compat_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
deleted file mode 100644
index 693f8b0aea2d..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * DTS overlay for converting ti,tilcdc,slave binding to new binding.
- *
- * Copyright (C) 2015 Texas Instruments Inc.
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- */
-
-/*
- * target-path property values are simple tags that are replaced with
- * correct values in tildcdc_slave_compat.c. Some properties are also
- * copied over from the ti,tilcdc,slave node.
- */
-
-/dts-v1/;
-/ {
- fragment@0 {
- target-path = "i2c";
- __overlay__ {
- #address-cells = <1>;
- #size-cells = <0>;
- tda19988 {
- compatible = "nxp,tda998x";
- reg = <0x70>;
- status = "okay";
-
- port {
- hdmi_0: endpoint@0 {
- remote-endpoint = <&lcd_0>;
- };
- };
- };
- };
- };
-
- fragment@1 {
- target-path = "lcdc";
- __overlay__ {
- port {
- lcd_0: endpoint@0 {
- remote-endpoint = <&hdmi_0>;
- };
- };
- };
- };
-
- __local_fixups__ {
- fragment@0 {
- __overlay__ {
- tda19988 {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
- fragment@1 {
- __overlay__ {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
-};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
deleted file mode 100644
index 403d35d87d0b..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/* This header declares the symbols defined in tilcdc_slave_compat.dts */
-
-#ifndef __TILCDC_SLAVE_COMPAT_H__
-#define __TILCDC_SLAVE_COMPAT_H__
-
-extern uint8_t __dtb_tilcdc_slave_compat_begin[];
-extern uint8_t __dtb_tilcdc_slave_compat_end[];
-
-#endif /* __TILCDC_SLAVE_COMPAT_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 316f831ad5f0..b0551aa677b8 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -744,12 +744,14 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
- break;
+ if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
- if (j == HPAGE_PMD_NR)
- order = HPAGE_PMD_ORDER;
+ if (j == HPAGE_PMD_NR)
+ order = HPAGE_PMD_ORDER;
+ }
#endif
if (page_count(pages[i]) != 1)
@@ -865,20 +867,22 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- while (npages >= HPAGE_PMD_NR) {
- gfp_t huge_flags = gfp_flags;
+ if (!(gfp_flags & GFP_DMA32)) {
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
- huge_flags |= GFP_TRANSHUGE;
- huge_flags &= ~__GFP_MOVABLE;
- huge_flags &= ~__GFP_COMP;
- p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
- if (!p)
- break;
+ huge_flags |= GFP_TRANSHUGE;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+ if (!p)
+ break;
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = p++;
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = p++;
- npages -= HPAGE_PMD_NR;
+ npages -= HPAGE_PMD_NR;
+ }
}
#endif
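Both hunks above put the transparent-huge-page fast path behind a check for DMA32 requests, so compound HPAGE_PMD_ORDER allocations are only attempted when the caller did not ask for DMA32 memory. A minimal sketch of that guard in a hypothetical helper:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_alloc_maybe_huge(gfp_t gfp_flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (!(gfp_flags & GFP_DMA32)) {
		gfp_t huge_flags = gfp_flags | GFP_TRANSHUGE;
		struct page *p;

		/* Mirror the flag adjustments above: THP-style flags
		 * without __GFP_MOVABLE or __GFP_COMP. */
		huge_flags &= ~(__GFP_MOVABLE | __GFP_COMP);
		p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
		if (p)
			return p;
	}
#endif
	/* Fall back to a single page. */
	return alloc_pages(gfp_flags, 0);
}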
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 98a6cb9f44fc..4ae45d7dac42 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -674,10 +674,9 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
mutex_unlock(&bo->madv_lock);
}
-static void vc4_bo_cache_time_timer(unsigned long data)
+static void vc4_bo_cache_time_timer(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
schedule_work(&vc4->bo_cache.time_work);
}
@@ -1039,9 +1038,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
- setup_timer(&vc4->bo_cache.time_timer,
- vc4_bo_cache_time_timer,
- (unsigned long)dev);
+ timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index e00ac2f3a264..6c32c89a83a9 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -312,10 +312,10 @@ vc4_reset_work(struct work_struct *work)
}
static void
-vc4_hangcheck_elapsed(unsigned long data)
+vc4_hangcheck_elapsed(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+ struct drm_device *dev = vc4->dev;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
@@ -1154,9 +1154,7 @@ vc4_gem_init(struct drm_device *dev)
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
- setup_timer(&vc4->hangcheck.timer,
- vc4_hangcheck_elapsed,
- (unsigned long)dev);
+ timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fa37a1c07cf6..0b2088264039 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -424,7 +424,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- vc4_encoder->rgb_range_selectable);
+ vc4_encoder->rgb_range_selectable,
+ false);
vc4_hdmi_write_infoframe(encoder, &frame);
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 7d7af3a93d94..61b2e5377993 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -208,6 +208,9 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ /* Undo the effects of a previous vc4_irq_uninstall. */
+ enable_irq(dev->irq);
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
@@ -225,6 +228,9 @@ vc4_irq_uninstall(struct drm_device *dev)
/* Clear any pending interrupts we might have left. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+ /* Finish any interrupt handler still in flight. */
+ disable_irq(dev->irq);
+
cancel_work_sync(&vc4->overflow_mem_work);
}
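The postinstall/uninstall hunks above pair a disable_irq() in uninstall, which waits for any handler still in flight before the overflow work is cancelled, with a matching enable_irq() in postinstall. A hedged sketch of that pairing on a hypothetical device:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct example_dev {
	int irq;
	struct work_struct overflow_work;
};

static void example_irq_uninstall(struct example_dev *dev)
{
	/* disable_irq() also synchronizes with a running handler. */
	disable_irq(dev->irq);
	cancel_work_sync(&dev->overflow_work);
}

static void example_irq_postinstall(struct example_dev *dev)
{
	/* Undo the effect of a previous uninstall. */
	enable_irq(dev->irq);
}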
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 8fd52f211e9d..b28876c222b4 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -85,9 +85,9 @@ static const struct dma_fence_ops vgem_fence_ops = {
.timeline_value_str = vgem_fence_timeline_value_str,
};
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = (struct vgem_fence *)data;
+ struct vgem_fence *fence = from_timer(fence, t, timer);
dma_fence_signal(&fence->base);
}
@@ -105,7 +105,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+ timer_setup(&fence->timer, vgem_fence_timeout, 0);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 32c9938e1e1e..d6e84a589ef1 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -452,9 +452,9 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -559,8 +559,7 @@ via_init_dmablit(struct drm_device *dev)
init_waitqueue_head(blitq->blit_queue + j);
init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- setup_timer(&blitq->poll_timer, via_dmablit_timer,
- (unsigned long)blitq);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
}
}
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 7a4b8362dda8..49bfe6e7d005 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -249,11 +249,8 @@ EXPORT_SYMBOL_GPL(ipu_dc_enable);
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
- int di;
u32 reg;
- di = dc->di;
-
reg = readl(dc->base + DC_WR_CH_CONF);
reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
writel(reg, dc->base + DC_WR_CH_CONF);
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 07cbc70f00e7..eae7d52cf1a8 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -173,9 +173,9 @@ static void battery_flat(struct appleir *appleir)
dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
}
-static void key_up_tick(unsigned long data)
+static void key_up_tick(struct timer_list *t)
{
- struct appleir *appleir = (struct appleir *)data;
+ struct appleir *appleir = from_timer(appleir, t, key_up_timer);
struct hid_device *hid = appleir->hid;
unsigned long flags;
@@ -303,8 +303,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
spin_lock_init(&appleir->lock);
- setup_timer(&appleir->key_up_timer,
- key_up_tick, (unsigned long) appleir);
+ timer_setup(&appleir->key_up_timer, key_up_tick, 0);
hid_set_drvdata(hid, appleir);
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 49c4bd34b3c5..87eda34ea2f8 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -239,9 +239,9 @@ drop_note:
return;
}
-static void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(struct timer_list *t)
{
- struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+ struct pcmidi_sustain *pms = from_timer(pms, t, timer);
pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
pms->in_use = 0;
@@ -256,8 +256,7 @@ static void init_sustain_timers(struct pcmidi_snd *pm)
pms = &pm->sustained_notes[i];
pms->in_use = 0;
pms->pm = pm;
- setup_timer(&pms->timer, pcmidi_sustained_note_release,
- (unsigned long)pms);
+ timer_setup(&pms->timer, pcmidi_sustained_note_release, 0);
}
}
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index d00391418d1a..579884ebd94d 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1226,9 +1226,9 @@ static void wiimote_schedule(struct wiimote_data *wdata)
spin_unlock_irqrestore(&wdata->state.lock, flags);
}
-static void wiimote_init_timeout(unsigned long arg)
+static void wiimote_init_timeout(struct timer_list *t)
{
- struct wiimote_data *wdata = (void*)arg;
+ struct wiimote_data *wdata = from_timer(wdata, t, timer);
wiimote_schedule(wdata);
}
@@ -1740,7 +1740,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
wdata->state.cmd_battery = 0xff;
INIT_WORK(&wdata->init_worker, wiimote_init_worker);
- setup_timer(&wdata->timer, wiimote_init_timeout, (long)wdata);
+ timer_setup(&wdata->timer, wiimote_init_timeout, 0);
return wdata;
}
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index ea7adb638d99..2ba2ff5e59c4 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -175,9 +175,9 @@ static void ssp_wdt_work_func(struct work_struct *work)
data->timeout_cnt = 0;
}
-static void ssp_wdt_timer_func(unsigned long ptr)
+static void ssp_wdt_timer_func(struct timer_list *t)
{
- struct ssp_data *data = (struct ssp_data *)ptr;
+ struct ssp_data *data = from_timer(data, t, wdt_timer);
switch (data->fw_dl_state) {
case SSP_FW_DL_STATE_FAIL:
@@ -571,7 +571,7 @@ static int ssp_probe(struct spi_device *spi)
INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
- setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+ timer_setup(&data->wdt_timer, ssp_wdt_timer_func, 0);
ret = request_threaded_irq(data->spi->irq, NULL,
ssp_irq_thread_fn,
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 21e60b1e2ff4..130606c3b07c 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
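The change above moves the umem pinning to get_user_pages_longterm(), which refuses to take a long-lived pin on pages (DAX-backed ones, for example) that must not stay pinned indefinitely. A hedged sketch of a caller, with an assumed example_pin_user_buffer() helper:

#include <linux/mm.h>
#include <linux/sched.h>

static long example_pin_user_buffer(unsigned long addr, unsigned long nr_pages,
				    struct page **pages)
{
	long pinned;

	/* Same locking rule as get_user_pages(): mmap_sem held for read. */
	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(addr, nr_pages, FOLL_WRITE,
					 pages, NULL);
	up_read(&current->mm->mmap_sem);

	return pinned;
}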
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9beee9cef137..ee0ee1f9994b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -642,9 +642,9 @@ err:
return -ENOMEM;
}
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
{
- struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+ struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
dev->fill_delay = 0;
}
@@ -663,7 +663,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
- setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
INIT_LIST_HEAD(&ent->head);
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index f6474c24f193..ffb98eaaf1c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -130,9 +130,9 @@ static void handle_catas(struct mthca_dev *dev)
spin_unlock_irqrestore(&catas_lock, flags);
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+ struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
int i;
for (i = 0; i < dev->catas_err.size; ++i)
@@ -149,7 +149,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
{
phys_addr_t addr;
- init_timer(&dev->catas_err.timer);
+ timer_setup(&dev->catas_err.timer, poll_catas, 0);
dev->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, 0) +
@@ -164,8 +164,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
return;
}
- dev->catas_err.timer.data = (unsigned long) dev;
- dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index db46b7b53fb4..162475aeeedd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3819,7 +3819,7 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
if (!nesvnic->event_timer.function) {
ib_dispatch_event(&event);
nesvnic->last_dispatched_event = event.event;
- nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
+ nesvnic->event_timer.function = nes_handle_delayed_event;
nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
add_timer(&nesvnic->event_timer);
} else {
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index cedc665364cd..73862a836062 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -202,9 +202,9 @@ void gameport_stop_polling(struct gameport *gameport)
}
EXPORT_SYMBOL(gameport_stop_polling);
-static void gameport_run_poll_handler(unsigned long d)
+static void gameport_run_poll_handler(struct timer_list *t)
{
- struct gameport *gameport = (struct gameport *)d;
+ struct gameport *gameport = from_timer(gameport, t, poll_timer);
gameport->poll_handler(gameport);
if (gameport->poll_cnt)
@@ -542,8 +542,7 @@ static void gameport_init_port(struct gameport *gameport)
INIT_LIST_HEAD(&gameport->node);
spin_lock_init(&gameport->timer_lock);
- setup_timer(&gameport->poll_timer, gameport_run_poll_handler,
- (unsigned long)gameport);
+ timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
}
/*
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 44916ef4a424..e30642db50d5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2047,7 +2047,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
- dev->timer.function = (TIMER_FUNC_TYPE)input_repeat_key;
+ dev->timer.function = input_repeat_key;
dev->rep[REP_DELAY] = delay;
dev->rep[REP_PERIOD] = period;
}
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index f4ad83eab67f..de0dd4756c84 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -364,9 +364,9 @@ static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
return 0;
}
-static void db9_timer(unsigned long private)
+static void db9_timer(struct timer_list *t)
{
- struct db9 *db9 = (void *) private;
+ struct db9 *db9 = from_timer(db9, t, timer);
struct parport *port = db9->pd->port;
struct input_dev *dev = db9->dev[0];
struct input_dev *dev2 = db9->dev[1];
@@ -609,7 +609,7 @@ static void db9_attach(struct parport *pp)
db9->pd = pd;
db9->mode = mode;
db9->parportno = pp->number;
- setup_timer(&db9->timer, db9_timer, (long)db9);
+ timer_setup(&db9->timer, db9_timer, 0);
for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index ca734ea97e53..2ffb2e8bdc3b 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -743,9 +743,9 @@ static void gc_psx_process_packet(struct gc *gc)
* gc_timer() initiates reads of console pads data.
*/
-static void gc_timer(unsigned long private)
+static void gc_timer(struct timer_list *t)
{
- struct gc *gc = (void *) private;
+ struct gc *gc = from_timer(gc, t, timer);
/*
* N64 pads - must be read first, any read confuses them for 200 us
@@ -974,7 +974,7 @@ static void gc_attach(struct parport *pp)
mutex_init(&gc->mutex);
gc->pd = pd;
gc->parportno = pp->number;
- setup_timer(&gc->timer, gc_timer, (long) gc);
+ timer_setup(&gc->timer, gc_timer, 0);
for (i = 0; i < n_pads && i < GC_MAX_DEVICES; i++) {
if (!pads[i])
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index a1fdc75a438d..e2685753e460 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -89,9 +89,9 @@ static struct tgfx {
* tgfx_timer() reads and analyzes TurboGraFX joystick data.
*/
-static void tgfx_timer(unsigned long private)
+static void tgfx_timer(struct timer_list *t)
{
- struct tgfx *tgfx = (void *) private;
+ struct tgfx *tgfx = from_timer(tgfx, t, timer);
struct input_dev *dev;
int data1, data2, i;
@@ -200,7 +200,7 @@ static void tgfx_attach(struct parport *pp)
mutex_init(&tgfx->sem);
tgfx->pd = pd;
tgfx->parportno = pp->number;
- setup_timer(&tgfx->timer, tgfx_timer, (long)tgfx);
+ timer_setup(&tgfx->timer, tgfx_timer, 0);
for (i = 0; i < n_devs; i++) {
if (n_buttons[i] < 1)
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index d3265b6b58b8..1173890f6719 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -102,7 +102,7 @@ static inline bool get_down(unsigned long data0, unsigned long data1)
!(data1 & S3C2410_ADCDAT0_UPDOWN));
}
-static void touch_timer_fire(unsigned long data)
+static void touch_timer_fire(struct timer_list *unused)
{
unsigned long data0;
unsigned long data1;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 466aaa8ba841..83fe2621effe 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -36,7 +36,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(unsigned long data);
+static void fq_flush_timeout(struct timer_list *t);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -107,7 +107,7 @@ int init_iova_flush_queue(struct iova_domain *iovad,
spin_lock_init(&fq->lock);
}
- setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+ timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
return 0;
@@ -519,9 +519,9 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
}
}
-static void fq_flush_timeout(unsigned long data)
+static void fq_flush_timeout(struct timer_list *t)
{
- struct iova_domain *iovad = (struct iova_domain *)data;
+ struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
int cpu;
atomic_set(&iovad->fq_timer_on, 0);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 53380bd72ea4..c70476b34a53 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -41,8 +41,15 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+ bool
+ depends on ARM_GIC_V3_ITS
depends on PCI
depends on PCI_MSI
+ default ARM_GIC_V3_ITS
config ARM_NVIC
bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index dae7282bfdef..d2df34a54d38 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -30,7 +30,8 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 17221143f505..b56c3e23f0af 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1103,18 +1103,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
int nr_parts;
struct partition_affinity *parts;
- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
if (!parts_node)
return;
nr_parts = of_get_child_count(parts_node);
if (!nr_parts)
- return;
+ goto out_put_node;
parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
if (WARN_ON(!parts))
- return;
+ goto out_put_node;
for_each_child_of_node(parts_node, child_part) {
struct partition_affinity *part;
@@ -1181,6 +1181,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
gic_data.ppi_descs[i] = desc;
}
+
+out_put_node:
+ of_node_put(parts_node);
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
@@ -1523,7 +1526,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
- pr_err("No distributor detected at @%p, giving up",
+ pr_err("No distributor detected at @%p, giving up\n",
acpi_data.dist_base);
goto out_dist_unmap;
}
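The ppi-partitions hunk above switches to of_get_child_by_name(), which only searches direct children, and routes every early exit through an out_put_node label so the reference taken by the lookup is always dropped. A sketch of that pattern in a hypothetical parser:

#include <linux/of.h>

static int example_parse_partitions(struct device_node *parent)
{
	struct device_node *np;
	int ret = 0;

	/* Takes a reference on the returned node. */
	np = of_get_child_by_name(parent, "ppi-partitions");
	if (!np)
		return 0;

	if (!of_get_child_count(np)) {
		ret = -ENODEV;
		goto out_put_node;
	}

	/* ... walk the child nodes here ... */

out_put_node:
	of_node_put(np);
	return ret;
}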
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index cd0bcc3b7e33..dba9d67cb9c1 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -177,6 +177,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
.map = map,
},
};
+ int ret;
/*
* The host will never see that interrupt firing again, so it
@@ -184,7 +185,11 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
*/
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
- return irq_set_vcpu_affinity(irq, &info);
+ ret = irq_set_vcpu_affinity(irq, &info);
+ if (ret)
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ return ret;
}
int its_get_vlpi(int irq, struct its_vlpi_map *map)
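The its_map_vlpi() hunk above clears IRQ_DISABLE_UNLAZY again when irq_set_vcpu_affinity() fails, so an error does not leave the flag set behind. A minimal sketch of that set/undo-on-error pattern, with a hypothetical configure callback standing in for the affinity call:

#include <linux/irq.h>

static int example_configure_irq(unsigned int irq,
				 int (*configure)(unsigned int irq))
{
	int ret;

	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	ret = configure(irq);
	if (ret)
		irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);

	return ret;
}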
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 1f59998e03f8..e80263e16c4c 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -325,7 +325,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Ioremap the registers */
priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
- res_regs->end - res_regs->start);
+ resource_size(res_regs));
if (!priv->pdc_base)
return -EIO;
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index c25ce5af091a..ec0e6a8cdb75 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -156,7 +156,7 @@ static int s3c_irq_type(struct irq_data *data, unsigned int type)
irq_set_handler(data->irq, handle_level_irq);
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
@@ -204,7 +204,7 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index 1b6e2f7c59af..1927b2f36ff6 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -196,8 +196,8 @@ static int __init exiu_init(struct device_node *node,
}
data->base = of_iomap(node, 0);
- if (IS_ERR(data->base)) {
- err = PTR_ERR(data->base);
+ if (!data->base) {
+ err = -ENODEV;
goto out_free;
}
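The exiu fix above corrects the error check after of_iomap(): the function returns NULL on failure rather than an ERR_PTR() value, so testing it with IS_ERR() can never catch the error. A brief sketch of the correct check in a hypothetical init helper:

#include <linux/of_address.h>
#include <linux/io.h>

static int example_map_regs(struct device_node *node, void __iomem **base)
{
	*base = of_iomap(node, 0);
	if (!*base)		/* NULL, not an ERR_PTR(), signals failure */
		return -ENODEV;

	return 0;
}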
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 6aa3ea479214..f31265937439 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
size_t alloc_sz;
- u32 nregs;
+ int nregs;
int err;
nregs = count_registers(pdev);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 89dd1303a98a..49fef08858c5 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2235,9 +2235,9 @@ static void send_listen(capidrv_contr *card)
send_message(card, &cmdcmsg);
}
-static void listentimerfunc(unsigned long x)
+static void listentimerfunc(struct timer_list *t)
{
- capidrv_contr *card = (capidrv_contr *)x;
+ capidrv_contr *card = from_timer(card, t, listentimer);
if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
printk(KERN_ERR "%s: controller dead ??\n", card->name);
send_listen(card);
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
return -1;
}
card->owner = THIS_MODULE;
- setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
+ timer_setup(&card->listentimer, listentimerfunc, 0);
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 6f423bc49d0d..5620fd2c6009 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -55,10 +55,10 @@ DEFINE_SPINLOCK(divert_lock);
/***************************/
/* timer callback function */
/***************************/
-static void deflect_timer_expire(ulong arg)
+static void deflect_timer_expire(struct timer_list *t)
{
unsigned long flags;
- struct call_struc *cs = (struct call_struc *) arg;
+ struct call_struc *cs = from_timer(cs, t, timer);
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs->timer); /* delete active timer */
@@ -157,7 +157,7 @@ int cf_command(int drvid, int mode,
/* allocate mem for information struct */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (-ENOMEM); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics.driver = drvid;
cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
@@ -450,8 +450,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
return (0); /* no external deflection needed */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (0); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire,
- (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics = *ic; /* copy incoming data */
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index c61049585cbd..0033d74a7291 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -78,7 +78,7 @@ static unsigned int um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
-static void diva_um_timer_function(unsigned long data);
+static void diva_um_timer_function(struct timer_list *t);
/*
* proc entry
@@ -300,8 +300,7 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr)
p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
init_waitqueue_head(&p_os->read_wait);
init_waitqueue_head(&p_os->close_wait);
- setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
- (unsigned long)p_os);
+ timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
p_os->aborted = 0;
p_os->adapter_nr = adapter_nr;
return (1);
@@ -457,9 +456,9 @@ void diva_os_wakeup_close(void *os_context)
}
static
-void diva_um_timer_function(unsigned long data)
+void diva_um_timer_function(struct timer_list *t)
{
- diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data;
+ diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
p_os->aborted = 1;
wake_up_interruptible(&p_os->read_wait);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 3cf07b8ced1c..4d85645c87f7 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2855,7 +2855,7 @@ irq_notforus:
*/
static void
-hfcmulti_dbusy_timer(struct hfc_multi *hc)
+hfcmulti_dbusy_timer(struct timer_list *t)
{
}
@@ -3877,8 +3877,7 @@ hfcmulti_initmode(struct dchannel *dch)
if (hc->dnum[pt]) {
mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
-1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
}
for (i = 1; i <= 31; i++) {
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
@@ -3984,8 +3983,7 @@ hfcmulti_initmode(struct dchannel *dch)
hc->chan[i].slot_rx = -1;
hc->chan[i].conf = -1;
mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
hc->chan[i - 2].slot_tx = -1;
hc->chan[i - 2].slot_rx = -1;
hc->chan[i - 2].conf = -1;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e4ebbee863a1..34c93874af23 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc)
* Timer function called when kernel timer expires
*/
static void
-hfcpci_Timer(struct hfc_pci *hc)
+hfcpci_Timer(struct timer_list *t)
{
+ struct hfc_pci *hc = from_timer(hc, t, hw.timer);
hc->hw.timer.expires = jiffies + 75;
/* WD RESET */
/*
@@ -1241,7 +1242,7 @@ hfcpci_int(int intno, void *dev_id)
* timer callback for D-chan busy resolution. Currently no function
*/
static void
-hfcpci_dbusy_timer(struct hfc_pci *hc)
+hfcpci_dbusy_timer(struct timer_list *t)
{
}
@@ -1717,8 +1718,7 @@ static void
inithfcpci(struct hfc_pci *hc)
{
printk(KERN_DEBUG "inithfcpci: entered\n");
- setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
- (long)&hc->dch);
+ timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
hc->chanlimit = 2;
mode_hfcpci(&hc->bch[0], 1, -1);
mode_hfcpci(&hc->bch[1], 2, -1);
@@ -2043,7 +2043,7 @@ setup_hw(struct hfc_pci *hc)
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
- setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
+ timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
/* default PCM master */
test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
return 0;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 5b078591b6ee..b791688d0228 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1146,9 +1146,9 @@ mISDNisar_irq(struct isar_hw *isar)
EXPORT_SYMBOL(mISDNisar_irq);
static void
-ftimer_handler(unsigned long data)
+ftimer_handler(struct timer_list *t)
{
- struct isar_ch *ch = (struct isar_ch *)data;
+ struct isar_ch *ch = from_timer(ch, t, ftimer);
pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
@@ -1635,11 +1635,9 @@ init_isar(struct isar_hw *isar)
}
if (isar->version != 1)
return -EINVAL;
- setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
- (long)&isar->ch[0]);
+ timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
- setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
- (long)&isar->ch[1]);
+ timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
return 0;
}
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 38a5bb764c7b..8b03d618185e 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -231,7 +231,7 @@ static int isdn_timer_cnt2 = 0;
static int isdn_timer_cnt3 = 0;
static void
-isdn_timer_funct(ulong dummy)
+isdn_timer_funct(struct timer_list *unused)
{
int tf = dev->tflags;
if (tf & ISDN_TIMER_FAST) {
@@ -2294,8 +2294,7 @@ static int __init isdn_init(void)
printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
return -EIO;
}
- init_timer(&dev->timer);
- dev->timer.function = isdn_timer_funct;
+ timer_setup(&dev->timer, isdn_timer_funct, 0);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->timerlock);
#ifdef MODULE
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index f63a110b7bcb..c138f66f2659 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1509,9 +1509,9 @@ static int isdn_net_ioctl(struct net_device *dev,
/* called via cisco_timer.function */
static void
-isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
+isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
{
- isdn_net_local *lp = (isdn_net_local *) data;
+ isdn_net_local *lp = from_timer(lp, t, cisco_timer);
struct sk_buff *skb;
unsigned char *p;
unsigned long last_cisco_myseq = lp->cisco_myseq;
@@ -1615,9 +1615,8 @@ isdn_net_ciscohdlck_connected(isdn_net_local *lp)
/* send slarp request because interface/seq.no.s reset */
isdn_net_ciscohdlck_slarp_send_request(lp);
- init_timer(&lp->cisco_timer);
- lp->cisco_timer.data = (unsigned long) lp;
- lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+ timer_setup(&lp->cisco_timer,
+ isdn_net_ciscohdlck_slarp_send_keepalive, 0);
lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
add_timer(&lp->cisco_timer);
}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index cd2b3c69771a..e07aefb9151d 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -50,7 +50,7 @@ static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
unsigned char id);
-static void isdn_ppp_ccp_timer_callback(unsigned long closure);
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
unsigned char id);
static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
@@ -2327,10 +2327,10 @@ static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
/* The timer callback function which is called when a ResetReq has timed out,
aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(unsigned long closure)
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
{
struct ippp_ccp_reset_state *rs =
- (struct ippp_ccp_reset_state *)closure;
+ from_timer(rs, t, timer);
if (!rs) {
printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
@@ -2376,8 +2376,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
rs->state = CCPResetIdle;
rs->is = is;
rs->id = id;
- setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
- (unsigned long)rs);
+ timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
is->reset->rs[id] = rs;
}
return rs;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d30130c8d0f3..960f26348bb5 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -541,9 +541,9 @@ isdn_tty_senddown(modem_info *info)
* into the tty's buffer.
*/
static void
-isdn_tty_modem_do_ncarrier(unsigned long data)
+isdn_tty_modem_do_ncarrier(struct timer_list *t)
{
- modem_info *info = (modem_info *) data;
+ modem_info *info = from_timer(info, t, nc_timer);
isdn_tty_modem_result(RESULT_NO_CARRIER, info);
}
@@ -1812,8 +1812,7 @@ isdn_tty_modem_init(void)
info->isdn_channel = -1;
info->drv_index = -1;
info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
- setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
- (unsigned long)info);
+ timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
skb_queue_head_init(&info->xmit_queue);
#ifdef CONFIG_ISDN_AUDIO
skb_queue_head_init(&info->dtmf_queue);
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index ce90213a42fa..76516ee84e9a 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -270,9 +270,9 @@ static void pblk_write_kick(struct pblk *pblk)
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
-void pblk_write_timer_fn(unsigned long data)
+void pblk_write_timer_fn(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, wtimer);
/* kick the write thread every tick to flush outstanding data */
pblk_write_kick(pblk);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 00d5698d64a9..9c8e114c8a54 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -442,9 +442,9 @@ next_gc_group:
goto next_gc_group;
}
-static void pblk_gc_timer(unsigned long data)
+static void pblk_gc_timer(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
pblk_gc_kick(pblk);
}
@@ -601,7 +601,7 @@ int pblk_gc_init(struct pblk *pblk)
goto fail_free_writer_kthread;
}
- setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
+ timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
gc->gc_active = 0;
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index f62112ba5482..695826a06b5d 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -866,7 +866,7 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index abae31fd434e..dacc71922260 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -158,9 +158,9 @@ int pblk_rl_max_io(struct pblk_rl *rl)
return rl->rb_max_io;
}
-static void pblk_rl_u_timer(unsigned long data)
+static void pblk_rl_u_timer(struct timer_list *t)
{
- struct pblk_rl *rl = (struct pblk_rl *)data;
+ struct pblk_rl *rl = from_timer(rl, t, u_timer);
/* Release user I/O state. Protect from GC */
smp_store_release(&rl->rb_user_active, 0);
@@ -202,7 +202,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
atomic_set(&rl->rb_gc_cnt, 0);
atomic_set(&rl->rb_space, -1);
- setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+ timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
rl->rb_user_active = 0;
rl->rb_gc_active = 0;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 90961033a79f..59a64d461a5d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -797,7 +797,7 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
* pblk write thread
*/
int pblk_write_ts(void *data);
-void pblk_write_timer_fn(unsigned long data);
+void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
/*
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 267f01ae87e4..0993c14be860 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -267,9 +267,9 @@ static void rrpc_gc_kick(struct rrpc *rrpc)
/*
* timed GC every interval.
*/
-static void rrpc_gc_timer(unsigned long data)
+static void rrpc_gc_timer(struct timer_list *t)
{
- struct rrpc *rrpc = (struct rrpc *)data;
+ struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
rrpc_gc_kick(rrpc);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
@@ -1063,7 +1063,7 @@ static int rrpc_gc_init(struct rrpc *rrpc)
if (!rrpc->kgc_wq)
return -ENOMEM;
- setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+ timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
return 0;
}
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index ce8d78c137f0..e1d369b976ed 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -402,7 +402,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
sizeof(struct saa7146_buf),
file, &dev->v4l2_lock);
- vv->vbi_read_timeout.function = (TIMER_FUNC_TYPE)vbi_read_timeout;
+ vv->vbi_read_timeout.function = vbi_read_timeout;
vv->vbi_read_timeout_file = file;
/* initialize the brs */
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index fb43025df573..dba21215dc84 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -339,9 +339,9 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
}
}
-static void viu_vid_timeout(unsigned long data)
+static void viu_vid_timeout(struct timer_list *t)
{
- struct viu_dev *dev = (struct viu_dev *)data;
+ struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
struct viu_buf *buf;
struct viu_dmaqueue *vidq = &dev->vidq;
@@ -1466,8 +1466,7 @@ static int viu_of_probe(struct platform_device *op)
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
- setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
- (unsigned long)viu_dev);
+ timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
viu_dev->std = V4L2_STD_NTSC_M;
viu_dev->first = 1;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 1839a86cc2a5..bc68dbbcaec1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -145,9 +145,9 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
}
}
-static void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(struct timer_list *t)
{
- struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+ struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
if (test_bit(0, &dev->hw_lock))
atomic_inc(&dev->watchdog_cnt);
@@ -1314,9 +1314,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->hw_lock = 0;
INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
atomic_set(&dev->watchdog_cnt, 0);
- init_timer(&dev->watchdog_timer);
- dev->watchdog_timer.data = (unsigned long)dev;
- dev->watchdog_timer.function = s5p_mfc_watchdog;
+ timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 59280ac31937..a0acee7671b1 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -61,9 +61,9 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei);
#define FIFO_LEN 1024
-static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
{
- struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+ struct c8sectpfei *fei = from_timer(fei, t, timer);
struct channel_info *channel;
int chan_num;
@@ -865,8 +865,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
/* Setup timer interrupt */
- setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
- (unsigned long)fei);
+ timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
mutex_init(&fei->lock);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index b01fba020d5f..7bf9fa2f8534 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -388,9 +388,9 @@ static void device_run(void *priv)
schedule_irq(dev, ctx->transtime);
}
-static void device_isr(unsigned long priv)
+static void device_isr(struct timer_list *t)
{
- struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+ struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
struct vim2m_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
@@ -1024,7 +1024,7 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
- setup_timer(&dev->timer, device_isr, (long)dev);
+ timer_setup(&dev->timer, device_isr, 0);
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 34dc7e062471..d9093a3c57c5 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -105,9 +105,9 @@ static struct tda18271_config hauppauge_woodbury_tunerconfig = {
static void au0828_restart_dvb_streaming(struct work_struct *work);
-static void au0828_bulk_timeout(unsigned long data)
+static void au0828_bulk_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
dprintk(1, "%s called\n", __func__);
dev->bulk_timeout_running = 0;
@@ -648,9 +648,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
return ret;
}
- dev->bulk_timeout.function = au0828_bulk_timeout;
- dev->bulk_timeout.data = (unsigned long) dev;
- init_timer(&dev->bulk_timeout);
+ timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);
return 0;
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 654f67c25863..a240153821e0 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -954,9 +954,9 @@ int au0828_analog_unregister(struct au0828_dev *dev)
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-static void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
struct au0828_dmaqueue *dma_q = &dev->vidq;
struct au0828_buffer *buf;
unsigned char *vid_data;
@@ -978,9 +978,9 @@ static void au0828_vid_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
struct au0828_dmaqueue *dma_q = &dev->vbiq;
struct au0828_buffer *buf;
unsigned char *vbi_data;
@@ -1953,10 +1953,8 @@ int au0828_analog_register(struct au0828_dev *dev,
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vbiq.active);
- setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
- (unsigned long)dev);
- setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
- (unsigned long)dev);
+ timer_setup(&dev->vid_timeout, au0828_vid_buffer_timeout, 0);
+ timer_setup(&dev->vbi_timeout, au0828_vbi_buffer_timeout, 0);
dev->width = NTSC_STD_W;
dev->height = NTSC_STD_H;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 0b5c43f7e020..f412429cf5ba 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+ dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+ dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
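
get_user_pages_longterm() takes the same arguments as get_user_pages() but refuses mappings (such as DAX) that must not stay pinned indefinitely, which matters here because a videobuf DMA scatterlist holds its pages for the lifetime of the buffer. A hedged sketch of the pin/release pattern; the helper name is invented and FOLL_WRITE is chosen only for illustration:

	#include <linux/mm.h>

	static int pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
	{
		long pinned;

		pinned = get_user_pages_longterm(uaddr & PAGE_MASK, nr_pages,
						 FOLL_WRITE, pages, NULL);
		if (pinned < 0)
			return pinned;

		if (pinned != nr_pages) {	/* partial pin: release and bail out */
			while (pinned--)
				put_page(pages[pinned]);
			return -EINVAL;
		}
		return 0;
	}
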
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 22de7f5ed032..57b13dfbd21e 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1492,9 +1492,9 @@ static int msb_ftl_scan(struct msb_data *msb)
return 0;
}
-static void msb_cache_flush_timer(unsigned long data)
+static void msb_cache_flush_timer(struct timer_list *t)
{
- struct msb_data *msb = (struct msb_data *)data;
+ struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
}
@@ -1514,8 +1514,7 @@ static void msb_cache_discard(struct msb_data *msb)
static int msb_cache_init(struct msb_data *msb)
{
- setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
- (unsigned long)msb);
+ timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
if (!msb->cache)
msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index 691dab791f7a..59d61b04c197 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -40,9 +40,9 @@ static const struct mfd_cell rtsx_usb_cells[] = {
},
};
-static void rtsx_usb_sg_timed_out(unsigned long data)
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
{
- struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+ struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
@@ -663,7 +663,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
goto out_init_fail;
/* initialize USB SG transfer timer */
- setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+ timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
ARRAY_SIZE(rtsx_usb_cells));
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 35a9e4fd1a9f..64b03d6eaf18 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -160,9 +160,9 @@ out:
return err;
}
-static void mmc_retune_timer(unsigned long data)
+static void mmc_retune_timer(struct timer_list *t)
{
- struct mmc_host *host = (struct mmc_host *)data;
+ struct mmc_host *host = from_timer(host, t, retune_timer);
mmc_retune_needed(host);
}
@@ -389,7 +389,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
- setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+ timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
* By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5a2d71729b9a..2a8ac6829d42 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,5 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
- depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index afb43d5e1782..1cd0fff0e940 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
-static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
@@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
- mtd->_get_unmapped_area = mapram_unmapped_area;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
+ mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
+ mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
return mtd;
}
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index e67f73ab44c9..20e3604b4d71 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
-static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
static struct mtd_chip_driver maprom_chipdrv = {
.probe = map_rom_probe,
@@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
- mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_point = maprom_point;
+ mtd->_unpoint = maprom_unpoint;
mtd->_read = maprom_read;
mtd->_write = maprom_write;
mtd->_sync = maprom_nop;
@@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 84b16133554b..0806f72102c0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv;
- if (IS_ERR_OR_NULL(root))
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ dev_warn(floor->dev.parent,
+ "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return;
+ }
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 268aae45b514..555b94406e0b 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
-static struct mtd_partition lart_partitions[] = {
+static const struct mtd_partition lart_partitions[] = {
/* blob */
{
.name = "blob",
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 00eea6fd379c..dbe6a1de2bb8 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
/* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
{ "mr25h256" }, /* 256 Kib, 40 MHz */
{ "mr25h10" }, /* 1 Mib, 40 MHz */
{ "mr25h40" }, /* 4 Mib, 40 MHz */
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index cbd8547d7aad..0bf4aeaf0cb8 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
@@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
*virt = mtd->priv + from;
*retlen = len;
+
+ if (phys) {
+ /* limit retlen to the number of contiguous physical pages */
+ unsigned long page_ofs = offset_in_page(*virt);
+ void *addr = *virt - page_ofs;
+ unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
+
+ *phys = __pfn_to_phys(pfn0) + page_ofs;
+ len += page_ofs;
+ while (len > PAGE_SIZE) {
+ len -= PAGE_SIZE;
+ addr += PAGE_SIZE;
+ pfn0++;
+ pfn1 = vmalloc_to_pfn(addr);
+ if (pfn1 != pfn0) {
+ *retlen = addr - *virt;
+ break;
+ }
+ }
+ }
+
return 0;
}
@@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return 0;
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- return (unsigned long) mtd->priv + offset;
-}
-
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->_erase = ram_erase;
mtd->_point = ram_point;
mtd->_unpoint = ram_unpoint;
- mtd->_get_unmapped_area = ram_get_unmapped_area;
mtd->_read = ram_read;
mtd->_write = ram_write;
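
With map_ram, map_rom and mtdram all converted, NOMMU mmap support no longer needs a per-driver ->_get_unmapped_area() hook: drivers expose ->_point()/->_unpoint() and the core (see the mtdcore.c change further down) derives the mapping address from mtd_point(). A hedged sketch of how a caller uses the point API; the helper name is illustrative:

	#include <linux/mtd/mtd.h>

	static int peek_region(struct mtd_info *mtd, loff_t ofs, size_t len)
	{
		size_t retlen;
		void *virt;
		int ret;

		ret = mtd_point(mtd, ofs, len, &retlen, &virt, NULL);
		if (ret)
			return ret;		/* e.g. -EOPNOTSUPP if unsupported */

		/* ... read directly from up to retlen bytes at virt ... */

		mtd_unpoint(mtd, ofs, retlen);
		return 0;
	}
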
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 8087c36dc693..0ec85f316d24 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length
}
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
- ioremap(start, length))) {
- E("slram: ioremap failed\n");
+ memremap(start, length,
+ MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
+ E("slram: memremap failed\n");
return -EIO;
}
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
@@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
- iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
@@ -206,7 +207,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
mtd_device_unregister(slram_mtdlist->mtdinfo);
- iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
kfree(slram_mtdlist);
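
slram maps ordinary system RAM, so memremap() is the better fit: it hands back a plain kernel pointer that may be dereferenced directly, whereas ioremap() returns an __iomem cookie meant for readl()/writel() accessors. Passing several MEMREMAP_* flags expresses an ordered preference (write-back if possible, else write-through, else write-combine). A minimal sketch; the helper name and arguments are invented:

	#include <linux/io.h>

	static void *map_plain_ram(phys_addr_t start, size_t length)
	{
		void *virt = memremap(start, length,
				      MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC);

		if (!virt)
			return NULL;	/* memremap() reports failure with NULL */

		/* memcpy()/memset() and direct loads/stores are fine on virt */
		return virt;		/* release later with memunmap(virt) */
	}
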
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index d504b3d1791d..70f488628464 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -61,7 +61,7 @@ static struct map_info flagadm_map = {
.bankwidth = 2,
};
-static struct mtd_partition flagadm_parts[] = {
+static const struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 15bbda03be65..a0b8fa7849a9 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
-static struct mtd_partition partitions[] =
+static const struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 81dc2598bc0a..3528497f96c7 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -52,7 +52,7 @@
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{
.name = "NetSc520 boot kernel",
.offset = 0,
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a577ef8553d0..729579fb654f 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = {
.bankwidth = AMD_BUSWIDTH,
};
-static struct mtd_partition nettel_amd_partitions[] = {
+static const struct mtd_partition nettel_amd_partitions[] = {
{
.name = "SnapGear BIOS config",
.offset = 0x000e0000,
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 51572895c02c..6d9a4d6f9839 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -43,7 +43,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
- struct resource *area;
struct platdata_mtd_ram *pdata;
};
@@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
- /* release resources */
-
- if (info->area) {
- release_resource(info->area);
- kfree(info->area);
- }
-
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
-
kfree(info);
return 0;
@@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource specified\n");
- err = -ENOENT;
+ info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
goto exit_free;
}
@@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev)
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
- /* register our usage of the memory area */
-
- info->area = request_mem_region(res->start, info->map.size, pdev->name);
- if (info->area == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- err = -EIO;
- goto exit_free;
- }
-
- /* remap the memory area */
-
- info->map.virt = ioremap(res->start, info->map.size);
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
- if (info->map.virt == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() region\n");
- err = -EIO;
- goto exit_free;
- }
-
simple_map_init(&info->map);
dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
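
devm_ioremap_resource() collapses the request_mem_region() + ioremap() + error-message boilerplate into one device-managed call (it even copes with a NULL resource), which is why the ->area bookkeeping and the unmap/release code in platram_remove() can simply be dropped. A schematic probe using it; the function name is invented:

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* covers res == NULL too */

		/* mapping and region are released automatically on driver detach */
		return 0;
	}
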
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 556a2dfe94c5..4337d279ad83 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin);
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{ .name = "SBC-GXx flash boot partition",
.offset = 0,
.size = BOOT_PARTITION_SIZE_KiB*1024 },
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 9969fedb1f13..8f177e0acb8c 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -43,7 +43,7 @@ static struct map_info ts5500_map = {
.phys = WINDOW_ADDR
};
-static struct mtd_partition ts5500_partitions[] = {
+static const struct mtd_partition ts5500_partitions[] = {
{
.name = "Drive A",
.offset = 0,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 00a8190797ec..aef030ca8601 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-static struct mtd_partition uclinux_romfs[] = {
+static const struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index d573606b91c2..60bf53df5454 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -644,32 +644,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/*
- * try to support NOMMU mmaps on concatenated devices
- * - we don't support subdev spanning as we can't guarantee it'll work
- */
-static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_concat *concat = CONCAT(mtd);
- int i;
-
- for (i = 0; i < concat->num_subdev; i++) {
- struct mtd_info *subdev = concat->subdev[i];
-
- if (offset >= subdev->size) {
- offset -= subdev->size;
- continue;
- }
-
- return mtd_get_unmapped_area(subdev, len, offset, flags);
- }
-
- return (unsigned long) -ENOSYS;
-}
-
-/*
* This function constructs a virtual MTD device by concatenating
* num_devs MTD devices. A pointer to the new device object is
* stored to *new_dev upon success. This function does _not_
@@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd._unlock = concat_unlock;
concat->mtd._suspend = concat_suspend;
concat->mtd._resume = concat_resume;
- concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e7ea842ba3db..f80e911b8843 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
unsigned long offset, unsigned long flags)
{
- if (!mtd->_get_unmapped_area)
- return -EOPNOTSUPP;
- if (offset >= mtd->size || len > mtd->size - offset)
- return -EINVAL;
- return mtd->_get_unmapped_area(mtd, len, offset, flags);
+ size_t retlen;
+ void *virt;
+ int ret;
+
+ ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
+ if (ret)
+ return ret;
+ if (retlen != len) {
+ mtd_unpoint(mtd, offset, retlen);
+ return -ENOSYS;
+ }
+ return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
@@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+ struct mtd_oob_ops *ops)
+{
+ /*
+ * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+ * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+ * this case.
+ */
+ if (!ops->datbuf)
+ ops->len = 0;
+
+ if (!ops->oobbuf)
+ ops->ooblen = 0;
+
+ if (offs < 0 || offs + ops->len >= mtd->size)
+ return -EINVAL;
+
+ if (ops->ooblen) {
+ u64 maxooblen;
+
+ if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+ return -EINVAL;
+
+ maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
+ mtd_oobavail(mtd, ops)) - ops->ooboffs;
+ if (ops->ooblen > maxooblen)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
@@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!mtd->_read_oob)
return -EOPNOTSUPP;
+ ret_code = mtd_check_oob_ops(mtd, from, ops);
+ if (ret_code)
+ return ret_code;
+
ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ int ret;
+
ops->retlen = ops->oobretlen = 0;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
+
+ ret = mtd_check_oob_ops(mtd, to, ops);
+ if (ret)
+ return ret;
+
ledtrig_mtd_activity();
return mtd->_write_oob(mtd, to, ops);
}
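
mtd_check_oob_ops() centralises bounds checking that was previously left to individual drivers: the data length is checked against the device size, and the OOB length against the usable OOB bytes remaining between offs and the end of the device. A worked example with made-up geometry to show the maxooblen arithmetic:

	/* Illustrative geometry: 2 KiB pages, 64 usable OOB bytes per page,
	 * 1 MiB device => 512 pages in total.
	 */
	enum { PAGE_DATA = 2048, OOB_AVAIL = 64, PAGES = (1024 * 1024) / PAGE_DATA };

	static unsigned int max_ooblen_from(unsigned int start_page,
					    unsigned int ooboffs)
	{
		/* mirrors: (div_by_ws(size) - div_by_ws(offs)) * oobavail - ooboffs */
		return (PAGES - start_page) * OOB_AVAIL - ooboffs;
	}

	/* max_ooblen_from(500, 0) == (512 - 500) * 64 == 768 bytes */
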
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a308e707392d..be088bccd593 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return part->parent->_unpoint(part->parent, from + part->offset, len);
}
-static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- offset += part->offset;
- return part->parent->_get_unmapped_area(part->parent, len, offset,
- flags);
-}
-
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
slave->mtd._unpoint = part_unpoint;
}
- if (parent->_get_unmapped_area)
- slave->mtd._get_unmapped_area = part_get_unmapped_area;
if (parent->_read_oob)
slave->mtd._read_oob = part_read_oob;
if (parent->_write_oob)
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index e43fea896d1e..d58a61c09304 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -79,14 +79,14 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
return ERR_PTR(ret);
}
/* go */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
return dget(sb->s_root);
/* new mountpoint for an already mounted superblock */
@@ -202,7 +202,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
not_an_MTD_device:
#endif /* CONFIG_BLOCK */
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
printk(KERN_NOTICE
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 7d9080e33865..f07492c6f4b2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -50,7 +50,7 @@
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
-#define LOW_FRAG_GC_TRESHOLD 5
+#define LOW_FRAG_GC_THRESHOLD 5
/*
* Wear level cost amortization. We want to do wear leveling on the background
@@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
int idx, stopat;
- if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
stopat = MTDSWAP_LOWFRAG;
else
stopat = MTDSWAP_HIFRAG;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3f2036f31da4..bb48aafed9a2 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
+
This enables the driver for the NAND flash device found on
- PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+ PXA3xx processors (NFCv1) and also on 32-bit Armada
+ platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
+ platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6e2db700d923..118a1349aad3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index dcec9cf4983f..d60ada45c549 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL;
* Define partitions for flash devices
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{ .name = "Kernel",
.offset = 0,
.size = 3 * SZ_1M + SZ_512K },
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index f25eca79f4e5..90a71a56bc23 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
- if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
- (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
+ if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
list_for_each_entry(nand, &nc->chips, node) {
int i;
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove,
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 8268636675ef..fcbe4fd6e684 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
- struct atmel_pmecc *pmecc = user->pmecc;
-
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
-
- /* Reset the ECC engine */
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(pmecc);
return pmecc;
}
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h
index a8ddbfca2ea5..817e0dd9fd15 100644
--- a/drivers/mtd/nand/atmel/pmecc.h
+++ b/drivers/mtd/nand/atmel/pmecc.h
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9d4a28fa6b73..8ab827edf94e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
ctx->write_byte(mtd, (u8)(page_addr >> 8));
- /* One more address cycle for devices > 32MiB */
- if (this->chipsize > (32 << 20))
+ if (this->options & NAND_ROW_ADDR_3)
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
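
Both this driver and the Atmel controller above stop deducing "does this chip need a third row-address cycle" from chipsize/writesize heuristics; the NAND core now records that property in chip->options as NAND_ROW_ADDR_3, so address generation reduces to a flag test. A hedged sketch; the output callback is purely illustrative:

	#include <linux/mtd/rawnand.h>

	static void emit_row_address(struct nand_chip *chip, int page_addr,
				     void (*out_byte)(u8 val))
	{
		out_byte(page_addr & 0xff);
		out_byte((page_addr >> 8) & 0xff);

		if (chip->options & NAND_ROW_ADDR_3)	/* set by the core at detect time */
			out_byte((page_addr >> 16) & 0xff);
	}
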
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 1fc435f994e1..b01c9804590e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io;
/*
* Define static partitions for flash device
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
[0] = {
.name = "cmx270-0",
.offset = 0,
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3087b0ba7b7f..5124f8ae8c04 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "denali.h"
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand"
-/* Host Data/Command Interface */
-#define DENALI_HOST_ADDR 0x00
-#define DENALI_HOST_DATA 0x10
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/
#define DENALI_CLK_X_MULT 6
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
-static void denali_host_write(struct denali_nand_info *denali,
- uint32_t addr, uint32_t data)
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires 28 bits of address region allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
{
- iowrite32(addr, denali->host + DENALI_HOST_ADDR);
- iowrite32(data, denali->host + DENALI_HOST_DATA);
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
* Use the configuration feature register to determine the maximum number of
* banks that the hardware supports.
*/
-static void detect_max_banks(struct denali_nand_info *denali)
+static void denali_detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->reg + FEATURES);
- denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+ denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
- denali->irq_mask);
+ irq_mask);
return 0;
}
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
-/*
- * This helper function setups the registers for ECC and whether or not
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
- bool transfer_spare)
-{
- int ecc_en_flag, transfer_spare_flag;
-
- /* set ECC, transfer spare bits if needed */
- ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
- transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
- /* Enable spare area/ECC per user's request. */
- iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
- iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
-}
-
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf[i] = denali->host_read(denali, addr);
}
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf16[i] = denali->host_read(denali, addr);
}
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+ denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
static int denali_dev_ready(struct mtd_info *mtd)
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0;
}
- max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
-#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
- err_sector = ECC_SECTOR(err_addr);
- err_byte = ECC_BYTE(err_addr);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
- err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
- err_device = ECC_ERR_DEVICE(err_cor_info);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
- if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
}
prev_sector = err_sector;
- } while (!ECC_LAST_ERR(err_cor_info));
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
- * Once handle all ecc errors, controller will trigger a
- * ECC_TRANSACTION_DONE interrupt, so here just wait for
- * a while for this interrupt
+ * Once all ECC errors have been handled, the controller triggers an
+ * ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
- iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
- ioread32(denali->reg + DMA_ENABLE);
-}
-
static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
- denali_host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
- denali_host_write(denali, mode, dma_addr);
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
- denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
- denali_host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
- denali_host_write(denali, mode | 0x14000, 0x2400);
-}
-
-static void denali_setup_dma(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
-{
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali_setup_dma64(denali, dma_addr, page, write);
- else
- denali_setup_dma32(denali, dma_addr, page, write);
+ denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask;
int i;
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+ *buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
int i;
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR;
}
- denali_enable_dma(denali, true);
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
- denali_setup_dma(denali, dma_addr, page, write);
+ denali->setup_dma(denali, dma_addr, page, write);
- /* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
- denali_enable_dma(denali, false);
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write)
{
- setup_ecc_for_xfer(denali, !raw, raw);
+ iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+ denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write);
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
+ denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
/* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali,
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
- tmp |= acc_clks;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
- tmp |= re_2_we;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
- tmp |= re_2_re;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE);
- /* tWHR -> WE_2_RE */
- we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
+ t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
- tmp |= we_2_re;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
- tmp &= ~addr_2_data_mask;
- tmp |= addr_2_data;
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
- tmp |= rdwr_en_hi;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
- tmp |= rdwr_en_lo;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
- tmp |= cs_setup;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0;
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- detect_max_banks(denali);
+ denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-
- /* Should set value for these registers when init */
- iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, denali->reg + ECC_ENABLE);
}
int denali_calc_ecc_bytes(int step_size, int strength)
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
- /*
- * the completion object will be used to notify
- * the callee that the interrupt is done
- */
- init_completion(&denali->complete);
-
- /*
- * the spinlock will be used to synchronize the ISR with any
- * element that might be access shared data (interrupt status)
- */
- spin_lock_init(&denali->irq_lock);
-}
-
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
int ret;
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
- denali_drv_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali);
- /* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- /* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte;
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
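+ /*
+ * The host data/command interface is either directly memory-mapped or
+ * reached through the indexed CTRL/DATA register pair; the FEATURES
+ * register reports which mode this instance of the IP implements.
+ */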
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
/* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface;
- /*
- * scan for NAND devices attached to the controller
- * this is the first stage in a two step process to register
- * with the nand subsystem
- */
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
goto disable_irq;
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16;
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
}
- /*
- * second stage of the NAND scan
- * this stage requires information regarding ECC and
- * bad block management.
- */
-
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
-
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-
- /* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali);
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
@@ -1441,7 +1403,6 @@ disable_irq:
}
EXPORT_SYMBOL(denali_init);
-/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 9239e6793e6e..2911066dacac 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef __DENALI_H__
#define __DENALI_H__
#include <linux/bitops.h>
+#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank)
@@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
-#define MAKE_ECC_CORRECTION(val, thresh) \
- (((val) & (ECC_CORRECTION__VALUE)) | \
- (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0)
@@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
-#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
-#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
-#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
@@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev;
void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */
-
- /* elements used by ISR */
struct completion complete;
- spinlock_t irq_lock;
- uint32_t irq_mask;
- uint32_t irq_status;
+ spinlock_t irq_lock; /* protect irq_mask and irq_status */
+ u32 irq_mask; /* interrupts we are waiting for */
+ u32 irq_status; /* interrupts that have happened */
int irq;
-
- void *buf;
+ void *buf; /* for syndrome layout conversion */
dma_addr_t dma_addr;
- int dma_avail;
+ int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes;
+ int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks;
- unsigned int revision;
- unsigned int caps;
+ unsigned int revision; /* IP revision */
+ unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps;
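+ /* host access and DMA setup hooks, chosen in denali_init() */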
+ u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+ void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+ void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+ int page, int write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 56e2e177644d..cfd33e6ca77f 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "denali.h"
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids,
},
};
-
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 81370c79aa48..57fb7ae31412 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
+#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -106,7 +109,6 @@ failed_remap_reg:
return ret;
}
-/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
-
module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index c3aa53caab5c..72671dc52e2e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
+ if (this->options & NAND_ROW_ADDR_3) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n");
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index fd3648952b5a..484f7fbc3f7d 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce,
- !(ctrl & NAND_NCE));
- gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
- gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ if (gpiomtd->nce)
+ gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
+ gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
+ gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+ return gpiod_get_value(gpiomtd->rdy);
}
#ifdef CONFIG_OF
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
}
}
- plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
- plat->gpio_nce = of_get_gpio(dev->of_node, 1);
- plat->gpio_ale = of_get_gpio(dev->of_node, 2);
- plat->gpio_cle = of_get_gpio(dev->of_node, 3);
- plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip));
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return 0;
}
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
+ struct device *dev = &pdev->dev;
int ret = 0;
- if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+ if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
- gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
- gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
- ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
- if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
- "NAND NCE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
}
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
- "NAND NWP");
- if (ret)
- return ret;
+ gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
}
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
- "NAND RDY");
- if (ret)
- return ret;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
- chip->dev_ready = gpio_nand_devready;
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
}
+ /* Using RDY pin */
+ if (gpiomtd->rdy)
+ chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
+ mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1);
if (ret)
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0;
err_wp:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return ret;
}
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index d9ee1a7e6956..0897261c3e17 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 7f3b065b6b8f..c51d214d169e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0;
complete(&ecc->done);
} else {
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
}
}
- writel(0, ecc->regs + ECC_IRQ_REG(op));
-
return IRQ_HANDLED;
}
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE)
+ /*
+ * Clear the decode IRQ status in case waiting for the decode
+ * IRQ timed out.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 53e5e0337c3e..f3be0b2a8869 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- pr_debug("%s: RESET failed\n", __func__);
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG;
}
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
- printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG;
} else {
ret += err;
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+ dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
}
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++;
}
- pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
- /* One more address cycle for higher density devices */
- if (mtd->size >= 0x4000000) {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 12edaae17d81..6135d007a068 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- if (section)
+ if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
- /* One more address cycle for devices > 32MiB */
- if (chip->chipsize > (32 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
}
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE);
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
- /* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
-
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
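+ /* select the die targeted by this write */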
+ chip->select_chip(mtd, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
@@ -3999,6 +4001,9 @@ ident_done:
chip->chip_shift += 32 - 1;
}
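+ /*
+ * chip_shift - page_shift is the number of row address bits; more than
+ * 16 of them (i.e. more than two 8-bit address cycles) means a third
+ * row address cycle is needed.
+ */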
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
chip->badblockbits = 8;
chip->erase = single_erase;
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for any ->oobsize,
+ * but we must keep the old large/small page with
+ * ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->mode == NAND_ECC_NONE) {
+ mtd_set_ooblayout(mtd,
+ &nand_ooblayout_lp_ops);
+ break;
+ }
+
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 246b4393118e..44322a363ba5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
struct dentry *root = nsmtd->dbg.dfs_dir;
struct dentry *dent;
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ /*
+ * Just skip debugfs initialization when the debugfs directory is
+ * missing.
+ */
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0;
-
- if (IS_ERR_OR_NULL(root))
- return -1;
+ }
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
root, dev, &dfs_fops);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7bb4d2ea9342..af5b32c9a791 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) {
write_addr_reg(nand, page_addr);
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 54540c8fa1a2..dad438c4906a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
+ * @i: The sector number (for a multi sector page)
*
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Support calculating BCH4/8/16 ECC vectors for one sector
+ * within a page. The sector number is passed in @i.
*/
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
- int i, j;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating BCH4/8/16 ECC vectors for one sector. This is used
+ * when software-based correction is in place, as ECC is then needed one
+ * sector at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating BCH4/8/16 ECC vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
- ecc_code = ecc_calc;
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH8_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
- *ecc_code++ = (bch_val4 & 0xFF);
- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
- *ecc_code++ = (bch_val3 & 0xFF);
- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
- *ecc_code++ = (bch_val2 & 0xFF);
- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
- *ecc_code++ = (bch_val1 & 0xFF);
- break;
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH4_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
- ((bch_val1 >> 28) & 0xF);
- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val1 & 0xF) << 4);
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- val = readl(gpmc_regs->gpmc_bch_result6[i]);
- ecc_code[0] = ((val >> 8) & 0xFF);
- ecc_code[1] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result5[i]);
- ecc_code[2] = ((val >> 24) & 0xFF);
- ecc_code[3] = ((val >> 16) & 0xFF);
- ecc_code[4] = ((val >> 8) & 0xFF);
- ecc_code[5] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result4[i]);
- ecc_code[6] = ((val >> 24) & 0xFF);
- ecc_code[7] = ((val >> 16) & 0xFF);
- ecc_code[8] = ((val >> 8) & 0xFF);
- ecc_code[9] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result3[i]);
- ecc_code[10] = ((val >> 24) & 0xFF);
- ecc_code[11] = ((val >> 16) & 0xFF);
- ecc_code[12] = ((val >> 8) & 0xFF);
- ecc_code[13] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result2[i]);
- ecc_code[14] = ((val >> 24) & 0xFF);
- ecc_code[15] = ((val >> 16) & 0xFF);
- ecc_code[16] = ((val >> 8) & 0xFF);
- ecc_code[17] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result1[i]);
- ecc_code[18] = ((val >> 24) & 0xFF);
- ecc_code[19] = ((val >> 16) & 0xFF);
- ecc_code[20] = ((val >> 8) & 0xFF);
- ecc_code[21] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result0[i]);
- ecc_code[22] = ((val >> 24) & 0xFF);
- ecc_code[23] = ((val >> 16) & 0xFF);
- ecc_code[24] = ((val >> 8) & 0xFF);
- ecc_code[25] = ((val >> 0) & 0xFF);
- break;
- default:
- return -EINVAL;
- }
-
- /* ECC scheme specific syndrome customizations */
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch4_polynomial[j];
- break;
- case OMAP_ECC_BCH4_CODE_HW:
- /* Set 8th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch8_polynomial[j];
- break;
- case OMAP_ECC_BCH8_CODE_HW:
- /* Set 14th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- break;
- default:
- return -EINVAL;
- }
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
- ecc_calc += eccbytes;
+ ecc_calc += eccbytes;
}
return 0;
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ int step, ret = 0;
+
+ /*
+ * Write the entire page in one go, which is optimal since the
+ * ECC is calculated by hardware. ECC is computed for all
+ * subpages, but only the requested ones are kept.
+ */
+
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* mask the ECC of untouched subpages by padding with 0xff */
+ if (step < start_step || step > end_step)
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+ if (ret)
+ return ret;
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ }
+
+ /*
+ * Copy the calculated ECC for the whole page into chip->oob_poi;
+ * this includes the 0xff mask values for unwritten subpages.
+ */
+ ecc_calc = chip->buffers->ecccalc;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total);
/* Calculate ecc bytes */
- chip->ecc.calculate(mtd, buf, ecc_calc);
+ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true;
}
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
- struct omap_nand_platform_data *pdata)
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
- struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
- if (dev->of_node) {
- if (omap_get_dt_info(dev, info))
- return -EINVAL;
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- info->gpmc_cs = pdata->cs;
- info->reg = pdata->reg;
- info->ecc_opt = pdata->ecc_opt;
- if (pdata->dev_ready)
- dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
- info->xfer_type = pdata->xfer_type;
- info->devsize = pdata->devsize;
- info->elm_of_node = pdata->elm_of_node;
- info->flash_bbt = pdata->flash_bbt;
- }
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
- platform_set_drvdata(pdev, info);
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (!omap2_nand_ecc_check(info, pdata)) {
+ if (!omap2_nand_ecc_check(info)) {
err = -EINVAL;
goto return_error;
}
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2167,10 +2255,9 @@ scan_tail:
if (err)
goto return_error;
- if (dev->of_node)
- mtd_device_register(mtd, NULL, 0);
- else
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto return_error;
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 85cff68643e0..90b9a9ccbe60 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40)
@@ -45,6 +47,10 @@
*/
#define INIT_BUFFER_SIZE 2048
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370,
+ PXA3XX_NAND_VARIANT_ARMADA_8K,
};
struct pxa3xx_nand_host {
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
},
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
+ },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR;
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status);
else
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3);
}
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt)
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id)
return 0;
+ /*
+ * Some SoCs, such as the A7k/A8k, need the NAND controller to be
+ * enabled manually rather than relying on the bootloader to do it.
+ * A single bit in the System Functions registers takes care of this.
+ */
+ if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+ reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ }
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
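Editor's note: the Armada 8k hunk above enables the NAND controller through a
system-controller syscon before the DT properties are parsed. A minimal sketch
of that enable sequence follows; it is not taken from the patch, the function
name is hypothetical, and regmap_update_bits() is used as a shorter equivalent
of the read-modify-write done in the hunk.

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int nfc_enable_via_syscon(struct platform_device *pdev)
{
	struct regmap *sysctrl;

	sysctrl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						  "marvell,system-controller");
	if (IS_ERR(sysctrl))
		return PTR_ERR(sysctrl);

	/* set the NFC enable bit, leaving the other mux bits untouched */
	return regmap_update_bits(sysctrl, GENCONF_SOC_DEVICE_MUX,
				  GENCONF_SOC_DEVICE_MUX_NFC_EN,
				  GENCONF_SOC_DEVICE_MUX_NFC_EN);
}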
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 3baddfc997d1..2656c1ac5646 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
+ * @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for the next sgl
+ * @bam_ce_start - the index in bam_ce which marks the first command
+ * element of the current sgl; it will be used for the size
+ * calculation of the current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx.
*/
struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
@@ -307,7 +325,8 @@ struct nandc_regs {
* controller
* @dev: parent device
* @base: MMIO base
- * @base_dma: physical base address of controller registers
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @core_clk: controller clock
* @aon_clk: another controller clock
*
@@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
+ phys_addr_t base_phys;
dma_addr_t base_dma;
struct clk *core_clk;
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size =
sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam)
return;
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
@@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
}
/*
+ * Prepares the command descriptor for BAM DMA, which is used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed as command elements, so this function fills the command
+ * elements from the bam transaction ce array with the required data.
+ * A single SGL can contain multiple command elements, so NAND_BAM_NEXT_SGL
+ * is used to start a separate SGL after the current command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{
bool flow_control = false;
void *vaddr;
- int size;
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
- size = num_regs * sizeof(u32);
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
- int size;
vaddr = offset_to_nandc_reg(regs, first);
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- size = num_regs * sizeof(u32);
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, false, first, vaddr, size,
- flow_control);
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
if (r)
return r;
}
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
+ nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core");
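Editor's note on the reg_buf_dma_addr() helper added above (illustration, not
part of the patch): reg_read_buf is DMA-mapped once at probe time and
reg_read_dma holds its bus address, so the device-visible address of any slot
is simply the same byte offset applied to the bus address:

	dma = reg_read_dma + ((u8 *)vaddr - (u8 *)reg_read_buf)

	/* e.g. the third __le32 slot, vaddr = reg_read_buf + 2,
	   resolves to reg_read_dma + 8 */

This is what lets prep_bam_dma_desc_cmd() point a BAM_READ_COMMAND element at
the matching slot of the pre-mapped register read buffer.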
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e7f3c98487e6..3c5008a4f5f3 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
- const struct of_device_id *match;
- struct flctl_soc_config *config;
+ const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
- match = of_match_device(of_flctl_match, dev);
- if (match)
- config = (struct flctl_soc_config *)match->data;
- else {
+ config = of_device_get_match_data(dev);
+ if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}
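Editor's note: the sh_flctl hunk above is a straight conversion to
of_device_get_match_data(). A minimal sketch of the idiom with hypothetical
names; the helper returns the match table's .data pointer, or NULL when the
device was not probed from a matching OF node.

#include <linux/of_device.h>

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_soc_config *cfg;

	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;		/* no OF configuration attached */

	/* ... use cfg ... */
	return 0;
}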
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index d206b3c533bc..ee5ab994132f 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -6,3 +6,11 @@ config MTD_PARSER_TRX
may contain up to 3/4 partitions (depending on the version).
This driver will parse TRX header and report at least two partitions:
kernel and rootfs.
+
+config MTD_SHARPSL_PARTS
+ tristate "Sharp SL Series NAND flash partition parser"
+ depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST
+ help
+ This provides the read-only FTL logic necessary to read the partition
+ table from the NAND flash of Sharp SL Series (Zaurus) devices, and the
+ MTD partition parser built on top of it.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 4d9024e0be3b..5b1bcc3d90d9 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
new file mode 100644
index 000000000000..5fe0079ea5ed
--- /dev/null
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -0,0 +1,398 @@
+/*
+ * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
+ * for logical addressing, as used on the PXA models of the SHARP SL Series.
+ *
+ * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
+ *
+ * Based on SHARP GPL 2.4 sources:
+ * http://support.ezaurus.com/developer/source/source_dl.asp
+ * drivers/mtd/nand/sharp_sl_logical.c
+ * linux/include/asm-arm/sharp_nand_logical.h
+ *
+ * Copyright (C) 2002 SHARP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* oob structure */
+#define NAND_NOOB_LOGADDR_00 8
+#define NAND_NOOB_LOGADDR_01 9
+#define NAND_NOOB_LOGADDR_10 10
+#define NAND_NOOB_LOGADDR_11 11
+#define NAND_NOOB_LOGADDR_20 12
+#define NAND_NOOB_LOGADDR_21 13
+
+#define BLOCK_IS_RESERVED 0xffff
+#define BLOCK_UNMASK_COMPLEMENT 1
+
+/* factory defaults */
+#define SHARPSL_NAND_PARTS 3
+#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
+#define SHARPSL_PARTINFO1_LADDR 0x00060000
+#define SHARPSL_PARTINFO2_LADDR 0x00064000
+
+#define BOOT_MAGIC 0x424f4f54
+#define FSRO_MAGIC 0x4653524f
+#define FSRW_MAGIC 0x46535257
+
+/**
+ * struct sharpsl_ftl - Sharp FTL Logical Table
+ * @logmax: number of logical blocks
+ * @log2phy: the logical-to-physical table
+ *
+ * Structure containing the logical-to-physical translation table
+ * used by the SHARP SL FTL.
+ */
+struct sharpsl_ftl {
+ unsigned int logmax;
+ unsigned int *log2phy;
+};
+
+/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
+static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
+{
+ u8 freebytes = 0;
+ int section = 0;
+
+ while (true) {
+ struct mtd_oob_region oobfree = { };
+ int ret, i;
+
+ ret = mtd_ooblayout_free(mtd, section++, &oobfree);
+ if (ret)
+ break;
+
+ if (!oobfree.length || oobfree.offset > 15 ||
+ (oobfree.offset + oobfree.length) < 8)
+ continue;
+
+ i = oobfree.offset >= 8 ? oobfree.offset : 8;
+ for (; i < oobfree.offset + oobfree.length && i < 16; i++)
+ freebytes |= BIT(i - 8);
+
+ if (freebytes == 0xff)
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret != 0 || mtd->oobsize != ops.oobretlen)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * The logical block number assigned to a physical block is stored in the OOB
+ * of the first page, in 3 16-bit copies with the following layout:
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB   xyxyxy
+ *
+ * When reading we check that the first two copies agree.
+ * In case of error, matching is tried using the following pairs.
+ * Reserved values 0xffff mean the block is kept for wear leveling.
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB   xyxy     oob[8]==oob[10]  && oob[9]==oob[11]  -> byte0=8,  byte1=9
+ * ECC BB     xyxy   oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10, byte1=11
+ * ECC BB   xy  xy   oob[12]==oob[8]  && oob[13]==oob[9]  -> byte0=12, byte1=13
+ */
+static int sharpsl_nand_get_logical_num(u8 *oob)
+{
+ u16 us;
+ int good0, good1;
+
+ if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
+ oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
+ good0 = NAND_NOOB_LOGADDR_00;
+ good1 = NAND_NOOB_LOGADDR_01;
+ } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
+ oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
+ good0 = NAND_NOOB_LOGADDR_10;
+ good1 = NAND_NOOB_LOGADDR_11;
+ } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
+ oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
+ good0 = NAND_NOOB_LOGADDR_20;
+ good1 = NAND_NOOB_LOGADDR_21;
+ } else {
+ return -EINVAL;
+ }
+
+ us = oob[good0] | oob[good1] << 8;
+
+ /* parity check */
+ if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
+ return -EINVAL;
+
+ /* reserved */
+ if (us == BLOCK_IS_RESERVED)
+ return BLOCK_IS_RESERVED;
+
+ return (us >> 1) & GENMASK(9, 0);
+}
+
+static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+{
+ unsigned int block_num, log_num, phymax;
+ loff_t block_adr;
+ u8 *oob;
+ int i, ret;
+
+ oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!oob)
+ return -ENOMEM;
+
+ phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
+
+ /* FTL reserves 5% of the blocks + 1 spare */
+ ftl->logmax = ((phymax * 95) / 100) - 1;
+
+ ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
+ GFP_KERNEL);
+ if (!ftl->log2phy) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize ftl->log2phy */
+ for (i = 0; i < ftl->logmax; i++)
+ ftl->log2phy[i] = UINT_MAX;
+
+ /* create physical-logical table */
+ for (block_num = 0; block_num < phymax; block_num++) {
+ block_adr = block_num * mtd->erasesize;
+
+ if (mtd_block_isbad(mtd, block_adr))
+ continue;
+
+ if (sharpsl_nand_read_oob(mtd, block_adr, oob))
+ continue;
+
+ /* get logical block */
+ log_num = sharpsl_nand_get_logical_num(oob);
+
+ /* cut-off errors and skip the out-of-range values */
+ if (log_num > 0 && log_num < ftl->logmax) {
+ if (ftl->log2phy[log_num] == UINT_MAX)
+ ftl->log2phy[log_num] = block_num;
+ }
+ }
+
+ pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
+ phymax, ftl->logmax, phymax - ftl->logmax);
+
+ ret = 0;
+exit:
+ kfree(oob);
+ return ret;
+}
+
+void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+{
+ kfree(ftl->log2phy);
+}
+
+static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
+ loff_t from,
+ size_t len,
+ void *buf,
+ struct sharpsl_ftl *ftl)
+{
+ unsigned int log_num, final_log_num;
+ unsigned int block_num;
+ loff_t block_adr;
+ loff_t block_ofs;
+ size_t retlen;
+ int err;
+
+ log_num = mtd_div_by_eb((u32)from, mtd);
+ final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
+
+ if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
+ return -EINVAL;
+
+ block_num = ftl->log2phy[log_num];
+ block_adr = block_num * mtd->erasesize;
+ block_ofs = mtd_mod_by_eb((u32)from, mtd);
+
+ err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (!err && retlen != len)
+ err = -EIO;
+
+ if (err)
+ pr_err("sharpslpart: error, read failed at %#llx\n",
+ block_adr + block_ofs);
+
+ return err;
+}
+
+/*
+ * MTD Partition Parser
+ *
+ * Sample values read from SL-C860
+ *
+ * # cat /proc/mtd
+ * dev: size erasesize name
+ * mtd0: 006d0000 00020000 "Filesystem"
+ * mtd1: 00700000 00004000 "smf"
+ * mtd2: 03500000 00004000 "root"
+ * mtd3: 04400000 00004000 "home"
+ *
+ * PARTITIONINFO1
+ * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
+ * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
+ * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
+ */
+struct sharpsl_nand_partinfo {
+ __le32 start;
+ __le32 end;
+ __be32 magic;
+ u32 reserved;
+};
+
+static int sharpsl_nand_read_partinfo(struct mtd_info *master,
+ loff_t from,
+ size_t len,
+ struct sharpsl_nand_partinfo *buf,
+ struct sharpsl_ftl *ftl)
+{
+ int ret;
+
+ ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
+ if (ret)
+ return ret;
+
+ /* check for magics */
+ if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
+ be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
+ be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
+ pr_err("sharpslpart: magic values mismatch\n");
+ return -EINVAL;
+ }
+
+ /* fixup for hardcoded value 64 MiB (for older models) */
+ buf[2].end = cpu_to_le32(master->size);
+
+ /* extra sanity check */
+ if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
+ le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
+ le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
+ le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
+ le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
+ pr_err("sharpslpart: partition sizes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct sharpsl_ftl ftl;
+ struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
+ struct mtd_partition *sharpsl_nand_parts;
+ int err;
+
+ /* check that OOB bytes 8 to 15 used by the FTL are actually free */
+ err = sharpsl_nand_check_ooblayout(master);
+ if (err)
+ return err;
+
+ /* init logical mgmt (FTL) */
+ err = sharpsl_nand_init_ftl(master, &ftl);
+ if (err)
+ return err;
+
+ /* read and validate first partition table */
+ pr_info("sharpslpart: try reading first partition table\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO1_LADDR,
+ sizeof(buf), buf, &ftl);
+ if (err) {
+ /* fallback: read second partition table */
+ pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO2_LADDR,
+ sizeof(buf), buf, &ftl);
+ }
+
+ /* cleanup logical mgmt (FTL) */
+ sharpsl_nand_cleanup_ftl(&ftl);
+
+ if (err) {
+ pr_err("sharpslpart: both partition tables are invalid\n");
+ return err;
+ }
+
+ sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
+ SHARPSL_NAND_PARTS, GFP_KERNEL);
+ if (!sharpsl_nand_parts)
+ return -ENOMEM;
+
+ /* original names */
+ sharpsl_nand_parts[0].name = "smf";
+ sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
+ sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
+ le32_to_cpu(buf[0].start);
+
+ sharpsl_nand_parts[1].name = "root";
+ sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
+ sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
+ le32_to_cpu(buf[1].start);
+
+ sharpsl_nand_parts[2].name = "home";
+ sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
+ sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
+ le32_to_cpu(buf[2].start);
+
+ *pparts = sharpsl_nand_parts;
+ return SHARPSL_NAND_PARTS;
+}
+
+static struct mtd_part_parser sharpsl_mtd_parser = {
+ .parse_fn = sharpsl_parse_mtd_partitions,
+ .name = "sharpslpart",
+};
+module_mtd_part_parser(sharpsl_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
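Editor's note: a hedged worked example of the encoding handled by
sharpsl_nand_get_logical_num() above. Treating bit 0 as a parity bit is an
inference from the hweight16() check in the patch, not something the patch
states.

/*
 * Logical block 4: 4 << 1 = 0b1000 has an odd popcount, so bit 0 is set
 * to keep the overall popcount even -> stored value us = 0x0009, i.e.
 * oob[8] = 0x09, oob[9] = 0x00 (the same pair is repeated at offsets
 * 10/11 and 12/13).  Decoding reverses it:
 *
 *   hweight16(0x0009) = 2          -> even, copy accepted
 *   (0x0009 >> 1) & GENMASK(9, 0)  -> 4, the logical block number
 *
 * A stored value of 0xffff marks a block reserved for wear leveling.
 */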
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 3692dd547879..4237c7cebf02 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -989,9 +989,9 @@ restart:
/* flush timer, runs a second after last write */
-static void sm_cache_flush_timer(unsigned long data)
+static void sm_cache_flush_timer(struct timer_list *t)
{
- struct sm_ftl *ftl = (struct sm_ftl *)data;
+ struct sm_ftl *ftl = from_timer(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
@@ -1139,7 +1139,7 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
mutex_init(&ftl->mutex);
- setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
init_completion(&ftl->erase_completion);
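Editor's note: the sm_ftl change above is one instance of the timer API
conversion that also appears in the caif_hsi, mv88e6xxx and eql hunks further
down. A minimal sketch of the pattern, with hypothetical struct and function
names: the callback now receives the timer_list pointer and recovers its
container with from_timer() instead of casting an unsigned long.

#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	/* ... */
};

static void foo_timeout(struct timer_list *t)
{
	struct foo *foo = from_timer(foo, t, timer);

	/* ... handle the expiry using foo ... */
}

static void foo_init(struct foo *foo)
{
	timer_setup(&foo->timer, foo_timeout, 0);
	mod_timer(&foo->timer, jiffies + HZ);
}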
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 69c638dd0484..89da88e59121 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || COMPILE_TEST)
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
tristate
config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash PCI driver"
depends on X86 && PCI
select SPI_INTEL_SPI
help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
will be called intel-spi-pci.
config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash platform driver"
depends on X86
select SPI_INTEL_SPI
help
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 53c7d8e0327a..75a2bc447a99 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
@@ -38,6 +39,9 @@
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY BIT(0)
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -75,7 +79,9 @@ struct cqspi_st {
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
+ bool rclk_en;
u32 trigger_address;
+ u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
+ /*
+ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ * Controller programming sequence, a couple of QSPI_REF_CLK cycles
+ * of delay are required for the above bit to be internally
+ * synchronized by the QSPI module. Provide 5 cycles of delay.
+ */
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
@@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
}
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
- const unsigned int bypass,
+ const bool bypass,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
@@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor)
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(nor);
- cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ f_pdata->read_delay);
}
if (switch_cs || switch_ck)
@@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return -ENXIO;
}
+ cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
return 0;
}
@@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
+ unsigned long data;
int ret;
int irq;
@@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
- return ret;
+ goto probe_clk_failed;
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ data = (unsigned long)of_device_get_match_data(dev);
+ if (data & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
@@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev)
}
return ret;
-probe_irq_failed:
- cqspi_controller_enable(cqspi, 0);
probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev)
clk_disable_unprepare(cqspi->clk);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#endif
static const struct of_device_id cqspi_dt_ids[] = {
- {.compatible = "cdns,qspi-nor",},
+ {
+ .compatible = "cdns,qspi-nor",
+ .data = (void *)0,
+ },
+ {
+ .compatible = "ti,k2g-qspi",
+ .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ },
{ /* end of table */ }
};
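Editor's note: the wr_delay set in probe above works out to five
reference-clock periods, rounded up to whole nanoseconds. For illustration
only, assuming a hypothetical 384 MHz QSPI_REF_CLK:

	wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, master_ref_clk_hz)
	         = 5 * DIV_ROUND_UP(1000000000, 384000000)
	         = 5 * 3
	         = 15	/* ns, consumed by ndelay(cqspi->wr_delay) */

This is the delay applied after setting the indirect-write start bit on
controllers flagged with CQSPI_NEEDS_WR_DELAY (the ti,k2g-qspi entry).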
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index e82652335ede..c0976f2e3dd1 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 8a596bfeddff..ef034d898a23 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -67,8 +67,6 @@
#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE BIT(15)
#define PR_BASE_MASK 0x3fff
-/* Last PR is GPR0 */
-#define PR_NUM (5 + 1)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
@@ -90,20 +88,35 @@
#define OPMENU0 0x08
#define OPMENU1 0x0c
+#define OPTYPE_READ_NO_ADDR 0
+#define OPTYPE_WRITE_NO_ADDR 1
+#define OPTYPE_READ_WITH_ADDR 2
+#define OPTYPE_WRITE_WITH_ADDR 3
+
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
#define BYT_BCR 0xfc
#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
+#define BYT_PR_NUM 5
#define LPT_PR 0x74
#define LPT_SSFSTS_CTL 0x90
#define LPT_FREG_NUM 5
+#define LPT_PR_NUM 5
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
+#define BXT_PR_NUM 6
+
+#define LVSCC 0xc4
+#define UVSCC 0xc8
+#define ERASE_OPCODE_SHIFT 8
+#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT 16
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -117,8 +130,11 @@
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
* @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
* @writeable: Is the chip writeable
- * @swseq: Use SW sequencer in register reads/writes
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. This are programmed by BIOS
* before it locks down the controller.
@@ -132,8 +148,11 @@ struct intel_spi {
void __iomem *pregs;
void __iomem *sregs;
size_t nregions;
+ size_t pr_num;
bool writeable;
- bool swseq;
+ bool locked;
+ bool swseq_reg;
+ bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
u8 preopcodes[2];
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
for (i = 0; i < ispi->nregions; i++)
dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
readl(ispi->base + FREG(i)));
- for (i = 0; i < PR_NUM; i++)
+ for (i = 0; i < ispi->pr_num; i++)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+ dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+ dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
dev_dbg(ispi->dev, "Protected regions:\n");
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 base, limit;
value = readl(ispi->pregs + PR(i));
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
- ispi->swseq ? 'S' : 'H');
+ ispi->swseq_reg ? 'S' : 'H');
+ dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+ ispi->swseq_erase ? 'S' : 'H');
}
/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
static int intel_spi_init(struct intel_spi *ispi)
{
- u32 opmenu0, opmenu1, val;
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
int i;
switch (ispi->info->type) {
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
ispi->pregs = ispi->base + BYT_PR;
ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
if (writeable) {
/* Disable write protection */
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
ispi->pregs = ispi->base + LPT_PR;
ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
break;
case INTEL_SPI_BXT:
ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
ispi->pregs = ispi->base + BXT_PR;
ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
ispi->erase_64k = true;
break;
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi)
return -EINVAL;
}
- /* Disable #SMI generation */
+ /* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
writel(val, ispi->base + HSFSTS_CTL);
/*
- * BIOS programs allowed opcodes and then locks down the register.
- * So read back what opcodes it decided to support. That's the set
- * we are going to support as well.
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
*/
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ ispi->erase_64k = false;
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
- * using software sequencer. If we find that BIOS has programmed
- * opcodes for the software sequencer we use that over the hardware
- * sequencer.
+ * using software sequencer.
*/
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
-
+ if (ispi->swseq_reg) {
/* Disable #SMI generation from SW sequencer */
val = readl(ispi->sregs + SSFSTS_CTL);
val &= ~SSFSTS_CTL_FSMIE;
writel(val, ispi->sregs + SSFSTS_CTL);
+ }
+
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
- ispi->swseq = true;
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+
+ val = readl(ispi->sregs + PREOP_OPTYPE);
+ ispi->preopcodes[0] = val;
+ ispi->preopcodes[1] = val >> 8;
+ }
}
intel_spi_dump_regs(ispi);
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi)
return 0;
}
-static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
int i;
+ int preop;
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
- if (ispi->opcodes[i] == opcode)
- return i;
- return -EINVAL;
+ if (ispi->locked) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+
+ return -EINVAL;
+ }
+
+ /* The lock is off, so just use index 0 */
+ writel(opcode, ispi->sregs + OPMENU0);
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+ return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
u32 val, status;
int ret;
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return -EINVAL;
}
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ int optype)
{
- u32 val, status;
+ u32 val = 0, status;
+ u16 preop;
int ret;
- ret = intel_spi_opcode_index(ispi, opcode);
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
if (ret < 0)
return ret;
- val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ /* Only mark 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if (preop) {
+ val |= SSFSTS_CTL_ACS;
+ if (preop >> 8)
+ val |= SSFSTS_CTL_SPOP;
+ }
writel(val, ispi->sregs + SSFSTS_CTL);
ret = intel_spi_wait_sw_busy(ispi);
if (ret)
return ret;
- status = readl(ispi->base + SSFSTS_CTL);
+ status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR)
return -EIO;
else if (status & SSFSTS_CTL_AEL)
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/* Address of the first chip */
writel(0, ispi->base + FADDR);
- if (ispi->swseq)
- ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ ret = intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+ ret = intel_spi_hw_cycle(ispi, opcode, len);
if (ret)
return ret;
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/*
* This is handled with atomic operation and preop code in Intel
- * controller so skip it here now.
+ * controller so skip it here now. If the controller is not locked,
+ * program the opcode to the PREOP register for later use.
*/
- if (opcode == SPINOR_OP_WREN)
+ if (opcode == SPINOR_OP_WREN) {
+ if (!ispi->locked)
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
+
return 0;
+ }
writel(0, ispi->base + FADDR);
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
if (ret)
return ret;
- if (ispi->swseq)
- return intel_spi_sw_cycle(ispi, opcode, buf, len);
- return intel_spi_hw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ return intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_WRITE_NO_ADDR);
+ return intel_spi_hw_cycle(ispi, opcode, len);
}
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_WRITE;
- /* Write enable */
- if (ispi->preopcodes[1] == SPINOR_OP_WREN)
- val |= SSFSTS_CTL_SPOP;
- val |= SSFSTS_CTL_ACS;
- writel(val, ispi->base + HSFSTS_CTL);
-
ret = intel_spi_write_block(ispi, write_buf, block_size);
if (ret) {
dev_err(ispi->dev, "failed to write block\n");
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
}
/* Start the write now */
- val = readl(ispi->base + HSFSTS_CTL);
- writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret) {
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
erase_size = SZ_4K;
}
+ if (ispi->swseq_erase) {
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
+ 0, OPTYPE_WRITE_WITH_ADDR);
+ if (ret)
+ return ret;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+ }
+
while (len > 0) {
writel(offs, ispi->base + FADDR);
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi,
{
int i;
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 pr_base, pr_limit, pr_value;
pr_value = readl(ispi->pregs + PR(i));
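Editor's note: a condensed view of the sequencer selection spread across the
intel-spi hunks above, as I read them; this is not new logic, only a summary
sketch.

	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);

	/* BYT and LPT always drive register cycles via the SW sequencer
	 * (swseq_reg = true in their init branches); BXT keeps the HW one */

	/* erase falls back to the SW sequencer when neither VSCC register
	 * advertises an erase opcode */
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;

	/* 64k erase on BXT is only kept when both registers carry one */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase &&
	    (!(lvscc & ERASE_64K_OPCODE_MASK) ||
	     !(uvscc & ERASE_64K_OPCODE_MASK)))
		ispi->erase_64k = false;

	/* opcodes are read back from OPMENU0/1 only when the controller is
	 * locked (FLOCKDN); otherwise intel_spi_opcode_index() reprograms
	 * slot 0 of OPMENU0 on the fly */
	ispi->locked = !!(readl(ispi->base + HSFSTS_CTL) & HSFSTS_CTL_FLOCKDN);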
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index c258c7adf1c5..abe455ccd68b 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
return ret;
}
+static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+{
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mt8173_nor->nor_clk);
+}
+
+static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ if (ret) {
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct device_node *flash_node)
{
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
return PTR_ERR(mt8173_nor->nor_clk);
mt8173_nor->dev = &pdev->dev;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+
+ ret = mt8173_nor_enable_clk(mt8173_nor);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- return ret;
- }
/* only support one attached flash */
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = mtk_nor_init(mt8173_nor, flash_np);
nor_free:
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
- }
+ if (ret)
+ mt8173_nor_disable_clk(mt8173_nor);
+
return ret;
}
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
{
struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ mt8173_nor_disable_clk(mt8173_nor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nor_suspend(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ mt8173_nor_disable_clk(mt8173_nor);
+
return 0;
}
+static int mtk_nor_resume(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ return mt8173_nor_enable_clk(mt8173_nor);
+}
+
+static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
+ .suspend = mtk_nor_suspend,
+ .resume = mtk_nor_resume,
+};
+
+#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
+#else
+#define MTK_NOR_DEV_PM_OPS NULL
+#endif
+
static const struct of_device_id mtk_nor_of_ids[] = {
{ .compatible = "mediatek,mt8173-nor"},
{ /* sentinel */ }
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = {
.remove = mtk_nor_drv_remove,
.driver = {
.name = "mtk-nor",
+ .pm = MTK_NOR_DEV_PM_OPS,
.of_match_table = mtk_nor_of_ids,
},
};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 19c000722cbc..bc266f70a15b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -89,6 +89,8 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+
+ int (*quad_enable)(struct spi_nor *nor);
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int macronix_quad_enable(struct spi_nor *nor);
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
/* Everspin */
+ { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -983,6 +988,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ .quad_enable = macronix_quad_enable,
+ },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ {
+ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
/* Check the SFDP header version. */
if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR ||
- header.minor < SFDP_JESD216_MINOR)
+ header.major != SFDP_JESD216_MAJOR)
return -EINVAL;
/*
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = spansion_quad_enable;
break;
}
+
+ /*
+ * Some manufacturers, such as GigaDevice, may use a different
+ * bit to set QE on different memories, so the MFR alone can't
+ * indicate the quad_enable method for this case; we need to
+ * set it in the flash info list.
+ */
+ if (info->quad_enable)
+ params->quad_enable = info->quad_enable;
}
/* Override the parameters with data read from SFDP tables. */
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
/* Enable Quad I/O if needed. */
enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
spi_nor_get_protocol_width(nor->write_proto) == 4);
- if (enable_quad_io && params->quad_enable) {
- err = params->quad_enable(nor);
+ if (enable_quad_io && params->quad_enable)
+ nor->quad_enable = params->quad_enable;
+ else
+ nor->quad_enable = NULL;
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ /*
+ * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
+ * up with the software protection bits set.
+ */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
+
+ if (nor->quad_enable) {
+ err = nor->quad_enable(nor);
if (err) {
dev_err(nor->dev, "quad mode not supported\n");
return err;
}
}
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 1);
+
return 0;
}
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set
- */
-
- if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(info) == SNOR_MFR_SST ||
- info->flags & SPI_NOR_HAS_LOCK) {
- write_enable(nor);
- write_sr(nor, 0);
- spi_nor_wait_till_ready(nor);
- }
-
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
info->flags & SPI_NOR_4B_OPCODES)
spi_nor_set_4byte_opcodes(nor, info);
- else
- set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
}
+ /* Send all the required SPI flash commands to initialize device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
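Editor's note: the gd25q256 entry above shows the new per-flash quad_enable
override in use. For illustration, a hypothetical entry of the same shape
(the chip name and JEDEC ID below are made up). The INFO() macro expands to
designated initializers ending in a trailing comma, which is why the extra
.quad_enable initializer can follow it directly.

	{
		"example25q", INFO(0x123456, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
			.quad_enable = macronix_quad_enable,
	},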
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index 86c0931543c5..b3c7f6addba7 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -1,9 +1,22 @@
/*
- * stm32_quadspi.c
+ * Driver for stm32 quadspi controller
*
- * Copyright (C) 2017, Ludovic Barre
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Ludovic Barre <ludovic.barre@st.com>.
*
- * License terms: GNU General Public License (GPL), version 2
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/errno.h>
@@ -113,6 +126,7 @@
#define STM32_MAX_MMAP_SZ SZ_256M
#define STM32_MAX_NORCHIP 2
+#define STM32_QSPI_FIFO_SZ 32
#define STM32_QSPI_FIFO_TIMEOUT_US 30000
#define STM32_QSPI_BUSY_TIMEOUT_US 100000
@@ -124,6 +138,7 @@ struct stm32_qspi_flash {
u32 presc;
u32 read_mode;
bool registered;
+ u32 prefetch_limit;
};
struct stm32_qspi {
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
STM32_QSPI_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
- break;
+ return ret;
}
tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
}
- return ret;
+ return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
{
struct stm32_qspi *qspi = flash->qspi;
u32 ccr, dcr, cr;
+ u32 last_byte;
int err;
err = stm32_qspi_wait_nobusy(qspi);
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
if (err)
goto abort;
writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
+ } else {
+ last_byte = cmd->addr + cmd->len;
+ if (last_byte > flash->prefetch_limit)
+ goto abort;
}
return err;
@@ -322,7 +342,9 @@ abort:
cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
- dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+ if (err)
+ dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+
return err;
}
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
}
flash->fsize = FSIZE_VAL(mtd->size);
+ flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
flash->read_mode = CCR_FMODE_MM;
if (mtd->size > qspi->mm_size)
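Editor's note: the prefetch_limit added above is mtd->size minus the 32-byte
controller FIFO; when a transfer's last byte (cmd->addr + cmd->len) lands past
it, the send path takes the abort branch, which quiesces the controller while
still returning success for the transfer. My reading, not stated in the hunk,
is that this keeps the controller's read prefetcher from running past the end
of the flash. For a hypothetical 16 MiB device:

	prefetch_limit = 16 * 1024 * 1024 - 32 = 0xFFFFE0
	/* a read ending beyond 0xFFFFE0 triggers the CR_ABORT write */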
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index a1b33aa6054a..9697977b80f0 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return -EINVAL;
bond_opt_initval(&newval,
- nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+ nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
if (err)
return err;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index fed75e75207a..b8029ea03307 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -66,9 +66,9 @@ static const struct cfhsi_config hsi_default_config = {
static LIST_HEAD(cfhsi_list);
-static void cfhsi_inactivity_tout(unsigned long arg)
+static void cfhsi_inactivity_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -737,9 +737,9 @@ out_of_sync:
schedule_work(&cfhsi->out_of_sync_work);
}
-static void cfhsi_rx_slowpath(unsigned long arg)
+static void cfhsi_rx_slowpath(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -997,9 +997,9 @@ static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
wake_up_interruptible(&cfhsi->wake_down_wait);
}
-static void cfhsi_aggregation_tout(unsigned long arg)
+static void cfhsi_aggregation_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -1211,14 +1211,11 @@ static int cfhsi_open(struct net_device *ndev)
init_waitqueue_head(&cfhsi->flush_fifo_wait);
/* Setup the inactivity timer. */
- setup_timer(&cfhsi->inactivity_timer, cfhsi_inactivity_tout,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
/* Setup the slowpath RX timer. */
- setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
/* Setup the aggregation timer. */
- setup_timer(&cfhsi->aggregation_timer, cfhsi_aggregation_tout,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
/* Activate HSI interface. */
res = cfhsi->ops->cfhsi_up(cfhsi->ops);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index b6e2bfd7d2d6..8b1a859f5140 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -165,9 +165,16 @@ static unsigned int network_rec_config_shadow = 0;
static unsigned int network_tr_ctrl_shadow = 0;
+/* Timers */
+static void e100_check_speed(struct timer_list *unused);
+static void e100_clear_network_leds(struct timer_list *unused);
+static void e100_check_duplex(struct timer_list *unused);
+static DEFINE_TIMER(speed_timer, e100_check_speed);
+static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds);
+static DEFINE_TIMER(duplex_timer, e100_check_duplex);
+static struct net_device *timer_dev;
+
/* Network speed indication. */
-static DEFINE_TIMER(speed_timer, NULL);
-static DEFINE_TIMER(clear_led_timer, NULL);
static int current_speed; /* Speed read from transceiver */
static int current_speed_selection; /* Speed selected by user */
static unsigned long led_next_time;
@@ -175,7 +182,6 @@ static int led_active;
static int rx_queue_len;
/* Duplex */
-static DEFINE_TIMER(duplex_timer, NULL);
static int full_duplex;
static enum duplex current_duplex;
@@ -200,9 +206,7 @@ static void update_rx_stats(struct net_device_stats *);
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);
-static void e100_check_speed(unsigned long priv);
static void e100_set_speed(struct net_device* dev, unsigned long speed);
-static void e100_check_duplex(unsigned long priv);
static void e100_set_duplex(struct net_device* dev, enum duplex);
static void e100_negotiate(struct net_device* dev);
@@ -214,7 +218,6 @@ static void e100_send_mdio_bit(unsigned char bit);
static unsigned char e100_receive_mdio_bit(void);
static void e100_reset_transceiver(struct net_device* net);
-static void e100_clear_network_leds(unsigned long dummy);
static void e100_set_network_leds(int active);
static const struct ethtool_ops e100_ethtool_ops;
@@ -381,17 +384,12 @@ etrax_ethernet_init(void)
current_speed = 10;
current_speed_selection = 0; /* Auto */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
- speed_timer.data = (unsigned long)dev;
- speed_timer.function = e100_check_speed;
-
- clear_led_timer.function = e100_clear_network_leds;
- clear_led_timer.data = (unsigned long)dev;
full_duplex = 0;
current_duplex = autoneg;
duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
- duplex_timer.data = (unsigned long)dev;
- duplex_timer.function = e100_check_duplex;
+
+ timer_dev = dev;
/* Initialize mii interface */
np->mii_if.phy_id_mask = 0x1f;
@@ -680,9 +678,9 @@ intel_check_speed(struct net_device* dev)
}
#endif
static void
-e100_check_speed(unsigned long priv)
+e100_check_speed(struct timer_list *unused)
{
- struct net_device* dev = (struct net_device*)priv;
+ struct net_device* dev = timer_dev;
struct net_local *np = netdev_priv(dev);
static int led_initiated = 0;
unsigned long data;
@@ -799,9 +797,9 @@ e100_set_speed(struct net_device* dev, unsigned long speed)
}
static void
-e100_check_duplex(unsigned long priv)
+e100_check_duplex(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)priv;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
int old_duplex;
@@ -1669,9 +1667,9 @@ e100_hardware_send_packet(struct net_local *np, char *buf, int length)
}
static void
-e100_clear_network_leds(unsigned long dummy)
+e100_clear_network_leds(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)dummy;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
spin_lock(&np->led_lock);
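The cris eth_v10.c hunks above handle the statically defined variant: DEFINE_TIMER() now takes just the name and the callback, and because there is no containing structure for from_timer() to recover, the single net_device is kept in a file-scope pointer (timer_dev) assigned once at init time. A hedged sketch of that shape, with illustrative names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

static void sample_poll(struct timer_list *unused);
static DEFINE_TIMER(sample_timer, sample_poll);
static struct net_device *sample_dev;	/* set once in probe/init */

static void sample_poll(struct timer_list *unused)
{
	struct net_device *dev = sample_dev;

	/* Poll the hardware on behalf of dev, then re-arm. */
	mod_timer(&sample_timer, jiffies + HZ);
}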
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 93faa1fed6f2..ea01f24f15e7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -95,7 +95,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
- reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 436668bd50dc..46af8052e535 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -149,9 +149,9 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
mutex_unlock(&chip->reg_lock);
}
-static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = (void *)_ps;
+ struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
@@ -193,8 +193,7 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
{
mutex_init(&chip->ppu_mutex);
INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
- setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
- (unsigned long)chip);
+ timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
}
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index fccce4b47778..74263f8efe1a 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -139,9 +139,9 @@ static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
-static void eql_timer(unsigned long param)
+static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = (equalizer_t *) param;
+ equalizer_t *eql = from_timer(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
@@ -178,7 +178,7 @@ static void __init eql_setup(struct net_device *dev)
{
equalizer_t *eql = netdev_priv(dev);
- setup_timer(&eql->timer, eql_timer, (unsigned long)eql);
+ timer_setup(&eql->timer, eql_timer, 0);
eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
spin_lock_init(&eql->queue.lock);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 0658cde1586a..7120f2b9c6ef 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1092,9 +1092,11 @@ static void tx_reclaim_skb(struct bfin_mac_local *lp)
return;
}
-static void tx_reclaim_skb_timeout(unsigned long lp)
+static void tx_reclaim_skb_timeout(struct timer_list *t)
{
- tx_reclaim_skb((struct bfin_mac_local *)lp);
+ struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
+
+ tx_reclaim_skb(lp);
}
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
@@ -1650,8 +1652,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
ndev->netdev_ops = &bfin_mac_netdev_ops;
ndev->ethtool_ops = &bfin_mac_ethtool_ops;
- setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout,
- (unsigned long)lp);
+ timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
lp->flags = 0;
netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 658e92f79d36..48220b6c600d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3080,9 +3080,9 @@ err_out:
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-static void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = (struct et131x_adapter *)data;
+ struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3624,8 +3624,7 @@ static int et131x_open(struct net_device *netdev)
int result;
/* Start the timer to track NIC errors */
- setup_timer(&adapter->error_timer, et131x_error_timer_handler,
- (unsigned long)adapter);
+ timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
adapter->error_timer.expires = jiffies +
msecs_to_jiffies(TX_ERROR_PERIOD);
add_timer(&adapter->error_timer);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7451922c209d..97c5a89a9cf7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2579,6 +2579,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
bool wd_state;
int rc;
+ set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
@@ -2592,6 +2593,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+ /* Make sure we don't have a race with AENQ Links state handler */
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
@@ -2618,7 +2624,7 @@ err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
-
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
dev_err(&pdev->dev,
"Reset attempt failed. Can not reset the device\n");
@@ -2853,9 +2859,9 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
-static void ena_timer_service(unsigned long data)
+static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -3272,8 +3278,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_update_hints(adapter, &get_feat_ctx.hw_hints);
- setup_timer(&adapter->timer_service, ena_timer_service,
- (unsigned long)adapter);
+ timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
@@ -3495,7 +3500,8 @@ static void ena_update_on_link_change(void *adapter_data,
if (status) {
netdev_dbg(adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
- netif_carrier_on(adapter->netdev);
+ if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
} else {
clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
netif_carrier_off(adapter->netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index ed8bd0a579c4..3bbc003871de 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -272,7 +272,8 @@ enum ena_flags_t {
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
ENA_FLAG_MSIX_ENABLED,
- ENA_FLAG_TRIGGER_RESET
+ ENA_FLAG_TRIGGER_RESET,
+ ENA_FLAG_ONGOING_RESET
};
/* adapter specific private data structure */
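The ENA hunks above add ENA_FLAG_ONGOING_RESET so the AENQ link-state handler does not call netif_carrier_on() while ena_restore_device() is still re-initialising the device; once the flag is cleared the restore path re-checks ENA_FLAG_LINK_UP itself, and the error path clears the flag as well. An illustrative outline of that ordering (bring_device_back_up() is a hypothetical stand-in, not an ENA function):

#include <linux/bitops.h>
#include <linux/netdevice.h>

static int restore_device_outline(struct ena_adapter *adapter)
{
	int rc;

	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);

	rc = bring_device_back_up(adapter);	/* hypothetical stand-in */
	if (rc) {
		clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
		return rc;
	}

	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	/* Pick up a link-up event the AENQ handler recorded while the flag
	 * was set and netif_carrier_on() was therefore suppressed.
	 */
	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	return 0;
}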
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 483e97691eea..78dfb2ab78ce 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -163,9 +163,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
return 0;
}
-static void aq_nic_service_timer_cb(unsigned long param)
+static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
@@ -201,9 +201,9 @@ err_exit:
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}
-static void aq_nic_polling_timer_cb(unsigned long param)
+static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, polling_timer);
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@@ -440,14 +440,12 @@ int aq_nic_start(struct aq_nic_s *self)
err = aq_nic_update_interrupt_moderation_settings(self);
if (err)
goto err_exit;
- setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
mod_timer(&self->service_timer, jiffies +
AQ_CFG_SERVICE_TIMER_INTERVAL);
if (self->aq_nic_cfg.is_polling) {
- setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 8c9986f3fc01..94270f654b3b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -222,9 +222,10 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1c_phy_config(unsigned long data)
+static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+ struct atl1c_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2613,8 +2614,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
if (err) {
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7e195af0bc..9dc6da039a6d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -130,9 +130,10 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1e_phy_config(unsigned long data)
+static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+ struct atl1e_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2361,8 +2362,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
/* get user settings */
atl1e_check_options(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 83d2db2abb45..b81fbf119bce 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2575,9 +2575,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1_phy_config(unsigned long data)
+static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -3071,8 +3072,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* assume we have no link for now */
netif_carrier_off(netdev);
- setup_timer(&adapter->phy_config_timer, atl1_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
adapter->phy_timer_pending = false;
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 77a1c03255de..db4bcc51023a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1028,9 +1028,9 @@ static void atl2_tx_timeout(struct net_device *netdev)
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_watchdog(unsigned long data)
+static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1053,9 +1053,10 @@ static void atl2_watchdog(unsigned long data)
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_phy_config(unsigned long data)
+static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1434,11 +1435,9 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_check_options(adapter);
- setup_timer(&adapter->watchdog_timer, atl2_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
- setup_timer(&adapter->phy_config_timer, atl2_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 42e44fc03a18..e445ab724827 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -599,9 +599,9 @@ static void b44_check_phy(struct b44 *bp)
}
}
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = (struct b44 *) __opaque;
+ struct b44 *bp = from_timer(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1474,7 +1474,7 @@ static int b44_open(struct net_device *dev)
goto out;
}
- setup_timer(&bp->timer, b44_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, b44_timer, 0);
bp->timer.expires = jiffies + HZ;
add_timer(&bp->timer);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b3055a76dfbf..7919f6112ecf 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6183,9 +6183,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
}
static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = (struct bnx2 *) data;
+ struct bnx2 *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -8462,7 +8462,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bnx2_set_default_link(bp);
bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
- setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnx2_timer, 0);
bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
#ifdef BCM_CNIC
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index be9fd7d184d0..91e2a7560b48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5761,9 +5761,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = (struct bnx2x *) data;
+ struct bnx2x *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -12421,7 +12421,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
- setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnx2x_timer, 0);
bp->timer.expires = jiffies + bp->current_interval;
if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 33c49ad697e4..c5c38d4b7d1c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6962,9 +6962,9 @@ static void bnxt_poll_controller(struct net_device *dev)
}
#endif
-static void bnxt_timer(unsigned long data)
+static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = (struct bnxt *)data;
+ struct bnxt *bp = from_timer(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev))
@@ -7236,7 +7236,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bnxt_init_dflt_coal(bp);
- setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7ce1d4b7e67d..b13ce5ebde8d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
- length, data);
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ start, length, data);
}
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d8d5f207c759..de51c2177d03 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10931,9 +10931,9 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
}
}
-static void tg3_timer(unsigned long __opaque)
+static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = (struct tg3 *) __opaque;
+ struct tg3 *tp = from_timer(tp, t, timer);
spin_lock(&tp->lock);
@@ -11087,7 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp)
tp->asf_multiplier = (HZ / tp->timer_offset) *
TG3_FW_UPDATE_FREQ_SEC;
- setup_timer(&tp->timer, tg3_timer, (unsigned long)tp);
+ timer_setup(&tp->timer, tg3_timer, 0);
}
static void tg3_timer_start(struct tg3 *tp)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d4496e9afcdf..8b2c31e2a2b0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1355,7 +1355,6 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
/* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 8dc21c9f9716..973c1fb70d09 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -123,9 +123,9 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
}
#ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
+void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 0ae83e091a62..8c4ce50da6e1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -16,12 +16,11 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
#ifdef CONFIG_RFS_ACCEL
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-void enic_flow_may_expire(unsigned long data);
+void enic_flow_may_expire(struct timer_list *t);
static inline void enic_rfs_timer_start(struct enic *enic)
{
- setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire,
- (unsigned long)enic);
+ timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4a11baffe02d..e130fb757e7b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1676,9 +1676,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
return work_done;
}
-static void enic_notify_timer(unsigned long data)
+static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, notify_timer);
enic_notify_check(enic);
@@ -2846,8 +2846,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notification timer, HW reset task, and wq locks
*/
- setup_timer(&enic->notify_timer, enic_notify_timer,
- (unsigned long)enic);
+ timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8172cf08cc33..3bac9df1c099 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
rar_num = E1000_RAR_ENTRIES;
- /* Zero out the other 15 receive addresses. */
- e_dbg("Clearing RAR[1-15]\n");
+ /* Zero out the following 14 receive addresses. RAR[15] is for
+ * manageability
+ */
+ e_dbg("Clearing RAR[1-14]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 67163ca898ba..00a36df02a3f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -113,7 +113,8 @@
#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f2f49239b015..9f18d39bdc8f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0));
- /* SPT and KBL Si errata workaround to avoid Tx hang */
- reg_val &= ~BIT(28);
- reg_val |= BIT(29);
+ /* SPT and KBL Si errata workaround to avoid Tx hang.
+ * Dropping the number of outstanding requests from
+ * 3 to 2 in order to avoid a buffer overrun.
+ */
+ reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+ reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
ew32(TARC(0), reg_val);
}
}
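The e1000e hunks above keep the SPT/KBL errata workaround's effect (clear bit 28, set bit 29 of TARC0, i.e. two outstanding requests instead of three) but spell it with named masks. A brief sketch using those masks:

	/* Same net effect as the old BIT(28)/BIT(29) manipulation. */
	reg_val = er32(TARC(0));
	reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;	/* clear bits 29:28 */
	reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;		/* allow two outstanding requests */
	ew32(TARC(0), reg_val);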
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index dbd69310f263..538b42d5c187 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1231,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
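The read_barrier_depends() calls in the Intel Tx clean paths (fm10k here, and i40e/i40evf/igb/igbvf/ixgbe/ixgbevf below) become smp_rmb(): read_barrier_depends() only orders data-dependent loads, and the read of the descriptor's done bit does not depend on the earlier read of next_to_watch, so the stronger read barrier is needed to keep the done-bit check from being speculated ahead. A hedged sketch of the loop-body shape, with placeholder names (tx_buf, tx_desc, TXD_STAT_DD are not any particular driver's identifiers):

static bool tx_desc_done(const struct tx_buf *buf, const struct tx_desc *eop)
{
	if (!buf->next_to_watch)
		return false;

	/* prevent any other reads prior to the EOP descriptor */
	smp_rmb();

	/* if DD is not set, pending work has not been completed */
	return eop->status & TXD_STAT_DD;
}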
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5829715fa342..e019baa905c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -90,7 +90,6 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
-#define I40E_MAX_QUEUES_PER_CH 64
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9dcb2a961197..9af74253c3f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0203665cb53c..095965f268bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1268,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
if (hw->revision_id == 0)
cnt = I40E_PF_RESET_WAIT_COUNT_A0;
else
@@ -1279,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
usleep_range(1000, 2000);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
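The i40e_pf_reset() hunk above makes the pre-reset wait loop also poll I40E_GLGEN_RSTAT: if the DEVSTATE field shows a core reset is on the way, the PF reset request is skipped and the function returns I40E_ERR_NOT_READY instead of waiting out the timeout. A condensed, illustrative outline of the loop:

	for (i = 0; i < cnt; i++) {
		reg = rd32(hw, I40E_PFGEN_CTRL);
		if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
			break;				/* previous PF reset done */
		reg2 = rd32(hw, I40E_GLGEN_RSTAT);
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
			return I40E_ERR_NOT_READY;	/* core reset is coming */
		usleep_range(1000, 2000);
	}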
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a964d6e4a9e..321d8be80871 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2167,6 +2167,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
}
/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in. This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ pf->lan_veb != I40E_NO_VEB &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
+ */
+ if (promisc)
+ aq_ret = i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret = i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL,
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+
+ if (!aq_ret)
+ pf->cur_promisc = promisc;
+
+ return aq_ret;
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -2467,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
- if ((vsi->type == I40E_VSI_MAIN) &&
- (pf->lan_veb != I40E_NO_VEB) &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- /* set defport ON for Main VSI instead of true promisc
- * this way we will get all unicast/multicast and VLAN
- * promisc behavior but will not get VF or VMDq traffic
- * replicated on the Main VSI.
- */
- if (pf->cur_promisc != cur_promisc) {
- pf->cur_promisc = cur_promisc;
- if (cur_promisc)
- aq_ret =
- i40e_aq_set_default_vsi(hw,
- vsi->seid,
- NULL);
- else
- aq_ret =
- i40e_aq_clear_default_vsi(hw,
- vsi->seid,
- NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "Set default VSI failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL,
- true);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set unicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set multicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
+ aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
out:
@@ -3964,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
@@ -5629,14 +5631,6 @@ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
return -EINVAL;
*reconfig_rss = false;
-
- if (num_queues > I40E_MAX_QUEUES_PER_CH) {
- dev_err(&pf->pdev->dev,
- "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
- num_queues, I40E_MAX_QUEUES_PER_CH);
- return -EINVAL;
- }
-
if (vsi->current_rss_size) {
if (num_queues > vsi->current_rss_size) {
dev_dbg(&pf->pdev->dev,
@@ -7407,7 +7401,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
dev_err(&pf->pdev->dev,
"Failed to add cloud filter, err %s\n",
i40e_stat_str(&pf->hw, err));
- err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
goto err;
}
@@ -9429,6 +9422,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (!lock_acquired)
rtnl_unlock();
+ /* Restore promiscuous settings */
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 0ccab0a5d717..7689c2ee0d46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -328,15 +328,17 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- i40e_status ret_code;
+ i40e_status ret_code = 0;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- i40e_release_nvm(hw);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
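Taken together with the i40e_adminq.c and i40e_common.c hunks earlier and the I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK definition in i40e_type.h below, the change above makes the NVM ownership semaphore conditional: firmware exposing AdminQ API 1.5 or newer (and X722 unconditionally) needs the lock around NVM word reads, while older firmware does not. Condensed here into a single condition purely for illustration; in the patch the flag is set in two separate init paths:

	if (hw->mac.type == I40E_MAC_X722 ||
	    hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

i40e_read_nvm_word() then only calls i40e_acquire_nvm()/i40e_release_nvm() when the flag is set, as shown in the hunk above.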
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d6d352a6e6ea..4566d66ffc7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 00d4833e9925..0e8568719b4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -629,6 +629,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index f8a794b72462..36cb8e068e85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
}
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
- (u8 *)vfres, sizeof(vfres));
+ (u8 *)vfres, sizeof(*vfres));
}
/**
@@ -2218,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f)
+ if (!f) {
f = i40e_add_mac_filter(vsi, al->list[i].addr);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add MAC filter %pM for VF %d\n",
- al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- goto error_param;
- } else {
- vf->num_mac++;
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add MAC filter %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ } else {
+ vf->num_mac++;
+ }
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
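The one-character i40e_vc_request_queues_msg() fix above is the classic sizeof-a-pointer bug: sizeof(vfres) is the size of the pointer variable, not of the message being sent, whereas sizeof(*vfres) is the size of the structure it points to. A standalone illustration (struct reply and its field are hypothetical, not the virtchnl layout):

#include <stdio.h>

struct reply {
	unsigned int num_queue_pairs;
};

int main(void)
{
	struct reply r = { .num_queue_pairs = 4 };
	struct reply *vfres = &r;

	/* sizeof(vfres) measures the pointer; sizeof(*vfres) measures the payload. */
	printf("sizeof(vfres)  = %zu\n", sizeof(vfres));	/* typically 8 */
	printf("sizeof(*vfres) = %zu\n", sizeof(*vfres));	/* 4 */
	return 0;
}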
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index fe817e2b6fef..50864f99446d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index d8131139565e..da60ce12b33d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -26,6 +26,26 @@ static struct i40e_ops i40evf_lan_ops = {
};
/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ int i;
+
+ memset(params, 0, sizeof(struct i40e_params));
+ params->mtu = vsi->netdev->mtu;
+ params->link_up = vsi->back->link_up;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ params->qos.prio_qos[i].tc = 0;
+ params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+ }
+}
+
+/**
* i40evf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
* @msg: message buffer
@@ -66,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
return;
cinst = vsi->back->cinst;
- memset(&params, 0, sizeof(params));
- params.mtu = vsi->netdev->mtu;
- params.link_up = vsi->back->link_up;
- params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->l2_param_change) {
@@ -77,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
@@ -166,9 +184,9 @@ static struct i40e_client_instance *
i40evf_client_add_instance(struct i40evf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
- struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_params params;
if (!vf_registered_client)
goto out;
@@ -192,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
&adapter->msix_entries[adapter->iwarp_base_vector];
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- cinst->lan_info.params.qos.prio_qos[i].tc = 0;
- cinst->lan_info.params.qos.prio_qos[i].qs_handle =
- vsi->qs_handle;
- }
-
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ca2ebdbd24d7..7b2a4eba92e2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2110,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ goto out;
+ }
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@@ -2118,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
- goto out;
- }
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e94d3c256667..c208753ff5b7 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7317,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 713e8df23744..4214c1519a87 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ca06c3cc2ca8..62a18914f00f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index feed11bc9ddf..1f4a69134ade 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81c1fac00d33..62f204f32316 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1346,9 +1346,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
spin_unlock_bh(&mp->mib_counters_lock);
}
-static void mib_counters_timer_wrapper(unsigned long _mp)
+static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)_mp;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2321,9 +2321,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static inline void oom_timer_wrapper(unsigned long data)
+static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)data;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
@@ -3178,8 +3178,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
- (unsigned long)mp);
+ timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3188,7 +3187,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+ timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 6c20e811f973..d83a78be98a2 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val |= MVPP2_GMAC_DISABLE_PADDING;
- val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
val &= ~MVPP22_CTRL4_DP_CLK_SEL;
writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val &= ~MVPP2_GMAC_DISABLE_PADDING;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
/* The port is connected to a copper PHY */
@@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
sizeof(*txq_pcpu->buffs),
GFP_KERNEL);
if (!txq_pcpu->buffs)
- goto cleanup;
+ return -ENOMEM;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
- goto cleanup;
+ return -ENOMEM;
}
return 0;
-cleanup:
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->buffs);
-
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
- }
-
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_dma);
-
- return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
+ /* The Tx ring size cannot be smaller than the minimum number of
+ * descriptors needed for TSO.
+ */
+ if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+ new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
if (ring->rx_pending != new_rx_pending) {
netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
ring->rx_pending, new_rx_pending);
@@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)
for_each_available_child_of_node(dn, port_node) {
err = mvpp2_port_probe(pdev, port_node, priv, i);
if (err < 0)
- goto err_mg_clk;
+ goto err_port_probe;
i++;
}
@@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
if (!priv->stats_queue) {
err = -ENOMEM;
- goto err_mg_clk;
+ goto err_port_probe;
}
platform_set_drvdata(pdev, priv);
return 0;
+err_port_probe:
+ i = 0;
+ for_each_available_child_of_node(dn, port_node) {
+ if (priv->port_list[i])
+ mvpp2_port_remove(priv->port_list[i]);
+ i++;
+ }
err_mg_clk:
clk_disable_unprepare(priv->axi_clk);
if (priv->hw_version == MVPP22)
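The mvpp2 hunks above simplify two error paths: mvpp2_txq_init() now returns -ENOMEM directly, since the removed cleanup block duplicated freeing that the caller's teardown already performs on failure (freeing the same buffers twice), and the new err_port_probe label in probe removes any ports that were created before the failure instead of leaking them. A generic illustration of the ownership rule (struct txq and struct txq_pcpu here are hypothetical, not the driver's types):

#include <linux/percpu.h>
#include <linux/slab.h>

struct txq_pcpu { void **buffs; };
struct txq {
	int size;
	struct txq_pcpu __percpu *pcpu;
};

static int txq_buffers_init(struct txq *txq)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct txq_pcpu *pc = per_cpu_ptr(txq->pcpu, cpu);

		pc->buffs = kcalloc(txq->size, sizeof(*pc->buffs), GFP_KERNEL);
		if (!pc->buffs)
			return -ENOMEM;	/* the caller's deinit frees earlier CPUs */
	}
	return 0;
}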
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 91b1c154fd29..7bbd86f08e5f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -362,9 +362,9 @@ static void rxq_refill(struct net_device *dev)
}
}
-static inline void rxq_refill_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = (void *)data;
+ struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
napi_schedule(&pep->napi);
}
@@ -1496,8 +1496,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
memset(&pep->timeout, 0, sizeof(struct timer_list));
- setup_timer(&pep->timeout, rxq_refill_timer_wrapper,
- (unsigned long)pep);
+ timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
pep->smi_bus = mdiobus_alloc();
if (!pep->smi_bus) {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index eef35bf3e849..6e423f098a60 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1495,9 +1495,9 @@ static int xm_check_link(struct net_device *dev)
* get an interrupt when carrier is detected, need to poll for
* link coming up.
*/
-static void xm_link_timer(unsigned long arg)
+static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = (struct skge_port *) arg;
+ struct skge_port *skge = from_timer(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -3897,7 +3897,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* Only used for Genesis XMAC */
if (is_genesis(hw))
- setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+ timer_setup(&skge->link_timer, xm_link_timer, 0);
else {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 1145cde2274a..9efe1771423c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2974,9 +2974,9 @@ static int sky2_rx_hung(struct net_device *dev)
}
}
-static void sky2_watchdog(unsigned long arg)
+static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = (struct sky2_hw *) arg;
+ struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
@@ -5083,7 +5083,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sky2_show_addr(dev1);
}
- setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+ timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d46ec84ebdf..2d0897b7d860 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3142,13 +3142,17 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+ mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
+ GFP_KERNEL);
if (!mlxsw_sp->port_to_module) {
err = -ENOMEM;
goto err_port_to_module_alloc;
}
for (i = 1; i < max_ports; i++) {
+ /* Mark as invalid */
+ mlxsw_sp->port_to_module[i] = -1;
+
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width, &lane);
if (err)
@@ -3216,6 +3220,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
local_port = base_port + i * 2;
+ if (mlxsw_sp->port_to_module[local_port] < 0)
+ continue;
module = mlxsw_sp->port_to_module[local_port];
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 58cf222fb985..432ab9b12b7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -152,7 +152,7 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
struct mlxsw_sp_upper *lags;
- u8 *port_to_module;
+ int *port_to_module;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
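The mlxsw change above turns port_to_module into a signed array pre-filled with -1, so ports whose module query failed (or that were never probed) can be told apart from ports legitimately mapped to module 0; the unsplit path then skips entries that are still negative. A small sketch of the sentinel idea (struct ports_map is hypothetical; as in the hunk, index 0 is left untouched):

#include <linux/slab.h>

struct ports_map {
	int *port_to_module;
};

static int ports_map_init(struct ports_map *map, unsigned int max_ports)
{
	unsigned int i;

	map->port_to_module = kmalloc_array(max_ports, sizeof(int), GFP_KERNEL);
	if (!map->port_to_module)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++)
		map->port_to_module[i] = -1;	/* invalid until the module query succeeds */

	return 0;
}

Consumers check for a negative entry before using it, exactly as the spectrum.c unsplit hunk above does.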
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 632c7b229054..72ef4f8025f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1370,8 +1370,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1389,17 +1390,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(new_lb_rif);
ipip_entry->ol_lb = new_lb_rif;
- if (keep_encap) {
- list_splice_init(&old_lb_rif->common.nexthop_list,
- &new_lb_rif->common.nexthop_list);
- mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
- }
+ if (keep_encap)
+ mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+ &new_lb_rif->common);
mlxsw_sp_rif_destroy(&old_lb_rif->common);
return 0;
}
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+
/**
* Update the offload related to an IPIP entry. This always updates decap, and
* in addition to that it also:
@@ -1449,9 +1451,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry =
mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+ u32 ul_tb_id;
if (!ipip_entry)
return 0;
+
+ /* For flat configuration cases, moving overlay to a different VRF might
+ * cause local address conflict, and the conflicting tunnels need to be
+ * demoted.
+ */
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, false, false, extack);
}
@@ -3343,22 +3363,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
return ul_dev ? (ul_dev->flags & IFF_UP) : true;
}
-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh,
- struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
bool removing;
if (!nh->nh_grp->gateway || nh->ipip_entry)
- return 0;
-
- nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
- if (!nh->ipip_entry)
- return -ENOENT;
+ return;
- removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+ nh->ipip_entry = ipip_entry;
+ removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- return 0;
+ mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
}
static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -3403,21 +3420,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct fib_nh *fib_nh)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
struct net_device *dev = fib_nh->nh_dev;
- enum mlxsw_sp_ipip_type ipipt;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
- if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
- router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
- if (err)
- return err;
- mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
- return 0;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev,
+ MLXSW_SP_L3_PROTO_IPV4)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3545,6 +3562,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
}
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+ list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+ nh->rif = new_rif;
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -3996,7 +4025,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
case RTN_LOCAL:
ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4, dip);
- if (ipip_entry) {
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
fib_entry,
@@ -4694,21 +4723,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct rt6_info *rt)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
struct net_device *dev = rt->dst.dev;
- enum mlxsw_sp_ipip_type ipipt;
struct mlxsw_sp_rif *rif;
int err;
- if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
- router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV6)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
- if (err)
- return err;
- mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
- return 0;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev,
+ MLXSW_SP_L3_PROTO_IPV6)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
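
The spectrum_router hunks above replace open-coded list splicing with a helper that moves all nexthops to the new RIF and also rewrites each nexthop's back-pointer before the old RIF is destroyed. A minimal sketch of that splice-and-repoint pattern, illustrative only and with hypothetical struct and field names (not part of the patch):

#include <linux/list.h>

struct example_parent {
	struct list_head children;
};

struct example_child {
	struct list_head node;
	struct example_parent *owner;
};

static void example_migrate_children(struct example_parent *old_parent,
				     struct example_parent *new_parent)
{
	struct example_child *c;

	/* Move the whole list in O(1), then fix up each back-pointer. */
	list_splice_init(&old_parent->children, &new_parent->children);
	list_for_each_entry(c, &new_parent->children, node)
		c->owner = new_parent;
}
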
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b171ed2015fe..2521c8c40015 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3501,7 +3501,7 @@ static void myri10ge_watchdog(struct work_struct *work)
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
-static void myri10ge_watchdog_timer(unsigned long arg)
+static void myri10ge_watchdog_timer(struct timer_list *t)
{
struct myri10ge_priv *mgp;
struct myri10ge_slice_state *ss;
@@ -3509,7 +3509,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
u32 rx_pause_cnt;
u16 cmd;
- mgp = (struct myri10ge_priv *)arg;
+ mgp = from_timer(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
@@ -3930,8 +3930,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_save_state(pdev);
/* Setup the watchdog timer */
- setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
- (unsigned long)mgp);
+ timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
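
This driver, and most of the drivers that follow in this series, makes the same conversion to the newer kernel timer API: the callback takes a struct timer_list * and recovers its containing structure with from_timer(), while timer_setup() replaces setup_timer() and the unsigned long cast. A minimal sketch of the idiom, with hypothetical names ("example_dev", "watchdog"), not taken from any of the drivers above:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_dev {
	struct timer_list watchdog;
};

static void example_dev_watchdog(struct timer_list *t)
{
	/* Recover the enclosing structure from the timer pointer. */
	struct example_dev *dev = from_timer(dev, t, watchdog);

	/* ... periodic work on dev ... */
	mod_timer(&dev->watchdog, jiffies + HZ);
}

static void example_dev_init(struct example_dev *dev)
{
	timer_setup(&dev->watchdog, example_dev_watchdog, 0);
	mod_timer(&dev->watchdog, jiffies + HZ);
}
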
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index fe7e0e1dd01d..b2299f2b2155 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1530,7 +1530,7 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for"
"vpath:%d", vp_id);
- return status;
+ return status;
}
} else
return VXGE_HW_FAIL;
@@ -1950,19 +1950,19 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
* for all VPATHs. The h/w only uses the lowest numbered VPATH
* when steering frames.
*/
- for (index = 0; index < vdev->no_of_vpath; index++) {
+ for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
vdev->config.rth_algorithm,
&hash_types,
vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
+ if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d",
vdev->vpaths[index].device_id);
return status;
- }
- }
+ }
+ }
return status;
}
@@ -1991,7 +1991,7 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for "
"vpath:%d", i);
- return status;
+ return status;
}
}
}
@@ -2474,32 +2474,31 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
+ vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].fifo;
irq_req = 1;
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle,
- 0,
+ vdev->entries[intr_cnt].vector,
+ vxge_rx_msix_napi_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].ring;
irq_req = 1;
break;
@@ -2512,9 +2511,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
vxge_rem_msix_isr(vdev);
vdev->config.intr_type = INTA;
vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA"
- , vdev->ndev->name);
- goto INTA_MODE;
+ "%s: Defaulting to INTA",
+ vdev->ndev->name);
+ goto INTA_MODE;
}
if (irq_req) {
@@ -4505,8 +4504,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
+ ret = -EINVAL;
+ goto _exit3;
}
if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index b6cee71f49d3..bc879aeb62d4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
- if (prog && !prog->aux->offload)
- return -EINVAL;
+ if (prog) {
+ struct bpf_dev_offload *offload = prog->aux->offload;
+
+ if (!offload)
+ return -EINVAL;
+ if (offload->netdev != nn->dp.netdev)
+ return -EINVAL;
+ }
if (prog && old_prog) {
u8 cap;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index f5d73b83dcc2..553f94f55dce 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -315,6 +315,7 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
+ * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ac8439ceea10..481876b5424c 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
tx_skb->dma_len,
DMA_TO_DEVICE);
else
- pci_unmap_page(np->pci_dev, tx_skb->dma,
+ dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
tx_skb->dma_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_skb->dma = 0;
}
}
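
The forcedeth hunk above is part of the move from the legacy PCI DMA wrappers to the generic DMA API: pci_unmap_page(pdev, ...) becomes dma_unmap_page(&pdev->dev, ...) and PCI_DMA_TODEVICE becomes DMA_TO_DEVICE. A small sketch of the matching map/unmap pair, illustrative only and with hypothetical "example_" names:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static dma_addr_t example_map(struct pci_dev *pdev, struct page *page,
			      size_t len)
{
	dma_addr_t addr = dma_map_page(&pdev->dev, page, 0, len,
				       DMA_TO_DEVICE);

	/* Mapping can fail; 0 is used here only as a sketch-level sentinel. */
	if (dma_mapping_error(&pdev->dev, addr))
		return 0;
	return addr;
}

static void example_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len)
{
	dma_unmap_page(&pdev->dev, addr, len, DMA_TO_DEVICE);
}
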
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 457ee80307ea..40e52ffb732f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1089,9 +1089,10 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
* pch_gbe_watchdog - Watchdog process
* @data: Board private structure
*/
-static void pch_gbe_watchdog(unsigned long data)
+static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct pch_gbe_adapter *adapter = from_timer(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
@@ -2644,8 +2645,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "Invalid MAC address, "
"interface disabled.\n");
}
- setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 49591d9c2e1b..c9a55b774935 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -943,9 +943,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
#define TX_CLEAN_INTERVAL HZ
-static void pasemi_mac_tx_timer(unsigned long data)
+static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+ struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
@@ -1199,8 +1199,7 @@ static int pasemi_mac_open(struct net_device *dev)
if (dev->phydev)
phy_start(dev->phydev);
- setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
- (unsigned long)mac->tx);
+ timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 05479d435469..9e5264d8773b 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3749,9 +3749,9 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
-static void ql3xxx_timer(unsigned long ptr)
+static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+ struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
@@ -3891,7 +3891,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
- setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev);
+ timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
if (!cards_found) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index dcb8c39382e7..fc0d5fa65ad4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2030,21 +2030,6 @@ out:
return ret;
}
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->advertising);
- rtl_unlock_work(tp);
-
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -2171,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
return rc;
}
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int rc;
+ u32 advertising;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising))
+ return -EINVAL;
+
+ del_timer_sync(&tp->timer);
+
+ rtl_lock_work(tp);
+ rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex, advertising);
+ rtl_unlock_work(tp);
+
+ return rc;
+}
+
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2591,7 +2597,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
@@ -2603,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
.get_link_ksettings = rtl8169_get_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -3789,27 +3795,32 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
+ rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
rtl_writephy(tp, 0x0e, 0x003c);
rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0e, 0x0006);
rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
+ rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+ /* soft-reset phy */
+ rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
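
The r8169 change drops the legacy .set_settings hook in favour of .set_link_ksettings and converts the new advertising link-mode bitmap back to a legacy u32 before reusing the existing speed helper. A sketch of that conversion step follows; everything except the ethtool helper and structures is a hypothetical name, and the hardware-programming part is elided:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	u32 advertising;

	/* Fails when the bitmap contains modes with no legacy equivalent. */
	if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
						     cmd->link_modes.advertising))
		return -EINVAL;

	/* ... apply cmd->base.autoneg, cmd->base.speed, cmd->base.duplex
	 * and the legacy advertising mask to the hardware ...
	 */
	return 0;
}
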
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 0653b70723a3..6d6fb8cf3e7c 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1983,9 +1983,9 @@ err_out:
return err;
}
-static void ofdpa_fdb_cleanup(unsigned long data)
+static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = (struct ofdpa *)data;
+ struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
@@ -2368,8 +2368,7 @@ static int ofdpa_init(struct rocker *rocker)
hash_init(ofdpa->neigh_tbl);
spin_lock_init(&ofdpa->neigh_tbl_lock);
- setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
- (unsigned long) ofdpa);
+ timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 61cb24810d10..9e6db16af663 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -1,8 +1,8 @@
/*
* dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
*
- * Copyright (C) Alexandre Torgue 2015
- * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff4fb5eae1af..f63c2ddced3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -345,9 +345,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
* if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
-static void stmmac_eee_ctrl_timer(unsigned long arg)
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
@@ -401,9 +401,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- setup_timer(&priv->eee_ctrl_timer,
- stmmac_eee_ctrl_timer,
- (unsigned long)priv);
+ timer_setup(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer, 0);
mod_timer(&priv->eee_ctrl_timer,
STMMAC_LPI_T(eee_timer));
@@ -2221,9 +2220,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
-static void stmmac_tx_timer(unsigned long data)
+static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ struct stmmac_priv *priv = from_timer(priv, t, txtimer);
u32 tx_queues_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2244,7 +2243,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
+ timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
add_timer(&priv->txtimer);
}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index e1b55b8fb8e0..1f8e9601592a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -358,9 +358,9 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void xlgmac_tx_timer(unsigned long data)
+static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
@@ -391,8 +391,7 @@ static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
if (!channel->tx_ring)
break;
- setup_timer(&channel->tx_timer, xlgmac_tx_timer,
- (unsigned long)channel);
+ timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
}
}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index cd1185e66133..b432a75fb874 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -765,9 +765,9 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
-static void cpsw_ale_timer(unsigned long arg)
+static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+ struct cpsw_ale *ale = from_timer(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
@@ -859,7 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
- setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale);
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
if (ale->ageout) {
ale->timer.expires = jiffies + ale->ageout;
add_timer(&ale->timer);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 4ad821655e51..e831c49713ee 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2745,9 +2745,9 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
-static void netcp_ethss_timer(unsigned long arg)
+static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+ struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
@@ -3616,8 +3616,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
}
spin_unlock_bh(&gbe_dev->hw_stats_lock);
- setup_timer(&gbe_dev->timer, netcp_ethss_timer,
- (unsigned long)gbe_dev);
+ timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
*inst_priv = gbe_dev;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 8f53d762fbc4..5a4e78fde530 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -254,7 +254,7 @@ tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1425,7 +1425,7 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
@@ -1576,7 +1576,7 @@ drop_and_reuse:
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index a913538d3213..d925b8203996 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -912,8 +912,9 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* packets, including updating the queue tail pointer.
*/
static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
+spider_net_cleanup_tx_ring(struct timer_list *t)
{
+ struct spider_net_card *card = from_timer(card, t, tx_timer);
if ((spider_net_release_tx_chain(card, 0) != 0) &&
(card->netdev->flags & IFF_UP)) {
spider_net_kick_tx_dma(card);
@@ -1265,7 +1266,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
- spider_net_cleanup_tx_ring(card);
+ spider_net_cleanup_tx_ring(&card->tx_timer);
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
@@ -1977,9 +1978,9 @@ init_firmware_failed:
* @data: used for pointer to card structure
*
*/
-static void spider_net_link_phy(unsigned long data)
+static void spider_net_link_phy(struct timer_list *t)
{
- struct spider_net_card *card = (struct spider_net_card *)data;
+ struct spider_net_card *card = from_timer(card, t, aneg_timer);
struct mii_phy *phy = &card->phy;
/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
@@ -2256,14 +2257,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
pci_set_drvdata(card->pdev, netdev);
- setup_timer(&card->tx_timer,
- (void(*)(unsigned long))spider_net_cleanup_tx_ring,
- (unsigned long)card);
+ timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
netdev->irq = card->pdev->irq;
card->aneg_count = 0;
- setup_timer(&card->aneg_timer, spider_net_link_phy,
- (unsigned long)card);
+ timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
netif_napi_add(netdev, &card->napi,
spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 83e6f76eb965..33949248c829 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
else
name = "Rhine III";
- netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name, (long)ioaddr, dev->dev_addr, rp->irq);
+ netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+ name, ioaddr, dev->dev_addr, rp->irq);
dev_set_drvdata(hwdev, dev);
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6d68c8a8f4f2..da4ec575ccf9 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
depends on (PPC || MICROBLAZE)
+ depends on !64BIT || BROKEN
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 4e16d839c311..b718a02a6bb6 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1337,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
return 0;
@@ -1527,11 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
- goto nla_put_failure;
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
!geneve->use_udp6_rx_checksums))
goto nla_put_failure;
+#endif
return 0;
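
The geneve hunks above reject the IPv6-only checksum attributes with an extack message when the kernel is built without CONFIG_IPV6, instead of silently accepting them. A compact sketch of that guard pattern, with a hypothetical handler name and the attribute left abstract:

#include <linux/errno.h>
#include <linux/netlink.h>
#include <net/netlink.h>

static int example_parse_v6_csum_attr(struct nlattr *attr,
				      struct netlink_ext_ack *extack)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* ... honour the attribute when IPv6 is built in ... */
	return 0;
#else
	NL_SET_ERR_MSG_ATTR(extack, attr,
			    "IPv6 support not enabled in the kernel");
	return -EPFNOSUPPORT;
#endif
}
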
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index c9f7215c5dc2..3de272959090 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1005,7 +1005,7 @@ static void __scc_start_tx_timer(struct scc_channel *scc,
} else
if (when != TIMER_OFF)
{
- scc->tx_t.function = (TIMER_FUNC_TYPE)handler;
+ scc->tx_t.function = handler;
scc->tx_t.expires = jiffies + (when*HZ)/100;
add_timer(&scc->tx_t);
}
@@ -1031,7 +1031,7 @@ static void scc_start_defer(struct scc_channel *scc)
if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
{
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_busy;
+ scc->tx_wdog.function = t_busy;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
add_timer(&scc->tx_wdog);
}
@@ -1047,7 +1047,7 @@ static void scc_start_maxkeyup(struct scc_channel *scc)
if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
{
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_maxkeyup;
+ scc->tx_wdog.function = t_maxkeyup;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
add_timer(&scc->tx_wdog);
}
@@ -1428,7 +1428,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
del_timer(&scc->tx_wdog);
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)scc_stop_calibrate;
+ scc->tx_wdog.function = scc_stop_calibrate;
scc->tx_wdog.expires = jiffies + HZ*duration;
add_timer(&scc->tx_wdog);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index f2a7e929316e..11c1e7950fe5 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
return false;
}
-static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
@@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
case htons(ETH_P_ARP): {
struct arphdr *arph;
- if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
@@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
+ struct icmp6hdr *icmph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+
+ if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Need to access the ipv6 address in body */
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ + sizeof(struct in6_addr))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+ }
+
*type = IPVL_ICMPV6;
- lyr3h = ip6h + 1;
+ lyr3h = icmph;
}
break;
}
@@ -510,7 +528,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
struct ipvl_addr *addr;
int addr_type;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -539,7 +557,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
if (!ipvlan_is_vepa(ipvlan->port) &&
ether_addr_equal(eth->h_dest, eth->h_source)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
@@ -606,7 +624,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
@@ -627,7 +645,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -666,7 +684,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
} else {
struct ipvl_addr *addr;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return ret;
@@ -717,7 +735,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
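
The ipvlan change above tightens header validation: before dereferencing the ICMPv6 header and the neighbour-solicitation target address, it pulls enough bytes into the linear area with pskb_may_pull() and then re-reads the header pointers, since pulling may reallocate skb data. A hedged sketch of that pattern, assuming (as the ipvlan code does) that the ICMPv6 header directly follows the IPv6 header; the helper name is hypothetical:

#include <linux/icmpv6.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

static struct icmp6hdr *example_icmp6_hdr(struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;

	if (!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(struct icmp6hdr)))
		return NULL;

	/* pskb_may_pull() may reallocate the header, so reload pointers. */
	ip6h = ipv6_hdr(skb);
	return (struct icmp6hdr *)(ip6h + 1);
}
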
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 72f4228a63bb..9442db221834 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = {
};
MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+
+MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
+MODULE_AUTHOR("NXP");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index aebc08beceba..21b3f36e023a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -16,6 +16,7 @@
* link takes priority and the other port is completely locked out.
*/
#include <linux/phy.h>
+#include <linux/marvell_phy.h>
enum {
MV_PCS_BASE_T = 0x0000,
@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = 0x002b09aa,
- .phy_id_mask = 0xffffffff,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.features = SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Full |
@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { 0x002b09aa, 0xffffffff },
+ { 0x002b09aa, MARVELL_PHY_ID_MASK },
{ },
};
MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index eb8a18991d8c..cc63102ca96e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -106,8 +106,8 @@ static int slip_esc6(unsigned char *p, unsigned char *d, int len);
static void slip_unesc6(struct slip *sl, unsigned char c);
#endif
#ifdef CONFIG_SLIP_SMART
-static void sl_keepalive(unsigned long sls);
-static void sl_outfill(unsigned long sls);
+static void sl_keepalive(struct timer_list *t);
+static void sl_outfill(struct timer_list *t);
static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
@@ -763,8 +763,8 @@ static struct slip *sl_alloc(dev_t line)
sl->mode = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
/* initialize timer_list struct */
- setup_timer(&sl->keepalive_timer, sl_keepalive, (unsigned long)sl);
- setup_timer(&sl->outfill_timer, sl_outfill, (unsigned long)sl);
+ timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
+ timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
slip_devs[i] = dev;
return sl;
@@ -1388,9 +1388,9 @@ module_exit(slip_exit);
* added by Stanislav Voronyi. All changes before marked VSV
*/
-static void sl_outfill(unsigned long sls)
+static void sl_outfill(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, outfill_timer);
spin_lock(&sl->lock);
@@ -1419,9 +1419,9 @@ out:
spin_unlock(&sl->lock);
}
-static void sl_keepalive(unsigned long sls)
+static void sl_keepalive(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, keepalive_timer);
spin_lock(&sl->lock);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index b13890953ebb..e9489b88407c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1077,7 +1077,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 228d4aa6d9ae..ca5e375de27c 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
if (ring->ring->is_tx) {
dir = DMA_TO_DEVICE;
order = 0;
- size = tbnet_frame_size(tf);
+ size = TBNET_FRAME_SIZE;
} else {
dir = DMA_FROM_DEVICE;
order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ err_free:
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
struct tbnet_frame *tf;
unsigned int index;
@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
tf = &ring->frames[index];
tf->frame.size = 0;
- tf->frame.buffer_phy = 0;
+
+ dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+ tbnet_frame_size(tf), DMA_TO_DEVICE);
return tf;
}
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
- struct device *dma_dev = tb_ring_dma_device(ring);
struct tbnet *net = netdev_priv(tf->dev);
- dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
- DMA_TO_DEVICE);
- tf->frame.buffer_phy = 0;
-
/* Return buffer to the ring */
net->tx_ring.prod++;
@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
unsigned int i;
for (i = 0; i < TBNET_RING_SIZE; i++) {
struct tbnet_frame *tf = &ring->frames[i];
+ dma_addr_t dma_addr;
tf->page = alloc_page(GFP_KERNEL);
if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
return -ENOMEM;
}
+ dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
+ __free_page(tf->page);
+ tf->page = NULL;
+ tbnet_free_buffers(ring);
+ return -ENOMEM;
+ }
+
tf->dev = net->dev;
+ tf->frame.buffer_phy = dma_addr;
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
return 0;
}
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
- dma_addr_t dma_addr;
-
- dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, dma_addr))
- return false;
-
- tf->frame.buffer_phy = dma_addr;
- return true;
-}
-
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
struct tbnet_frame **frames, u32 frame_count)
{
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL) {
/* No need to calculate checksum so we just update the
- * total frame count and map the frames for DMA.
+ * total frame count and sync the frames for DMA.
*/
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
- if (!tbnet_xmit_map(dma_dev, frames[i]))
- goto err_unmap;
+ dma_sync_single_for_device(dma_dev,
+ frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = csum_fold(wsum);
/* Checksum is finally calculated and we don't touch the memory
- * anymore, so DMA map the frames now.
+ * anymore, so DMA sync the frames now.
*/
for (i = 0; i < frame_count; i++) {
- if (!tbnet_xmit_map(dma_dev, frames[i]))
- goto err_unmap;
+ dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
-
-err_unmap:
- while (i--)
- dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
- tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
- return false;
}
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6bb1e604aadd..95749006d687 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -444,9 +444,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
spin_unlock_bh(&tun->lock);
}
-static void tun_flow_cleanup(unsigned long data)
+static void tun_flow_cleanup(struct timer_list *t)
{
- struct tun_struct *tun = (struct tun_struct *)data;
+ struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
@@ -1196,7 +1196,9 @@ static void tun_flow_init(struct tun_struct *tun)
INIT_HLIST_HEAD(&tun->flows[i]);
tun->ageing_time = TUN_FLOW_EXPIRE;
- setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+ timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + tun->ageing_time));
}
static void tun_flow_uninit(struct tun_struct *tun)
@@ -1485,6 +1487,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
+ rcu_read_unlock();
return NULL;
case XDP_TX:
xdp_xmit = true;
@@ -1517,7 +1520,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (xdp_xmit) {
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
- rcu_read_lock();
+ rcu_read_unlock();
return NULL;
}
@@ -2369,6 +2372,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ arg &= ~TUN_F_UFO;
}
/* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index ca71f6c03859..7275761a1177 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -291,12 +291,15 @@ static void ipheth_sndbulk_callback(struct urb *urb)
static int ipheth_carrier_set(struct ipheth_device *dev)
{
- struct usb_device *udev = dev->udev;
+ struct usb_device *udev;
int retval;
+
if (!dev)
return 0;
if (!dev->confirmed_pairing)
return 0;
+
+ udev = dev->udev;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 720a3a248070..c750cf7c042b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1239,6 +1239,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7ac487031b4b..19b9cc51079e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
- __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
- u16 vid)
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, u16 vid)
{
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index c7721c729541..afeca6bcdade 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -558,9 +558,9 @@ out:
return NET_RX_DROP;
}
-static void ppp_timer(unsigned long arg)
+static void ppp_timer(struct timer_list *t)
{
- struct proto *proto = (struct proto *)arg;
+ struct proto *proto = from_timer(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
@@ -610,7 +610,7 @@ static void ppp_start(struct net_device *dev)
for (i = 0; i < IDX_COUNT; i++) {
struct proto *proto = &ppp->protos[i];
proto->dev = dev;
- setup_timer(&proto->timer, ppp_timer, (unsigned long)proto);
+ timer_setup(&proto->timer, ppp_timer, 0);
proto->state = CLOSED;
}
ppp->protos[IDX_LCP].pid = PID_LCP;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 37b1e0d03e31..90a4ad9a2d08 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- data = kmalloc(xc.len, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
+ data = memdup_user(xc.data, xc.len);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
break;
}
-
- if(copy_from_user(data, xc.data, xc.len))
- {
- kfree(data);
- ret = -ENOMEM;
- break;
- }
printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
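
The lmc hunk above collapses a kmalloc() plus copy_from_user() pair into memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(). A minimal sketch of handling that return convention, with hypothetical names:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static void *example_copy_user_blob(const void __user *ubuf, size_t len,
				    int *err)
{
	void *data = memdup_user(ubuf, len);

	/* memdup_user() never returns NULL; failures come back as ERR_PTR. */
	if (IS_ERR(data)) {
		*err = PTR_ERR(data);
		return NULL;
	}

	*err = 0;
	return data;
}
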
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e31438541ee1..7d295ee71534 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -566,18 +566,16 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
#define MICHAEL_MIC_LEN 8
-static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
- enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
- return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- return IEEE80211_TKIP_ICV_LEN;
+ return 0;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
@@ -594,6 +592,31 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
@@ -1063,25 +1086,27 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
- if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
- enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
- skb_trim(msdu, msdu->len - 8);
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
/* ICV */
- if (status->flag & RX_FLAG_ICV_STRIPPED &&
- enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- skb_trim(msdu, msdu->len - 8);
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index dfb26f03c1a2..1b05b5d7a038 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
if (!avp->assoc)
return false;
- skb = ieee80211_nullfunc_get(sc->hw, vif);
+ skb = ieee80211_nullfunc_get(sc->hw, vif, false);
if (!skb)
return false;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 71812a2dd513..f7d228b5ba93 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1233,7 +1233,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
}
/* External RF module */
- iris_node = of_find_node_by_name(mmio_node, "iris");
+ iris_node = of_get_child_by_name(mmio_node, "iris");
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index ede89d4ffc88..e99e766a3028 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -518,11 +518,11 @@ exit:
/* LED trigger */
static int tx_activity;
-static void at76_ledtrig_tx_timerfunc(unsigned long data);
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
DEFINE_LED_TRIGGER(ledtrig_tx);
-static void at76_ledtrig_tx_timerfunc(unsigned long data)
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
{
static int tx_lastactivity;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 3559fb5b8fb0..03aae6bc1838 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -280,9 +280,9 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
/**
* brcmf_btcoex_timerfunc() - BT coex timer callback
*/
-static void brcmf_btcoex_timerfunc(ulong data)
+static void brcmf_btcoex_timerfunc(struct timer_list *t)
{
- struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+ struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
brcmf_dbg(TRACE, "enter\n");
bt_local->timer_on = false;
@@ -380,7 +380,7 @@ int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
/* Set up timer for BT */
btci->timer_on = false;
btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
- setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
+ timer_setup(&btci->timer, brcmf_btcoex_timerfunc, 0);
btci->cfg = cfg;
btci->saved_regs_part1 = false;
btci->saved_regs_part2 = false;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 6e70df978159..15fa00d79fc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2983,10 +2983,10 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
}
-static void brcmf_escan_timeout(unsigned long data)
+static void brcmf_escan_timeout(struct timer_list *t)
{
struct brcmf_cfg80211_info *cfg =
- (struct brcmf_cfg80211_info *)data;
+ from_timer(cfg, t, escan_timeout);
if (cfg->int_escan_map || cfg->scan_request) {
brcmf_err("timer expired\n");
@@ -3150,8 +3150,7 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
brcmf_cfg80211_escan_handler);
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
/* Init scan_timeout timer */
- setup_timer(&cfg->escan_timeout, brcmf_escan_timeout,
- (unsigned long)cfg);
+ timer_setup(&cfg->escan_timeout, brcmf_escan_timeout, 0);
INIT_WORK(&cfg->escan_timeout_work,
brcmf_cfg80211_escan_timeout_worker);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index e3495ea95553..310c4e2746aa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3972,9 +3972,9 @@ brcmf_sdio_watchdog_thread(void *data)
}
static void
-brcmf_sdio_watchdog(unsigned long data)
+brcmf_sdio_watchdog(struct timer_list *t)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+ struct brcmf_sdio *bus = from_timer(bus, t, timer);
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -4169,8 +4169,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
init_waitqueue_head(&bus->dcmd_resp_wait);
/* Set up the watchdog timer */
- setup_timer(&bus->timer, brcmf_sdio_watchdog,
- (unsigned long)bus);
+ timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index af7c4f36b66f..e7e75b458005 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -72,18 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000_MODULE_FIRMWARE(api) \
- IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+ IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+ IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260B_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -194,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
@@ -206,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
};
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
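Note on the macros above: the IWL9000*/IWL9260* prefix strings already end in a trailing '-', so the old MODULE_FIRMWARE macros, which inserted another "-" before the API number, requested firmware names with a doubled dash; dropping the literal separator leaves a single dash. A minimal standalone sketch of the string pasting (the API value 34 is an assumed example, and __stringify is open-coded here rather than taken from <linux/stringify.h>):

    /* Sketch only: shows how the preprocessor pastes the firmware name. */
    #include <stdio.h>

    #define __stringify_1(x)  #x
    #define __stringify(x)    __stringify_1(x)

    #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
    #define IWL9260A_MODULE_FIRMWARE(api) \
        IWL9260A_FW_PRE __stringify(api) ".ucode"

    int main(void)
    {
        /* Prints "iwlwifi-9260-th-a0-jf-a0-34.ucode"; the removed "-"
         * separator would have produced "...-jf-a0--34.ucode" instead. */
        printf("%s\n", IWL9260A_MODULE_FIRMWARE(34));
        return 0;
    }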
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index ea8206515171..705f83b02e13 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -80,15 +80,15 @@
#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 2acd94da9efe..d11d72615de2 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -399,9 +399,9 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
* was received. We need to ensure we receive the statistics in order
* to update the temperature used for calibrating the TXPOWER.
*/
-static void iwl_bg_statistics_periodic(unsigned long data)
+static void iwl_bg_statistics_periodic(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -556,9 +556,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
* this function is to perform continuous uCode event logging operation
* if enabled
*/
-static void iwl_bg_ucode_trace(unsigned long data)
+static void iwl_bg_ucode_trace(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -1085,11 +1085,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
if (priv->lib->bt_params)
iwlagn_bt_setup_deferred_work(priv);
- setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
- (unsigned long)priv);
+ timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
- setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
- (unsigned long)priv);
+ timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
}
void iwl_cancel_deferred_work(struct iwl_priv *priv)
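The timer changes in this series all follow the same conversion: the callback now receives the struct timer_list pointer itself, the owning object is recovered with from_timer() (a container_of() wrapper keyed on the timer field), and timer_setup() replaces setup_timer()/init_timer() with no data cookie. A minimal sketch of the pattern, with a hypothetical device structure standing in for the drivers below:

    /* Illustrative only; "my_dev" and its fields are hypothetical. */
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
        struct timer_list poll_timer;
        int pending;
    };

    static void my_dev_poll(struct timer_list *t)
    {
        /* from_timer() == container_of(t, struct my_dev, poll_timer) */
        struct my_dev *dev = from_timer(dev, t, poll_timer);

        dev->pending = 0;
    }

    static void my_dev_start(struct my_dev *dev)
    {
        /* replaces setup_timer(&dev->poll_timer, fn, (unsigned long)dev) */
        timer_setup(&dev->poll_timer, my_dev_poll, 0);
        mod_timer(&dev->poll_timer, jiffies + HZ);
    }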
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 5b73492e7ff7..6524533d723c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -164,9 +164,10 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
* without doing anything, driver should continue the 5 seconds timer
* to wake up uCode for temperature check until temperature drop below CT
*/
-static void iwl_tt_check_exit_ct_kill(unsigned long data)
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
@@ -214,9 +215,10 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
}
}
-static void iwl_tt_ready_for_ct_kill(unsigned long data)
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_waiting_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -612,10 +614,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
tt->state = IWL_TI_0;
- setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
- iwl_tt_check_exit_ct_kill, (unsigned long)priv);
- setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
- iwl_tt_ready_for_ct_kill, (unsigned long)priv);
+ timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+ iwl_tt_check_exit_ct_kill, 0);
+ timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+ iwl_tt_ready_for_ct_kill, 0);
/* setup deferred ct kill work */
INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5a40092febfb..3bfc657f6b42 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
struct iwl_scan_config {
__le32 flags;
@@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
};
/**
@@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
- * @reserved2: for future use and alignment
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
@@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved: for future use and alignment
+ * @reserved2: for future use and alignment
+ * @reserved3: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
@@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
__le16 general_flags;
- u8 reserved2;
+ u8 reserved;
u8 scan_start_mac_id;
- u8 extended_dwell;
- u8 active_dwell;
- u8 passive_dwell;
- u8 fragmented_dwell;
union {
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 reserved3;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(u8) - sizeof(__le16))
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+ 2 * sizeof(__le32) - 2 * sizeof(u8) - \
+ sizeof(__le16))
/**
* struct iwl_umac_scan_abort
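With the dwell fields moved inside the union, struct iwl_scan_req_umac carries three layouts (v1, v6 and the new adaptive-dwell v7) behind one common header, and the size macros peel the per-version deltas off sizeof() of the full structure, whose largest member is v7. A compile-time sketch restating that arithmetic, assuming the definitions above are in scope:

    /* Restates the size-macro definitions above; nothing new is asserted. */
    #include <linux/build_bug.h>

    static inline void iwl_scan_req_umac_size_sketch(void)
    {
        /* v7 is the largest union member, so the full struct is the v7 size */
        BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V7 !=
                     sizeof(struct iwl_scan_req_umac));

        /* v6 lacks the two adaptive-dwell counters and the __le16 TU budget */
        BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V6 !=
                     IWL_SCAN_REQ_UMAC_SIZE_V7 -
                     2 * sizeof(u8) - sizeof(__le16));

        /* v1 additionally keeps only one LMAC's out/suspend time words */
        BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V1 !=
                     IWL_SCAN_REQ_UMAC_SIZE_V6 - 2 * sizeof(__le32));
    }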
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 740d97093d1c..37a5c5b4eda6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -264,6 +264,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
/* API Set 1 */
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index d1263a554420..e21e46cf6f9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -366,6 +366,7 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
+ u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
@@ -472,6 +473,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 0e18c5066f04..4575595ab022 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1142,6 +1142,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 774122fed454..e4fd476e9ccb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
u32 measurement_dwell;
};
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ if (params->measurement_dwell) {
+ cmd->v7.active_dwell = params->measurement_dwell;
+ cmd->v7.passive_dwell = params->measurement_dwell;
+ } else {
+ cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ }
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ }
+
+ return;
+ }
+
if (params->measurement_dwell) {
- cmd->active_dwell = params->measurement_dwell;
- cmd->passive_dwell = params->measurement_dwell;
- cmd->extended_dwell = params->measurement_dwell;
+ cmd->v1.active_dwell = params->measurement_dwell;
+ cmd->v1.passive_dwell = params->measurement_dwell;
+ cmd->v1.extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
- cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
- cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->v6.max_out_time[1] =
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[1] =
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
} else {
@@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
-
- if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- else
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
- (void *)&cmd->v6.data : (void *)&cmd->v1.data;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.channel_flags = channel_flags;
+ cmd->v7.n_channels = params->n_channels;
+ } else if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
@@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_has_new_tx_api(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
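In the scan code above, the adaptive-dwell test is always made before the new-TX-API test, so firmware advertising IWL_UCODE_TLV_API_ADAPTIVE_DWELL gets the v7 command layout, with v6 and then v1 as fallbacks. A reduced sketch of that ordering; the helper name is made up, only the two predicates and their precedence come from the driver:

    /* Illustrative dispatch only. */
    static u8 iwl_mvm_scan_cmd_version(struct iwl_mvm *mvm)
    {
        if (iwl_mvm_is_adaptive_dwell_supported(mvm))
            return 7;    /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
        if (iwl_mvm_has_new_tx_api(mvm))
            return 6;    /* ..._VER_6 */
        return 1;        /* ..._VER_1 */
    }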
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4a21c12276d7..f21fe59faccf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -535,47 +535,121 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index b5c459cd70ce..fed6d842a5e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -147,9 +147,9 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-static void iwl_pcie_txq_stuck_timer(unsigned long data)
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
- struct iwl_txq *txq = (void *)data;
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -495,8 +495,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
- (unsigned long)txq);
+ timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
txq->trans_pcie = trans_pcie;
txq->n_window = slots_num;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 1a8d8db80b05..b4dfe1893d18 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -185,9 +185,9 @@ static void hostap_event_expired_sta(struct net_device *dev,
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void ap_handle_timer(unsigned long data)
+static void ap_handle_timer(struct timer_list *t)
{
- struct sta_info *sta = (struct sta_info *) data;
+ struct sta_info *sta = from_timer(sta, t, timer);
local_info_t *local;
struct ap_data *ap;
unsigned long next_time = 0;
@@ -1189,10 +1189,8 @@ static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- init_timer(&sta->timer);
+ timer_setup(&sta->timer, ap_handle_timer, 0);
sta->timer.expires = jiffies + ap->max_inactivity;
- sta->timer.data = (unsigned long) sta;
- sta->timer.function = ap_handle_timer;
if (!ap->local->hostapd)
add_timer(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 72b46eaf3de2..5c4a17a18968 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -2794,9 +2794,9 @@ static void prism2_check_sta_fw_version(local_info_t *local)
}
-static void hostap_passive_scan(unsigned long data)
+static void hostap_passive_scan(struct timer_list *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, passive_scan_timer);
struct net_device *dev = local->dev;
u16 chan;
@@ -2869,10 +2869,10 @@ static void handle_comms_qual_update(struct work_struct *work)
* used to monitor that local->last_tick_timer is being updated. If not,
* interrupt busy-loop is assumed and driver tries to recover by masking out
* some events. */
-static void hostap_tick_timer(unsigned long data)
+static void hostap_tick_timer(struct timer_list *t)
{
static unsigned long last_inquire = 0;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, tick_timer);
local->last_tick_timer = jiffies;
/* Inquire CommTallies every 10 seconds to keep the statistics updated
@@ -3225,13 +3225,8 @@ while (0)
lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
- init_timer(&local->passive_scan_timer);
- local->passive_scan_timer.data = (unsigned long) local;
- local->passive_scan_timer.function = hostap_passive_scan;
-
- init_timer(&local->tick_timer);
- local->tick_timer.data = (unsigned long) local;
- local->tick_timer.function = hostap_tick_timer;
+ timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
+ timer_setup(&local->tick_timer, hostap_tick_timer, 0);
local->tick_timer.expires = jiffies + 2 * HZ;
add_timer(&local->tick_timer);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 501180584b4b..94ad6fe29e69 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -319,9 +319,9 @@ static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
mod_timer(timer, expire);
}
-static void ezusb_request_timerfn(u_long _ctx)
+static void ezusb_request_timerfn(struct timer_list *t)
{
- struct request_context *ctx = (void *) _ctx;
+ struct request_context *ctx = from_timer(ctx, t, timer);
ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
@@ -365,7 +365,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
refcount_set(&ctx->refcount, 1);
init_completion(&ctx->done);
- setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
+ timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
return ctx;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7c3600643c7f..10b075a46b26 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3108,6 +3108,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
+ int ret;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3147,7 +3148,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_new_radio(info, &param);
+ ret = mac80211_hwsim_new_radio(info, &param);
+ kfree(hwname);
+ return ret;
}
static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
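In hwsim_new_radio_nl() the hwname string is duplicated from the netlink attribute earlier in the function (via kasprintf(), going by the surrounding code), so returning mac80211_hwsim_new_radio() directly leaked that copy on every request; the fix captures the return value and frees the name on every path. A generic sketch of that shape, with placeholder names rather than the hwsim symbols, and assuming the callee does not take ownership of the string:

    /* Placeholder names only. */
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    int consume_request(const char *name);   /* hypothetical consumer */

    static int do_request(const char *src)
    {
        char *name = kstrdup(src, GFP_KERNEL);
        int ret;

        if (!name)
            return -ENOMEM;

        ret = consume_request(name);
        kfree(name);               /* freed on success and on error alike */
        return ret;
    }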
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 7d6dc76c930a..6711e7fb6926 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -554,7 +554,7 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EFAULT;
}
- mac->scan_timeout.function = (TIMER_FUNC_TYPE)qtnf_scan_timeout;
+ mac->scan_timeout.function = qtnf_scan_timeout;
mod_timer(&mac->scan_timeout,
jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 2d2c1ea65cb2..3423dc51198b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -288,7 +288,7 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->iflist[i].vifid = i;
qtnf_sta_list_init(&mac->iflist[i].sta_list);
mutex_init(&mac->mac_lock);
- setup_timer(&mac->scan_timeout, NULL, 0);
+ timer_setup(&mac->scan_timeout, NULL, 0);
}
qtnf_mac_init_primary_intf(mac);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index d8afcdfca1ed..0133fcd4601b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -569,7 +569,7 @@ static int dl_startup_params(struct net_device *dev)
local->card_status = CARD_DL_PARAM;
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
- local->timer.function = (TIMER_FUNC_TYPE)verify_dl_startup;
+ local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
"ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -1947,12 +1947,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
memtmp);
- local->timer.function = (TIMER_FUNC_TYPE)start_net;
+ local->timer.function = start_net;
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" join failed\n",
memtmp);
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
}
add_timer(&local->timer);
}
@@ -2417,9 +2417,9 @@ static void authenticate(ray_dev_t *local)
del_timer(&local->timer);
if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
} else {
- local->timer.function = (TIMER_FUNC_TYPE)authenticate_timeout;
+ local->timer.function = authenticate_timeout;
}
local->timer.expires = jiffies + HZ * 2;
add_timer(&local->timer);
@@ -2502,7 +2502,7 @@ static void associate(ray_dev_t *local)
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 2;
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
add_timer(&local->timer);
local->card_status = CARD_ASSOC_FAILED;
return;
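ray_cs and qtnfmac above keep managing .function and .expires by hand rather than switching to timer_setup() plus mod_timer(); once the callbacks take a struct timer_list *, their type matches the .function member directly and the (TIMER_FUNC_TYPE) cast goes away. A reduced sketch of one such site, with an illustrative structure:

    /* Illustrative only; "my_local" and join_net()'s body are placeholders. */
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_local {
        struct timer_list timer;
    };

    static void join_net(struct timer_list *t)
    {
        /* join/retry logic would live here */
    }

    static void arm_join(struct my_local *local)
    {
        /* previously: local->timer.function = (TIMER_FUNC_TYPE)join_net; */
        local->timer.function = join_net;
        local->timer.expires = jiffies + 2 * HZ;
        add_timer(&local->timer);
    }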
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 03687a80d6e9..38678e9a0562 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
priv->bss_loss_state++;
- skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
WARN_ON(!skb);
if (skb)
cw1200_tx(priv->hw, NULL, skb);
@@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
.rate = 0xFF,
};
- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
if (!frame.skb)
return -ENOMEM;
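ieee80211_nullfunc_get() gains a third argument in this series; every caller here passes false, meaning a QoS nullfunc template is not acceptable and mac80211 should keep returning the plain nullfunc these firmwares expect. The prototype as these call sites use it, declared in include/net/mac80211.h:

    /* qos_ok == false asks for the plain, non-QoS nullfunc template. */
    struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
                                           struct ieee80211_vif *vif,
                                           bool qos_ok);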
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9915d83a4a30..6d02c660b4ab 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
if (!skb)
goto out;
size = skb->len;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 2bfc12fdc929..761cf8573a80 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ptr = NULL;
} else {
skb = ieee80211_nullfunc_get(wl->hw,
- wl12xx_wlvif_to_vif(wlvif));
+ wl12xx_wlvif_to_vif(wlvif),
+ false);
if (!skb)
goto out;
size = skb->len;
@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif, false);
if (!skb)
goto out;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index c346c021b999..d47921a84509 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -196,9 +196,9 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wl1271_rx_streaming_timer(unsigned long data)
+static void wl1271_rx_streaming_timer(struct timer_list *t)
{
- struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
@@ -2279,8 +2279,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
- setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wlvif);
+ timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
return 0;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b8689c6d887..c5a34671abda 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,8 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
struct netfront_stats {
u64 packets;
u64 bytes;
@@ -228,9 +230,9 @@ static bool xennet_can_sg(struct net_device *dev)
}
-static void rx_refill_timeout(unsigned long data)
+static void rx_refill_timeout(struct timer_list *t)
{
- struct netfront_queue *queue = (struct netfront_queue *)data;
+ struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
napi_schedule(&queue->napi);
}
@@ -1605,8 +1607,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
- (unsigned long)queue);
+ timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
@@ -2021,10 +2022,12 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateClosed:
+ wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
+ wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
@@ -2130,6 +2133,20 @@ static int xennet_remove(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+ xenbus_switch_state(dev, XenbusStateClosing);
+ wait_event(module_unload_q,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosing);
+
+ xenbus_switch_state(dev, XenbusStateClosed);
+ wait_event(module_unload_q,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown);
+ }
+
xennet_disconnect_backend(info);
unregister_netdev(info->netdev);
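xennet_remove() used to tear the device down regardless of backend state; it now walks the frontend through Closing and Closed and sleeps on module_unload_q, which netback_changed() above wakes, so the shared rings are only freed once the backend has actually followed. The handshake reduced to its parts; the two function names below are illustrative wrappers around the calls added by the patch:

    /* Reduced sketch of the unload handshake. */
    #include <linux/wait.h>
    #include <xen/xenbus.h>

    static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);

    static void backend_changed(enum xenbus_state backend_state)
    {
        if (backend_state == XenbusStateClosing ||
            backend_state == XenbusStateClosed)
            wake_up_all(&module_unload_q);    /* unblocks remove() below */
    }

    static void frontend_remove_sync(struct xenbus_device *dev)
    {
        xenbus_switch_state(dev, XenbusStateClosing);
        wait_event(module_unload_q,
                   xenbus_read_driver_state(dev->otherend) ==
                   XenbusStateClosing);

        xenbus_switch_state(dev, XenbusStateClosed);
        wait_event(module_unload_q,
                   xenbus_read_driver_state(dev->otherend) ==
                   XenbusStateClosed ||
                   xenbus_read_driver_state(dev->otherend) ==
                   XenbusStateUnknown);
    }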
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index 7f8960a46aab..52c8ae504e32 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -130,9 +130,9 @@ static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
}
-static void fw_dnld_timeout(unsigned long arg)
+static void fw_dnld_timeout(struct timer_list *t)
{
- struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+ struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
nfc_err(priv->dev, "FW loading timeout");
priv->fw_dnld.state = STATE_RESET;
@@ -538,8 +538,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
}
/* Configure a timer for timeout */
- setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
- (unsigned long) priv);
+ timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0);
mod_timer(&priv->fw_dnld.timer,
jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index c05cb637ba92..a0cc1cc45292 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1232,9 +1232,9 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
return 0;
}
-static void pn533_listen_mode_timer(unsigned long data)
+static void pn533_listen_mode_timer(struct timer_list *t)
{
- struct pn533 *dev = (struct pn533 *)data;
+ struct pn533 *dev = from_timer(dev, t, listen_timer);
dev_dbg(dev->dev, "Listen mode timeout\n");
@@ -2632,9 +2632,7 @@ struct pn533 *pn533_register_device(u32 device_type,
if (priv->wq == NULL)
goto error;
- init_timer(&priv->listen_timer);
- priv->listen_timer.data = (unsigned long) priv;
- priv->listen_timer.function = pn533_listen_mode_timer;
+ timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0);
skb_queue_head_init(&priv->resp_q);
skb_queue_head_init(&priv->fragment_skb);
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 9477994cf975..f26d938d240f 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -246,18 +246,18 @@ void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
}
EXPORT_SYMBOL(ndlc_recv);
-static void ndlc_t1_timeout(unsigned long data)
+static void ndlc_t1_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
pr_debug("\n");
schedule_work(&ndlc->sm_work);
}
-static void ndlc_t2_timeout(unsigned long data)
+static void ndlc_t2_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
pr_debug("\n");
@@ -282,13 +282,8 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
*ndlc_id = ndlc;
/* initialize timers */
- init_timer(&ndlc->t1_timer);
- ndlc->t1_timer.data = (unsigned long)ndlc;
- ndlc->t1_timer.function = ndlc_t1_timeout;
-
- init_timer(&ndlc->t2_timer);
- ndlc->t2_timer.data = (unsigned long)ndlc;
- ndlc->t2_timer.function = ndlc_t2_timeout;
+ timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
+ timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
skb_queue_head_init(&ndlc->rcv_q);
skb_queue_head_init(&ndlc->send_q);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 56f2112e0cd8..f55d082ace71 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -677,7 +677,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
}
EXPORT_SYMBOL(st_nci_se_io);
-static void st_nci_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
@@ -690,7 +690,7 @@ static void st_nci_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
pr_debug("\n");
@@ -708,9 +708,10 @@ static void st_nci_se_wt_timeout(unsigned long data)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st_nci_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(struct timer_list *t)
{
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
@@ -725,15 +726,11 @@ int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status)
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function =
- st_nci_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st_nci_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.xch_error = false;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 3a98563d4a12..4bed9e842db3 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -252,7 +252,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
}
EXPORT_SYMBOL(st21nfca_hci_se_io);
-static void st21nfca_se_wt_timeout(unsigned long data)
+static void st21nfca_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
@@ -265,7 +265,8 @@ static void st21nfca_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.bwi_timer);
pr_debug("\n");
@@ -283,9 +284,10 @@ static void st21nfca_se_wt_timeout(unsigned long data)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st21nfca_se_activation_timeout(unsigned long data)
+static void st21nfca_se_activation_timeout(struct timer_list *t)
{
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
@@ -392,14 +394,11 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st21nfca_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.count_pipes = 0;
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 938a18bcfc3f..3f5a92bae6f8 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -107,9 +107,9 @@ struct pp_ctx {
static struct dentry *pp_debugfs_dir;
-static void pp_ping(unsigned long ctx)
+static void pp_ping(struct timer_list *t)
{
- struct pp_ctx *pp = (void *)ctx;
+ struct pp_ctx *pp = from_timer(pp, t, db_timer);
unsigned long irqflags;
u64 db_bits, db_mask;
u32 spad_rd, spad_wr;
@@ -153,7 +153,7 @@ static void pp_link_event(void *ctx)
if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
dev_dbg(&pp->ntb->dev, "link is up\n");
- pp_ping((unsigned long)pp);
+ pp_ping(&pp->db_timer);
} else {
dev_dbg(&pp->ntb->dev, "link is down\n");
del_timer(&pp->db_timer);
@@ -252,7 +252,7 @@ static int pp_probe(struct ntb_client *client,
pp->db_bits = 0;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
- setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+ timer_setup(&pp->db_timer, pp_ping, 0);
pp->db_delay = msecs_to_jiffies(delay_ms);
rc = ntb_set_ctx(ntb, pp, &pp_ops);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f2e649ff746f..26618ba8f92a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -761,10 +761,10 @@ EXPORT_SYMBOL(of_find_node_opts_by_path);
/**
* of_find_node_by_name - Find a node by its "name" property
- * @from: The node to start searching from or NULL, the node
+ * @from: The node to start searching from or NULL; the node
* you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
+ * will. Typically, you pass what the previous call
+ * returned. of_node_put() will be called on @from.
* @name: The name string to match against
*
* Returns a node pointer with refcount incremented, use
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index e9ec931f5b9a..a7b1cb6c2f65 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -374,7 +374,7 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, *id_out);
+ rid_len, rid, masked_rid - rid_base + out_base);
return 0;
}
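of_pci_map_rid() only writes *id_out when the caller passed a non-NULL pointer, so the debug message dereferenced id_out even for callers that legitimately pass NULL; it now recomputes the value it would have stored from masked_rid, rid_base and out_base. The translation restated as a sketch (masked_rid is assumed to be rid masked with map_mask, as computed earlier in that function):

    /* Sketch of the RID translation printed in the message above. */
    #include <linux/types.h>

    static inline u32 rid_translate(u32 rid, u32 map_mask,
                                    u32 rid_base, u32 out_base)
    {
        u32 masked_rid = rid & map_mask;

        return masked_rid - rid_base + out_base;
    }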
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 3031fc2f18f6..32389acfa616 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+DTC_FLAGS_testcases := -Wno-interrupts_property
obj-y += testcases.dtb.o
targets += testcases.dtb testcases.dtb.S
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
index ce49463d9d32..55fe0ee20109 100644
--- a/drivers/of/unittest-data/testcases.dts
+++ b/drivers/of/unittest-data/testcases.dts
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
+/plugin/;
+
/ {
testcase-data {
changeset {
@@ -15,66 +17,3 @@
#include "tests-match.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
-
-/*
- * phandle fixup data - generated by dtc patches that aren't upstream.
- * This data must be regenerated whenever phandle references are modified in
- * the testdata tree.
- *
- * The format of this data may be subject to change. For the time being consider
- * this a kernel-internal data format.
- */
-/ { __local_fixups__ {
- testcase-data {
- phandle-tests {
- consumer-a {
- phandle-list = <0x00000000 0x00000008
- 0x00000018 0x00000028
- 0x00000034 0x00000038>;
- phandle-list-bad-args = <0x00000000 0x0000000c>;
- };
- };
- interrupts {
- intmap0 {
- interrupt-map = <0x00000004 0x00000010
- 0x00000024 0x00000034>;
- };
- intmap1 {
- interrupt-map = <0x0000000c>;
- };
- interrupts0 {
- interrupt-parent = <0x00000000>;
- };
- interrupts1 {
- interrupt-parent = <0x00000000>;
- };
- interrupts-extended0 {
- interrupts-extended = <0x00000000 0x00000008
- 0x00000018 0x00000024
- 0x0000002c 0x00000034
- 0x0000003c>;
- };
- };
- testcase-device1 {
- interrupt-parent = <0x00000000>;
- };
- testcase-device2 {
- interrupt-parent = <0x00000000>;
- };
- overlay2 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- overlay3 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- overlay4 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- };
-}; };
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 90944667ccea..bda151788f3f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -80,15 +80,6 @@ config XEN_PCIDEV_FRONTEND
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
-config HT_IRQ
- bool "Interrupts on hypertransport devices"
- default y
- depends on PCI && X86_LOCAL_APIC
- help
- This allows native hypertransport devices to use interrupts.
-
- If unsure say Y.
-
config PCI_ATS
bool
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d5e047f0a32..c7819b973df7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -21,9 +21,6 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
-# Build the Hypertransport interrupt support
-obj-$(CONFIG_HT_IRQ) += htirq.o
-
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
deleted file mode 100644
index bb88c26f5144..000000000000
--- a/drivers/pci/htirq.c
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * File: htirq.c
- * Purpose: Hypertransport Interrupt Capability
- *
- * Copyright (C) 2006 Linux Networx
- * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/htirq.h>
-
-/* Global ht irq lock.
- *
- * This is needed to serialize access to the data port in hypertransport
- * irq capability.
- *
- * With multiple simultaneous hypertransport irq devices it might pay
- * to make this more fine grained. But start with simple, stupid, and correct.
- */
-static DEFINE_SPINLOCK(ht_irq_lock);
-
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
- unsigned long flags;
-
- spin_lock_irqsave(&ht_irq_lock, flags);
- if (cfg->msg.address_lo != msg->address_lo) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
- }
- if (cfg->msg.address_hi != msg->address_hi) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
- }
- if (cfg->update)
- cfg->update(cfg->dev, irq, msg);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
- cfg->msg = *msg;
-}
-
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-
- *msg = cfg->msg;
-}
-
-void mask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo |= 1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-void unmask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo &= ~1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-/**
- * __ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- * @update: Function to be called when changing the htirq message
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
-{
- int max_irq, pos, irq;
- unsigned long flags;
- u32 data;
-
- pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
- if (!pos)
- return -EINVAL;
-
- /* Verify the idx I want to use is in range */
- spin_lock_irqsave(&ht_irq_lock, flags);
- pci_write_config_byte(dev, pos + 2, 1);
- pci_read_config_dword(dev, pos + 4, &data);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
-
- max_irq = (data >> 16) & 0xff;
- if (idx > max_irq)
- return -EINVAL;
-
- irq = arch_setup_ht_irq(idx, pos, dev, update);
- if (irq > 0)
- dev_dbg(&dev->dev, "irq %d for HT\n", irq);
-
- return irq;
-}
-EXPORT_SYMBOL(__ht_create_irq);
-
-/**
- * ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- *
- * ht_create_irq needs to be called for all hypertransport devices
- * that generate irqs.
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int ht_create_irq(struct pci_dev *dev, int idx)
-{
- return __ht_create_irq(dev, idx, NULL);
-}
-EXPORT_SYMBOL(ht_create_irq);
-
-/**
- * ht_destroy_irq - destroy an irq created with ht_create_irq
- * @irq: irq to be destroyed
- *
- * This reverses ht_create_irq removing the specified irq from
- * existence. The irq should be free before this happens.
- */
-void ht_destroy_irq(unsigned int irq)
-{
- arch_teardown_ht_irq(irq);
-}
-EXPORT_SYMBOL(ht_destroy_irq);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2d704361f672..bf897b1832b1 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2074,8 +2074,10 @@ static int __init dell_init(void)
goto fail_platform_device2;
buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
- if (!buffer)
+ if (!buffer) {
+ ret = -ENOMEM;
goto fail_buffer;
+ }
ret = dell_setup_rfkill();
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 0cab1f9c35af..609557aa5868 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -147,7 +147,10 @@ fail_smbios_cmd:
static int dell_smbios_wmi_probe(struct wmi_device *wdev)
{
+ struct wmi_driver *wdriver =
+ container_of(wdev->dev.driver, struct wmi_driver, driver);
struct wmi_smbios_priv *priv;
+ u32 hotfix;
int count;
int ret;
@@ -164,6 +167,16 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev)
if (!dell_wmi_get_size(&priv->req_buf_size))
return -EPROBE_DEFER;
+ /* some SMBIOS calls fail unless BIOS contains hotfix */
+ if (!dell_wmi_get_hotfix(&hotfix))
+ return -EPROBE_DEFER;
+ if (!hotfix) {
+ dev_warn(&wdev->dev,
+ "WMI SMBIOS userspace interface not supported (%u), try upgrading to a newer BIOS\n",
+ hotfix);
+ wdriver->filter_callback = NULL;
+ }
+
/* add in the length object we will use internally with ioctl */
priv->req_buf_size += sizeof(u64);
ret = set_required_buffer_size(wdev, priv->req_buf_size);
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c
index 4dfef1f53481..072821aa47fc 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.c
+++ b/drivers/platform/x86/dell-wmi-descriptor.c
@@ -27,6 +27,7 @@ struct descriptor_priv {
struct list_head list;
u32 interface_version;
u32 size;
+ u32 hotfix;
};
static int descriptor_valid = -EPROBE_DEFER;
static LIST_HEAD(wmi_list);
@@ -77,6 +78,24 @@ bool dell_wmi_get_size(u32 *size)
}
EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *hotfix = priv->hotfix;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
/*
* Descriptor buffer is 128 byte long and contains:
*
@@ -85,6 +104,7 @@ EXPORT_SYMBOL_GPL(dell_wmi_get_size);
* Object Signature 4 4 " WMI"
* WMI Interface Version 8 4 <version>
* WMI buffer length 12 4 <length>
+ * WMI hotfix number 16 4 <hotfix>
*/
static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
{
@@ -144,15 +164,17 @@ static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
priv->interface_version = buffer[2];
priv->size = buffer[3];
+ priv->hotfix = buffer[4];
ret = 0;
dev_set_drvdata(&wdev->dev, priv);
mutex_lock(&list_mutex);
list_add_tail(&priv->list, &wmi_list);
mutex_unlock(&list_mutex);
- dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu and buffer size %lu\n",
+ dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
(unsigned long) priv->interface_version,
- (unsigned long) priv->size);
+ (unsigned long) priv->size,
+ (unsigned long) priv->hotfix);
out:
kfree(obj);
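
For reference, the descriptor layout documented above can be pictured as a C
struct. This is only a sketch of the 128-byte buffer: the field at offset 0 is
not covered by the excerpt above and the names are illustrative, not part of
the driver.

#include <stdint.h>

/* Sketch of the 128-byte Dell WMI descriptor buffer (illustrative names) */
struct dell_wmi_descriptor_layout {
	uint8_t  head[4];               /* offset 0, not described above */
	char     signature[4];          /* offset 4, " WMI" */
	uint32_t interface_version;     /* offset 8 */
	uint32_t buffer_length;         /* offset 12 */
	uint32_t hotfix;                /* offset 16 */
	uint8_t  reserved[108];         /* remainder of the 128-byte buffer */
};

/* The probe routine reads the same buffer as u32 words: buffer[2] is the
 * interface version, buffer[3] the buffer length and buffer[4] the hotfix. */
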
diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell-wmi-descriptor.h
index 1e8cb96ffd78..a6123a4d06a7 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.h
+++ b/drivers/platform/x86/dell-wmi-descriptor.h
@@ -23,5 +23,6 @@ int dell_wmi_get_descriptor_valid(void);
bool dell_wmi_get_interface_version(u32 *version);
bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
#endif
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 62aa2c37b8d2..935121814c97 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -363,7 +363,7 @@ static int sony_laptop_input_keycode_map[] = {
};
/* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(unsigned long unused)
+static void do_sony_laptop_release_key(struct timer_list *unused)
{
struct sony_laptop_keypress kp;
unsigned long flags;
@@ -470,7 +470,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
goto err_dec_users;
}
- setup_timer(&sony_laptop_input.release_key_timer,
+ timer_setup(&sony_laptop_input.release_key_timer,
do_sony_laptop_release_key, 0);
/* input keys */
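
The sony-laptop conversion above, like the pps-ktimer and rtc-dev hunks further
down, follows the kernel's timer API migration: timer_setup() replaces
setup_timer(), the callback receives a struct timer_list pointer instead of an
unsigned long cookie, and the owning structure is recovered with from_timer().
A minimal sketch of the pattern, with hypothetical names (my_driver,
my_timer_fn):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_driver {
	struct timer_list timer;	/* embedded timer */
	int pending;
};

/* New-style callback: takes the timer itself, not an opaque cookie */
static void my_timer_fn(struct timer_list *t)
{
	struct my_driver *drv = from_timer(drv, t, timer);

	drv->pending = 0;
}

static void my_driver_start(struct my_driver *drv)
{
	timer_setup(&drv->timer, my_timer_fn, 0);
	mod_timer(&drv->timer, jiffies + HZ);	/* fire in one second */
}
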
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 436b4e4e71a1..04735649052a 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -39,7 +39,7 @@ static struct timer_list ktimer;
* The kernel timer
*/
-static void pps_ktimer_event(unsigned long ptr)
+static void pps_ktimer_event(struct timer_list *unused)
{
struct pps_event_time ts;
@@ -85,7 +85,7 @@ static int __init pps_ktimer_init(void)
return -ENOMEM;
}
- setup_timer(&ktimer, pps_ktimer_event, 0);
+ timer_setup(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
dev_info(pps->dev, "ktimer PPS source registered\n");
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 75db585a2a94..acd3ce8ecf3f 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -37,11 +37,20 @@ struct atmel_tcb_pwm_device {
unsigned period; /* PWM period expressed in clk cycles */
};
+struct atmel_tcb_channel {
+ u32 enabled;
+ u32 cmr;
+ u32 ra;
+ u32 rb;
+ u32 rc;
+};
+
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
struct atmel_tc *tc;
struct atmel_tcb_pwm_device *pwms[NPWM];
+ struct atmel_tcb_channel bkup[NPWM / 2];
};
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
@@ -175,12 +184,15 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
- if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
__raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
regs + ATMEL_TC_REG(group, CCR));
- else
+ tcbpwmc->bkup[group].enabled = 1;
+ } else {
__raw_writel(ATMEL_TC_SWTRG, regs +
ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 0;
+ }
spin_unlock(&tcbpwmc->lock);
}
@@ -263,6 +275,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Use software trigger to apply the new setting */
__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
regs + ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -445,10 +458,56 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_tcb_pwm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
+ chan->ra = readl(base + ATMEL_TC_REG(i, RA));
+ chan->rb = readl(base + ATMEL_TC_REG(i, RB));
+ chan->rc = readl(base + ATMEL_TC_REG(i, RC));
+ }
+ return 0;
+}
+
+static int atmel_tcb_pwm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
+ writel(chan->ra, base + ATMEL_TC_REG(i, RA));
+ writel(chan->rb, base + ATMEL_TC_REG(i, RB));
+ writel(chan->rc, base + ATMEL_TC_REG(i, RC));
+ if (chan->enabled) {
+ writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+ base + ATMEL_TC_REG(i, CCR));
+ }
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+ atmel_tcb_pwm_resume);
+
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
.of_match_table = atmel_tcb_pwm_dt_ids,
+ .pm = &atmel_tcb_pwm_pm_ops,
},
.probe = atmel_tcb_pwm_probe,
.remove = atmel_tcb_pwm_remove,
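
The new suspend/resume pair is wired up through SIMPLE_DEV_PM_OPS(). A minimal
sketch of how the macro and the CONFIG_PM_SLEEP guard fit together
(hypothetical my_* names; the real callbacks save and restore the channel
registers as above):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev)
{
	return 0;	/* save hardware context here */
}

static int my_resume(struct device *dev)
{
	return 0;	/* restore hardware context here */
}
#endif

/*
 * Expands to a struct dev_pm_ops named my_pm_ops; the system-sleep
 * callbacks are only referenced when CONFIG_PM_SLEEP is set, matching
 * the #ifdef guard above.
 */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
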
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 2fb30deee345..815f5333bb8f 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
+#define IMG_PWM_PM_TIMEOUT 1000 /* ms */
+
/*
* PWM period is specified with a timebase register,
* in number of step periods. The PWM duty cycle is also
@@ -52,6 +55,8 @@
*/
#define MIN_TMBASE_STEPS 16
+#define IMG_PWM_NPWM 4
+
struct img_pwm_soc_data {
u32 max_timebase;
};
@@ -66,6 +71,8 @@ struct img_pwm_chip {
int max_period_ns;
int min_period_ns;
const struct img_pwm_soc_data *data;
+ u32 suspend_ctrl_cfg;
+ u32 suspend_ch_cfg[IMG_PWM_NPWM];
};
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -92,6 +99,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long mul, output_clk_hz, input_clk_hz;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
unsigned int max_timebase = pwm_chip->data->max_timebase;
+ int ret;
if (period_ns < pwm_chip->min_period_ns ||
period_ns > pwm_chip->max_period_ns) {
@@ -123,6 +131,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
+
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
@@ -133,6 +145,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
(timebase << PWM_CH_CFG_TMBASE_SHIFT);
img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
+
return 0;
}
@@ -140,6 +155,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
u32 val;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val |= BIT(pwm->hwpwm);
@@ -160,6 +180,9 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~BIT(pwm->hwpwm);
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
}
static const struct pwm_ops img_pwm_ops = {
@@ -182,6 +205,37 @@ static const struct of_device_id img_pwm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+static int img_pwm_runtime_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pwm_chip->pwm_clk);
+ clk_disable_unprepare(pwm_chip->sys_clk);
+
+ return 0;
+}
+
+static int img_pwm_runtime_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(pwm_chip->sys_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pwm_chip->pwm_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable pwm clock\n");
+ clk_disable_unprepare(pwm_chip->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int img_pwm_probe(struct platform_device *pdev)
{
int ret;
@@ -224,23 +278,20 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}
- ret = clk_prepare_enable(pwm->sys_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(pwm->pwm_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
- goto disable_sysclk;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_pwm_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
}
clk_rate = clk_get_rate(pwm->pwm_clk);
if (!clk_rate) {
dev_err(&pdev->dev, "pwm clock has no frequency\n");
ret = -EINVAL;
- goto disable_pwmclk;
+ goto err_suspend;
}
/* The maximum input clock divider is 512 */
@@ -255,21 +306,23 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
pwm->chip.base = -1;
- pwm->chip.npwm = 4;
+ pwm->chip.npwm = IMG_PWM_NPWM;
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
- goto disable_pwmclk;
+ goto err_suspend;
}
platform_set_drvdata(pdev, pwm);
return 0;
-disable_pwmclk:
- clk_disable_unprepare(pwm->pwm_clk);
-disable_sysclk:
- clk_disable_unprepare(pwm->sys_clk);
+err_suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return ret;
}
@@ -278,6 +331,11 @@ static int img_pwm_remove(struct platform_device *pdev)
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
u32 val;
unsigned int i;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < pwm_chip->chip.npwm; i++) {
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
@@ -285,15 +343,79 @@ static int img_pwm_remove(struct platform_device *pdev)
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
}
- clk_disable_unprepare(pwm_chip->pwm_clk);
- clk_disable_unprepare(pwm_chip->sys_clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
return pwmchip_remove(&pwm_chip->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int img_pwm_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip,
+ PWM_CH_CFG(i));
+
+ pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int img_pwm_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ img_pwm_writel(pwm_chip, PWM_CH_CFG(i),
+ pwm_chip->suspend_ch_cfg[i]);
+
+ img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg);
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ if (pwm_chip->suspend_ctrl_cfg & BIT(i))
+ regmap_update_bits(pwm_chip->periph_regs,
+ PERIP_PWM_PDM_CONTROL,
+ PERIP_PWM_PDM_CONTROL_CH_MASK <<
+ PERIP_PWM_PDM_CONTROL_CH_SHIFT(i),
+ 0);
+
+ if (pm_runtime_status_suspended(dev))
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_pwm_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_pwm_runtime_suspend,
+ img_pwm_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_pwm_suspend, img_pwm_resume)
+};
+
static struct platform_driver img_pwm_driver = {
.driver = {
.name = "img-pwm",
+ .pm = &img_pwm_pm_ops,
.of_match_table = img_pwm_of_match,
},
.probe = img_pwm_probe,
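
The pwm-img rework moves clock handling under runtime PM with autosuspend. A
condensed sketch of the probe-time pattern, with hypothetical my_* names and
the error handling trimmed to the essentials:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define MY_AUTOSUSPEND_MS	1000	/* illustrative, mirrors IMG_PWM_PM_TIMEOUT */

/* Stand-in for the driver's clock-enable path (img_pwm_runtime_resume above) */
static int my_runtime_resume(struct device *dev)
{
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MY_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* With runtime PM compiled out, bring the hardware up by hand */
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = my_runtime_resume(&pdev->dev);
		if (ret) {
			pm_runtime_disable(&pdev->dev);
			pm_runtime_dont_use_autosuspend(&pdev->dev);
			return ret;
		}
	}

	return 0;
}
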
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b52f3afb2ba1..f5d97e0ad52b 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -40,11 +41,19 @@ enum {
MTK_CLK_PWM3,
MTK_CLK_PWM4,
MTK_CLK_PWM5,
+ MTK_CLK_PWM6,
+ MTK_CLK_PWM7,
+ MTK_CLK_PWM8,
MTK_CLK_MAX,
};
-static const char * const mtk_pwm_clk_name[] = {
- "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5"
+static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
+ "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7",
+ "pwm8"
+};
+
+struct mtk_pwm_platform_data {
+ unsigned int num_pwms;
};
/**
@@ -59,6 +68,10 @@ struct mtk_pwm_chip {
struct clk *clks[MTK_CLK_MAX];
};
+static const unsigned int mtk_pwm_reg_offset[] = {
+ 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
+};
+
static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct mtk_pwm_chip, chip);
@@ -103,14 +116,14 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
unsigned int offset)
{
- return readl(chip->regs + 0x10 + (num * 0x40) + offset);
+ return readl(chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
{
- writel(value, chip->regs + 0x10 + (num * 0x40) + offset);
+ writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -185,6 +198,7 @@ static const struct pwm_ops mtk_pwm_ops = {
static int mtk_pwm_probe(struct platform_device *pdev)
{
+ const struct mtk_pwm_platform_data *data;
struct mtk_pwm_chip *pc;
struct resource *res;
unsigned int i;
@@ -194,15 +208,22 @@ static int mtk_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
+ data = of_device_get_match_data(&pdev->dev);
+ if (data == NULL)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- for (i = 0; i < MTK_CLK_MAX; i++) {
+ for (i = 0; i < data->num_pwms + 2; i++) {
pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
- if (IS_ERR(pc->clks[i]))
+ if (IS_ERR(pc->clks[i])) {
+ dev_err(&pdev->dev, "clock: %s fail: %ld\n",
+ mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i]));
return PTR_ERR(pc->clks[i]);
+ }
}
platform_set_drvdata(pdev, pc);
@@ -210,7 +231,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &mtk_pwm_ops;
pc->chip.base = -1;
- pc->chip.npwm = 5;
+ pc->chip.npwm = data->num_pwms;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -228,9 +249,23 @@ static int mtk_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+ .num_pwms = 8,
+};
+
+static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+ .num_pwms = 6,
+};
+
+static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+ .num_pwms = 5,
+};
+
static const struct of_device_id mtk_pwm_of_match[] = {
- { .compatible = "mediatek,mt7623-pwm" },
- { }
+ { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+ { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
+ { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { },
};
MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 9793b296108f..1ac9e4384142 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -219,8 +219,7 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
unsigned int i;
for (i = 0; i < priv->chip.npwm; i++)
- if (pwm_is_enabled(&priv->chip.pwms[i]))
- pwm_disable(&priv->chip.pwms[i]);
+ pwm_disable(&priv->chip.pwms[i]);
return pwmchip_remove(&priv->chip);
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 6d23f1d1c9b7..334199c58f1d 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -368,14 +368,15 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
struct sun4i_pwm_chip *pwm;
struct resource *res;
int ret;
- const struct of_device_id *match;
-
- match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return -ENOMEM;
+ pwm->data = of_device_get_match_data(&pdev->dev);
+ if (!pwm->data)
+ return -ENODEV;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pwm->base))
@@ -385,7 +386,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
- pwm->data = match->data;
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
pwm->chip.base = -1;
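
Both the pwm-mediatek and pwm-sun4i hunks above switch to
of_device_get_match_data(), which hands back the .data pointer of the matching
of_device_id directly. A minimal sketch with a hypothetical per-SoC data
structure:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_soc_data {
	unsigned int num_channels;
};

static const struct my_soc_data my_soc_a_data = { .num_channels = 8 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-soc-a", .data = &my_soc_a_data },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_data *data;

	/* Returns the .data of the matching entry, or NULL if none matched */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	dev_info(&pdev->dev, "%u channels\n", data->num_channels);
	return 0;
}
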
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e0e58f3b1420..b59a31b079a5 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -433,6 +433,19 @@ config RTC_DRV_PCF85063
This driver can also be built as a module. If so, the module
will be called rtc-pcf85063.
+config RTC_DRV_PCF85363
+ tristate "NXP PCF85363"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the PCF85363 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85363.
+
+ The nvmem interface will be named pcf85363-#, where # is the
+ zero-based instance number.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -1174,6 +1187,17 @@ config RTC_DRV_WM8350
This driver can also be built as a module. If so, the module
will be called "rtc-wm8350".
+config RTC_DRV_SC27XX
+ tristate "Spreadtrum SC27xx RTC"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ If you say Y here you will get support for the RTC subsystem
+ of the Spreadtrum SC27xx series PMICs. The SC27xx series PMICs
+ include the SC2720, SC2721, SC2723, SC2730 and SC2731 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sc27xx.
+
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -1706,14 +1730,24 @@ config RTC_DRV_MOXART
will be called rtc-moxart
config RTC_DRV_MT6397
- tristate "Mediatek Real Time Clock driver"
+ tristate "MediaTek PMIC based RTC"
depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
help
- This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
+ This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
- Mediatek(R) RTC driver.
+ MediaTek(R) RTC driver.
+
+ If you want to use MediaTek(R) RTC interface, select Y or M here.
- If you want to use Mediatek(R) RTC interface, select Y or M here.
+config RTC_DRV_MT7622
+ tristate "MediaTek SoC based RTC"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the real time clock built into MediaTek
+ SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-mt7622.
config RTC_DRV_XGENE
tristate "APM X-Gene RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0bf1fc02b82c..f2f50c11dc38 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
+obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
@@ -114,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
+obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
@@ -144,6 +146,7 @@ obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
+obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8cec9a02c0b8..672b192f8153 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
}
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (!next) {
+ if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
@@ -1004,6 +1004,10 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
* to compensate for differences in the actual clock rate due to temperature,
* the crystal, capacitor, etc.
*
+ * The adjustment applied is as follows:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t0 is the measured length of 1 RTC second with offset = 0
+ *
* Kernel interface to adjust an rtc clock offset.
* Return 0 on success, or a negative number on error.
* If the rtc offset is not setable (or not implemented), return -EINVAL
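
The offset semantics documented above are easy to sanity-check numerically. A
small standalone sketch (plain userspace C, offsets chosen for illustration
only) applying t = t0 * (1 + offset * 1e-9):

#include <stdio.h>

/* Apply the documented correction: t = t0 * (1 + offset * 1e-9) */
static double corrected_second(double t0, long offset_ppb)
{
	return t0 * (1.0 + offset_ppb * 1e-9);
}

int main(void)
{
	long offsets[] = { 0, 1000, -500000 };	/* parts per billion */

	for (int i = 0; i < 3; i++)
		printf("offset %7ld ppb -> 1 RTC second lasts %.9f s\n",
		       offsets[i], corrected_second(1.0, offsets[i]));
	return 0;
}
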
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index fea9a60b06cf..b033bc556f5d 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -614,12 +614,12 @@ static int abx80x_probe(struct i2c_client *client,
if (err)
return err;
- rtc = devm_rtc_device_register(&client->dev, "abx8xx",
- &abx80x_rtc_ops, THIS_MODULE);
-
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &abx80x_rtc_ops;
+
i2c_set_clientdata(client, rtc);
if (client->irq > 0) {
@@ -646,10 +646,14 @@ static int abx80x_probe(struct i2c_client *client,
err = devm_add_action_or_reset(&client->dev,
rtc_calib_remove_sysfs_group,
&client->dev);
- if (err)
+ if (err) {
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n",
err);
+ return err;
+ }
+
+ err = rtc_register_device(rtc);
return err;
}
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 21f355c37eab..1e4978c96ffd 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -28,6 +28,8 @@
#define RTC_IRQ_AL_EN BIT(0)
#define RTC_IRQ_FREQ_EN BIT(1)
#define RTC_IRQ_FREQ_1HZ BIT(2)
+#define RTC_CCR 0x18
+#define RTC_CCR_MODE BIT(15)
#define RTC_TIME 0xC
#define RTC_ALARM1 0x10
@@ -343,18 +345,117 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The information given in the Armada 388 functional spec is complex.
+ * They give two different formulas for calculating the offset value,
+ * but when considering "Offset" as an 8-bit signed integer, they both
+ * reduce down to (we shall rename "Offset" as "val" here):
+ *
+ * val = (f_ideal / f_measured - 1) / resolution where f_ideal = 32768
+ *
+ * Converting to time, f = 1/t:
+ * val = (t_measured / t_ideal - 1) / resolution where t_ideal = 1/32768
+ *
+ * => t_measured / t_ideal = val * resolution + 1
+ *
+ * "offset" in the RTC interface is defined as:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t is the desired period, t0 is the measured period with a zero
+ * offset, which is t_measured above. With t0 = t_measured and t = t_ideal,
+ * offset = (t_ideal / t_measured - 1) / 1e-9
+ *
+ * => t_ideal / t_measured = offset * 1e-9 + 1
+ *
+ * so:
+ *
+ * offset * 1e-9 + 1 = 1 / (val * resolution + 1)
+ *
+ * We want "resolution" to be an integer, so resolution = R * 1e-9, giving
+ * offset = 1e18 / (val * R + 1e9) - 1e9
+ * val = (1e18 / (offset + 1e9) - 1e9) / R
+ * with a common transformation:
+ * f(x) = 1e18 / (x + 1e9) - 1e9
+ * offset = f(val * R)
+ * val = f(offset) / R
+ *
+ * Armada 38x supports two modes, fine mode (954ppb) and coarse mode (3815ppb).
+ */
+static long armada38x_ppb_convert(long ppb)
+{
+ long div = ppb + 1000000000L;
+
+ return div_s64(1000000000000000000LL + div / 2, div) - 1000000000L;
+}
+
+static int armada38x_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr, flags;
+ long ppb_cor;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ ccr = rtc->data->read_rtc_reg(rtc, RTC_CCR);
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ ppb_cor = (ccr & RTC_CCR_MODE ? 3815 : 954) * (s8)ccr;
+ /* ppb_cor + 1000000000L can never be zero */
+ *offset = armada38x_ppb_convert(ppb_cor);
+
+ return 0;
+}
+
+static int armada38x_rtc_set_offset(struct device *dev, long offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr = 0;
+ long ppb_cor, off;
+
+ /*
+ * The maximum ppb_cor is -128 * 3815 .. 127 * 3815, but we
+ * need to clamp the input. This equates to -484270 .. 488558.
+ * Not only is this to stop out of range "off" but also to
+ * avoid the division by zero in armada38x_ppb_convert().
+ */
+ offset = clamp(offset, -484270L, 488558L);
+
+ ppb_cor = armada38x_ppb_convert(offset);
+
+ /*
+ * Use low update mode where possible, which gives a better
+ * resolution of correction.
+ */
+ off = DIV_ROUND_CLOSEST(ppb_cor, 954);
+ if (off > 127 || off < -128) {
+ ccr = RTC_CCR_MODE;
+ off = DIV_ROUND_CLOSEST(ppb_cor, 3815);
+ }
+
+ /*
+ * Armada 388 requires a bit pattern in bits 14..8 depending on
+ * the sign bit: { 0, ~S, S, S, S, S, S }
+ */
+ ccr |= (off & 0x3fff) ^ 0x2000;
+ rtc_delayed_write(ccr, rtc, RTC_CCR);
+
+ return 0;
+}
+
static const struct rtc_class_ops armada38x_rtc_ops = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
.set_alarm = armada38x_rtc_set_alarm,
.alarm_irq_enable = armada38x_rtc_alarm_irq_enable,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct rtc_class_ops armada38x_rtc_ops_noirq = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct armada38x_rtc_data armada38x_data = {
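
As the comment block notes, f(x) = 1e18 / (x + 1e9) - 1e9 is its own inverse,
so the conversion can be checked by round-tripping a value. A standalone sketch
re-implementing armada38x_ppb_convert() in userspace, using one fine-mode step
(954 ppb) as an illustrative input:

#include <stdio.h>

/* Userspace copy of armada38x_ppb_convert(): rounded 1e18 / (x + 1e9) - 1e9 */
static long long ppb_convert(long long ppb)
{
	long long div = ppb + 1000000000LL;

	return (1000000000000000000LL + div / 2) / div - 1000000000LL;
}

int main(void)
{
	long long ppb_cor = 954;	/* one fine-mode step of crystal error */
	long long offset = ppb_convert(ppb_cor);

	/* Prints offset=-954 and back=954: the function inverts itself */
	printf("ppb_cor=%lld -> offset=%lld, back=%lld\n",
	       ppb_cor, offset, ppb_convert(offset));
	return 0;
}
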
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e221b78b6f10..de81ecedd571 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -42,8 +42,6 @@
#define at91_rtc_write(field, val) \
writel_relaxed((val), at91_rtc_regs + field)
-#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
-
struct at91_rtc_config {
bool use_shadow_imr;
};
@@ -51,7 +49,6 @@ struct at91_rtc_config {
static const struct at91_rtc_config *at91_rtc_config;
static DECLARE_COMPLETION(at91_rtc_updated);
static DECLARE_COMPLETION(at91_rtc_upd_rdy);
-static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
static DEFINE_SPINLOCK(at91_rtc_lock);
@@ -131,8 +128,7 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/*
* The Calendar Alarm register does not have a field for
- * the year - so these will return an invalid value. When an
- * alarm is set, at91_alarm_year will store the current year.
+ * the year - so these will return an invalid value.
*/
tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */
tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */
@@ -208,15 +204,14 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
- tm->tm_year = at91_alarm_year - 1900;
+ tm->tm_year = -1;
alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
? 1 : 0;
- dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ dev_dbg(dev, "%s(): %02d-%02d %02d:%02d:%02d %sabled\n", __func__,
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ alrm->enabled ? "en" : "dis");
return 0;
}
@@ -230,8 +225,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
- at91_alarm_year = tm.tm_year;
-
tm.tm_mon = alrm->time.tm_mon;
tm.tm_mday = alrm->time.tm_mday;
tm.tm_hour = alrm->time.tm_hour;
@@ -255,7 +248,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
+ tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
return 0;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 00efe24a6063..215eac68ae2d 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -71,9 +71,9 @@ static void rtc_uie_task(struct work_struct *work)
if (num)
rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
-static void rtc_uie_timer(unsigned long data)
+static void rtc_uie_timer(struct timer_list *t)
{
- struct rtc_device *rtc = (struct rtc_device *)data;
+ struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
unsigned long flags;
spin_lock_irqsave(&rtc->irq_lock, flags);
@@ -460,7 +460,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
INIT_WORK(&rtc->uie_task, rtc_uie_task);
- setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+ timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif
cdev_init(&rtc->char_dev, &rtc_dev_fops);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 72b22935eb62..d8df2e9e14ad 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -514,56 +514,43 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
spi_message_add_tail(x, m);
}
-static ssize_t
-ds1305_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = DS1305_NVRAM + off;
msg_init(&m, x, &addr, count, NULL, buf);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static ssize_t
-ds1305_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = (DS1305_WRITE | DS1305_NVRAM) + off;
msg_init(&m, x, &addr, count, buf, NULL);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static struct bin_attribute nvram = {
- .attr.name = "nvram",
- .attr.mode = S_IRUGO | S_IWUSR,
- .read = ds1305_nvram_read,
- .write = ds1305_nvram_write,
- .size = DS1305_NVRAM_LEN,
+static struct nvmem_config ds1305_nvmem_cfg = {
+ .name = "ds1305_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = DS1305_NVRAM_LEN,
+ .reg_read = ds1305_nvram_read,
+ .reg_write = ds1305_nvram_write,
};
/*----------------------------------------------------------------------*/
@@ -708,10 +695,19 @@ static int ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- ds1305->rtc = devm_rtc_device_register(&spi->dev, "ds1305",
- &ds1305_ops, THIS_MODULE);
+ ds1305->rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(ds1305->rtc)) {
- status = PTR_ERR(ds1305->rtc);
+ return PTR_ERR(ds1305->rtc);
+ }
+
+ ds1305->rtc->ops = &ds1305_ops;
+
+ ds1305_nvmem_cfg.priv = ds1305;
+ ds1305->rtc->nvmem_config = &ds1305_nvmem_cfg;
+ ds1305->rtc->nvram_old_abi = true;
+
+ status = rtc_register_device(ds1305->rtc);
+ if (status) {
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
return status;
}
@@ -734,12 +730,6 @@ static int ds1305_probe(struct spi_device *spi)
}
}
- /* export NVRAM */
- status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
- if (status < 0) {
- dev_err(&spi->dev, "register nvram --> %d\n", status);
- }
-
return 0;
}
@@ -747,8 +737,6 @@ static int ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
-
/* carefully shut down irq and workqueue, if present */
if (spi->irq) {
set_bit(FLAG_EXITING, &ds1305->flags);
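
The ds1305 conversion above, like the ds1511 and m48t86 ones further down,
replaces the hand-rolled sysfs nvram attribute with the RTC core's nvmem
support. A minimal sketch of the registration pattern, with hypothetical my_*
names and dummy accessors:

#include <linux/err.h>
#include <linux/nvmem-provider.h>
#include <linux/rtc.h>

/* Dummy accessors; a real driver talks to its hardware here */
static int my_nvram_read(void *priv, unsigned int off, void *buf, size_t count)
{
	return 0;
}

static int my_nvram_write(void *priv, unsigned int off, void *buf, size_t count)
{
	return 0;
}

static struct nvmem_config my_nvmem_cfg = {
	.name = "my_nvram",
	.word_size = 1,
	.stride = 1,
	.size = 64,			/* illustrative NVRAM size */
	.reg_read = my_nvram_read,
	.reg_write = my_nvram_write,
};

static int my_register_rtc(struct device *dev, void *priv)
{
	struct rtc_device *rtc = devm_rtc_allocate_device(dev);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* rtc->ops would be assigned here as well */
	my_nvmem_cfg.priv = priv;
	rtc->nvmem_config = &my_nvmem_cfg;
	rtc->nvram_old_abi = true;	/* keep the legacy sysfs "nvram" file */

	return rtc_register_device(rtc);
}
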
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e7d9215c9201..923dde912f60 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -325,6 +325,10 @@ static const struct of_device_id ds1307_of_match[] = {
.compatible = "isil,isl12057",
.data = (void *)ds_1337
},
+ {
+ .compatible = "epson,rx8130",
+ .data = (void *)rx_8130
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
@@ -348,6 +352,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
{ .id = "PT7C4338", .driver_data = ds_1307 },
{ .id = "RX8025", .driver_data = rx_8025 },
{ .id = "ISL12057", .driver_data = ds_1337 },
+ { .id = "RX8130", .driver_data = rx_8130 },
{ }
};
MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
@@ -787,8 +792,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
* Alarm support for mcp794xx devices.
*/
-#define MCP794XX_REG_WEEKDAY 0x3
-#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
# define MCP794XX_BIT_ALM1_EN 0x20
@@ -877,15 +880,38 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * The RTC weekday register may hold an arbitrary value, so derive the
+ * alarm weekday from the current weekday read back from the RTC
+ * timekeeping registers.
+ */
+static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm)
+{
+ struct rtc_time tm_now;
+ int days_now, days_alarm, ret;
+
+ ret = ds1307_get_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60);
+ days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60);
+
+ return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1;
+}
+
static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char regs[10];
- int ret;
+ int wday, ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
+ wday = mcp794xx_alm_weekday(dev, &t->time);
+ if (wday < 0)
+ return wday;
+
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -902,7 +928,7 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
regs[3] = bin2bcd(t->time.tm_sec);
regs[4] = bin2bcd(t->time.tm_min);
regs[5] = bin2bcd(t->time.tm_hour);
- regs[6] = bin2bcd(t->time.tm_wday + 1);
+ regs[6] = wday;
regs[7] = bin2bcd(t->time.tm_mday);
regs[8] = bin2bcd(t->time.tm_mon + 1);
@@ -1354,14 +1380,12 @@ static int ds1307_probe(struct i2c_client *client,
{
struct ds1307 *ds1307;
int err = -ENODEV;
- int tmp, wday;
+ int tmp;
const struct chip_desc *chip;
bool want_irq;
bool ds1307_can_wakeup_device = false;
unsigned char regs[8];
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
- struct rtc_time tm;
- unsigned long timestamp;
u8 trickle_charger_setup = 0;
ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
@@ -1641,25 +1665,6 @@ read_rtc:
bin2bcd(tmp));
}
- /*
- * Some IPs have weekday reset value = 0x1 which might not correct
- * hence compute the wday using the current date/month/year values
- */
- ds1307_get_time(ds1307->dev, &tm);
- wday = tm.tm_wday;
- timestamp = rtc_tm_to_time64(&tm);
- rtc_time64_to_tm(timestamp, &tm);
-
- /*
- * Check if reset wday is different from the computed wday
- * If different then set the wday which we computed using
- * timestamp
- */
- if (wday != tm.tm_wday)
- regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY,
- MCP794XX_REG_WEEKDAY_WDAY_MASK,
- tm.tm_wday + 1);
-
if (want_irq || ds1307_can_wakeup_device) {
device_set_wakeup_capable(ds1307->dev, true);
set_bit(HAS_ALARM, &ds1307->flags);
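
The weekday calculation added above reduces to simple day arithmetic: take the
current weekday and shift it by the number of days between now and the alarm.
A standalone sketch with illustrative numbers (today is Wednesday, tm_wday = 3,
and the alarm is 9 days ahead):

#include <stdio.h>

/* Mirror of the computation in mcp794xx_alm_weekday(); the register
 * encodes weekdays as 1..7, hence the trailing "+ 1". */
static int alarm_weekday(int wday_now, long days_now, long days_alarm)
{
	return (wday_now + days_alarm - days_now) % 7 + 1;
}

int main(void)
{
	/* Prints 6: nine days after a Wednesday is a Friday (tm_wday 5) */
	printf("alarm register weekday = %d\n", alarm_weekday(3, 1000, 1009));
	return 0;
}
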
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index aa0d2c6f1edc..4d5b007d7fc6 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -216,9 +216,16 @@ static int ds1390_probe(struct spi_device *spi)
return res;
}
+static const struct of_device_id ds1390_of_match[] = {
+ { .compatible = "dallas,ds1390" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1390_of_match);
+
static struct spi_driver ds1390_driver = {
.driver = {
.name = "rtc-ds1390",
+ .of_match_table = of_match_ptr(ds1390_of_match),
},
.probe = ds1390_probe,
};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1b2dcb58c0ab..1e95312a6f2e 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -398,42 +398,37 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
};
-static ssize_t
-ds1511_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *ba,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_read(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- *buf++ = rtc_read(DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ *(char *)buf++ = rtc_read(DS1511_RAMDATA);
- return count;
+ return 0;
}
-static ssize_t
-ds1511_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- rtc_write(*buf++, DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ rtc_write(*(char *)buf++, DS1511_RAMDATA);
- return count;
+ return 0;
}
-static struct bin_attribute ds1511_nvram_attr = {
- .attr = {
- .name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
- },
+static struct nvmem_config ds1511_nvmem_cfg = {
+ .name = "ds1511_nvram",
+ .word_size = 1,
+ .stride = 1,
.size = DS1511_RAM_MAX,
- .read = ds1511_nvram_read,
- .write = ds1511_nvram_write,
+ .reg_read = ds1511_nvram_read,
+ .reg_write = ds1511_nvram_write,
};
static int ds1511_rtc_probe(struct platform_device *pdev)
@@ -477,11 +472,20 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &ds1511_rtc_ops, THIS_MODULE);
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ pdata->rtc->ops = &ds1511_rtc_ops;
+
+ ds1511_nvmem_cfg.priv = &pdev->dev;
+ pdata->rtc->nvmem_config = &ds1511_nvmem_cfg;
+ pdata->rtc->nvram_old_abi = true;
+
+ ret = rtc_register_device(pdata->rtc);
+ if (ret)
+ return ret;
+
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
@@ -496,26 +500,6 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
}
}
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret)
- dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
- ds1511_nvram_attr.attr.name);
-
- return 0;
-}
-
-static int ds1511_rtc_remove(struct platform_device *pdev)
-{
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (pdata->irq > 0) {
- /*
- * disable the alarm interrupt
- */
- rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
- rtc_read(RTC_CMD1);
- }
return 0;
}
@@ -524,7 +508,6 @@ MODULE_ALIAS("platform:ds1511");
static struct platform_driver ds1511_rtc_driver = {
.probe = ds1511_rtc_probe,
- .remove = ds1511_rtc_remove,
.driver = {
.name = "ds1511",
},
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 64989afffa3d..ff65a7d2b9c9 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -82,7 +82,7 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int timeout = 1000;
+ int timeout = 10000;
do {
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
@@ -94,7 +94,7 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int ret, timeout = 1000;
+ int ret, timeout = 10000;
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret != 0)
@@ -368,7 +368,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
if (ret) {
- dev_err(&pdev->dev, "Could not write write to RTC registers\n");
+ dev_err(&pdev->dev, "Could not write to RTC registers\n");
return ret;
}
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f4c070ea8384..c90fba3ed861 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -154,6 +154,8 @@ struct m41t80_data {
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw sqw;
+ unsigned long freq;
+ unsigned int sqwe;
#endif
};
@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
#ifdef CONFIG_COMMON_CLK
#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long m41t80_decode_freq(int setting)
+{
+ return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
+ M41T80_SQW_MAX_FREQ >> setting;
+}
+
+static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
struct i2c_client *client = m41t80->client;
int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
M41T80_REG_WDAY : M41T80_REG_SQW;
int ret = i2c_smbus_read_byte_data(client, reg_sqw);
- unsigned long val = M41T80_SQW_MAX_FREQ;
if (ret < 0)
return 0;
+ return m41t80_decode_freq(ret >> 4);
+}
- ret >>= 4;
- if (ret == 0)
- val = 0;
- else if (ret > 1)
- val = val / (1 << ret);
-
- return val;
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return sqw_to_m41t80_data(hw)->freq;
}
static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- int i, freq = M41T80_SQW_MAX_FREQ;
-
- if (freq <= rate)
- return freq;
-
- for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
- freq /= 1 << i;
- if (freq <= rate)
- return freq;
- }
-
- return 0;
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ return M41T80_SQW_MAX_FREQ;
+ if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ return M41T80_SQW_MAX_FREQ / 4;
+ if (!rate)
+ return 0;
+ return 1 << ilog2(rate);
}
static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
M41T80_REG_WDAY : M41T80_REG_SQW;
int reg, ret, val = 0;
- if (rate) {
- if (!is_power_of_2(rate))
- return -EINVAL;
- val = ilog2(rate);
- if (val == ilog2(M41T80_SQW_MAX_FREQ))
- val = 1;
- else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
- val = ilog2(M41T80_SQW_MAX_FREQ) - val;
- else
- return -EINVAL;
- }
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ val = 1;
+ else if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ val = 2;
+ else if (rate)
+ val = 15 - ilog2(rate);
reg = i2c_smbus_read_byte_data(client, reg_sqw);
if (reg < 0)
@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
reg = (reg & 0x0f) | (val << 4);
ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
- if (ret < 0)
- return ret;
-
- return -EINVAL;
+ if (!ret)
+ m41t80->freq = m41t80_decode_freq(val);
+ return ret;
}
static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
else
ret &= ~M41T80_ALMON_SQWE;
- return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ if (!ret)
+ m41t80->sqwe = enable;
+ return ret;
}
static int m41t80_sqw_prepare(struct clk_hw *hw)
@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
static int m41t80_sqw_is_prepared(struct clk_hw *hw)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
- struct i2c_client *client = m41t80->client;
- int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-
- if (ret < 0)
- return ret;
-
- return !!(ret & M41T80_ALMON_SQWE);
+ return sqw_to_m41t80_data(hw)->sqwe;
}
static const struct clk_ops m41t80_sqw_ops = {
@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
init.parent_names = NULL;
init.num_parents = 0;
m41t80->sqw.init = &init;
+ m41t80->freq = m41t80_get_freq(m41t80);
/* optional override of the clockname */
of_property_read_string(node, "clock-output-names", &init.name);
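
The new helper maps the 4-bit SQW register setting to an output rate: 0 means
off, 1 means the maximum, and anything else divides the maximum by 2^setting.
A standalone sketch that tabulates the mapping, assuming M41T80_SQW_MAX_FREQ is
32768 Hz as in the driver:

#include <stdio.h>

#define M41T80_SQW_MAX_FREQ	32768	/* assumed value of the driver constant */

/* Mirror of m41t80_decode_freq() */
static unsigned long decode_freq(int setting)
{
	return setting == 0 ? 0 :
	       setting == 1 ? M41T80_SQW_MAX_FREQ :
			      M41T80_SQW_MAX_FREQ >> setting;
}

int main(void)
{
	/* setting 1 -> 32768 Hz, 2 -> 8192 Hz, ..., 15 -> 1 Hz */
	for (int s = 0; s <= 15; s++)
		printf("setting %2d -> %lu Hz\n", s, decode_freq(s));
	return 0;
}
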
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 02af045305dd..d9aea9b6d9cd 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -163,35 +163,30 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
.proc = m48t86_rtc_proc,
};
-static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
+ ((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i));
+ m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write,
- M48T86_NVRAM_LEN);
-
/*
* The RTC is an optional feature at purchase time on some Technologic Systems
* boards. Verify that it actually exists by checking if the last two bytes
@@ -223,11 +218,21 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
return false;
}
+static struct nvmem_config m48t86_nvmem_cfg = {
+ .name = "m48t86_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = M48T86_NVRAM_LEN,
+ .reg_read = m48t86_nvram_read,
+ .reg_write = m48t86_nvram_write,
+};
+
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
struct resource *res;
unsigned char reg;
+ int err;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -254,25 +259,25 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86",
- &m48t86_rtc_ops, THIS_MODULE);
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc))
return PTR_ERR(info->rtc);
+ info->rtc->ops = &m48t86_rtc_ops;
+
+ m48t86_nvmem_cfg.priv = &pdev->dev;
+ info->rtc->nvmem_config = &m48t86_nvmem_cfg;
+ info->rtc->nvram_old_abi = true;
+
+ err = rtc_register_device(info->rtc);
+ if (err)
+ return err;
+
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
dev_info(&pdev->dev, "battery %s\n",
(reg & M48T86_D_VRT) ? "ok" : "exhausted");
- if (device_create_bin_file(&pdev->dev, &bin_attr_nvram))
- dev_err(&pdev->dev, "failed to create nvram sysfs entry\n");
-
- return 0;
-}
-
-static int m48t86_rtc_remove(struct platform_device *pdev)
-{
- device_remove_bin_file(&pdev->dev, &bin_attr_nvram);
return 0;
}
@@ -281,7 +286,6 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.name = "rtc-m48t86",
},
.probe = m48t86_rtc_probe,
- .remove = m48t86_rtc_remove,
};
module_platform_driver(m48t86_rtc_platform_driver);
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
new file mode 100644
index 000000000000..d79b9ae4d237
--- /dev/null
+++ b/drivers/rtc/rtc-mt7622.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for MediaTek SoC based RTC
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MTK_RTC_DEV KBUILD_MODNAME
+
+#define MTK_RTC_PWRCHK1 0x4
+#define RTC_PWRCHK1_MAGIC 0xc6
+
+#define MTK_RTC_PWRCHK2 0x8
+#define RTC_PWRCHK2_MAGIC 0x9a
+
+#define MTK_RTC_KEY 0xc
+#define RTC_KEY_MAGIC 0x59
+
+#define MTK_RTC_PROT1 0x10
+#define RTC_PROT1_MAGIC 0xa3
+
+#define MTK_RTC_PROT2 0x14
+#define RTC_PROT2_MAGIC 0x57
+
+#define MTK_RTC_PROT3 0x18
+#define RTC_PROT3_MAGIC 0x67
+
+#define MTK_RTC_PROT4 0x1c
+#define RTC_PROT4_MAGIC 0xd2
+
+#define MTK_RTC_CTL 0x20
+#define RTC_RC_STOP BIT(0)
+
+#define MTK_RTC_DEBNCE 0x2c
+#define RTC_DEBNCE_MASK GENMASK(2, 0)
+
+#define MTK_RTC_INT 0x30
+#define RTC_INT_AL_STA BIT(4)
+
+/*
+ * Ranges from 0x40 to 0x78 provide RTC time setup for year, month,
+ * day of month, day of week, hour, minute and second.
+ */
+#define MTK_RTC_TREG(_t, _f) (0x40 + (0x4 * (_f)) + ((_t) * 0x20))
+
+#define MTK_RTC_AL_CTL 0x7c
+#define RTC_AL_EN BIT(0)
+#define RTC_AL_ALL GENMASK(7, 0)
+
+/*
+ * Offset used to translate the year between struct rtc_time and the
+ * hardware register MTK_RTC_TREG(x, MTK_YEA)
+ */
+#define MTK_RTC_TM_YR_OFFSET 100
+
+/*
+ * The lowest valid tm_year. The RTC hardware would incorrectly treat
+ * tm_year 100 as a non-leap year, so it must also be excluded from the
+ * valid range.
+ */
+#define MTK_RTC_TM_YR_L (MTK_RTC_TM_YR_OFFSET + 1)
+
+/*
+ * The highest year the MT7622 RTC can hold is 99; incrementing past 99
+ * wraps the year register around to 0.
+ */
+#define MTK_RTC_HW_YR_LIMIT 99
+
+/* The highest value for the valid tm_year */
+#define MTK_RTC_TM_YR_H (MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT)
+
+/* Helper macro to check whether the hardware supports the given tm_year */
+#define MTK_RTC_TM_YR_VALID(_y) ((_y) >= MTK_RTC_TM_YR_L && \
+ (_y) <= MTK_RTC_TM_YR_H)
+
+/* The RTC provides two functions: a time counter and an alarm. */
+enum {
+ MTK_TC,
+ MTK_AL,
+};
+
+/* Indexes used to address the relevant registers via MTK_RTC_TREG */
+enum {
+ MTK_YEA,
+ MTK_MON,
+ MTK_DOM,
+ MTK_DOW,
+ MTK_HOU,
+ MTK_MIN,
+ MTK_SEC
+};
+
+struct mtk_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+};
+
+static void mtk_w32(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ writel_relaxed(val, rtc->base + reg);
+}
+
+static u32 mtk_r32(struct mtk_rtc *rtc, u32 reg)
+{
+ return readl_relaxed(rtc->base + reg);
+}
+
+static void mtk_rmw(struct mtk_rtc *rtc, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = mtk_r32(rtc, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(rtc, reg, val);
+}
+
+static void mtk_set(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, 0, val);
+}
+
+static void mtk_clr(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, val, 0);
+}
+
+static void mtk_rtc_hw_init(struct mtk_rtc *hw)
+{
+ /* Run the init sequence required to get the RTC to work */
+ mtk_w32(hw, MTK_RTC_PWRCHK1, RTC_PWRCHK1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PWRCHK2, RTC_PWRCHK2_MAGIC);
+ mtk_w32(hw, MTK_RTC_KEY, RTC_KEY_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT1, RTC_PROT1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT2, RTC_PROT2_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT3, RTC_PROT3_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT4, RTC_PROT4_MAGIC);
+ mtk_rmw(hw, MTK_RTC_DEBNCE, RTC_DEBNCE_MASK, 0);
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+}
+
+static void mtk_rtc_get_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year, mon, mday, wday, hour, min, sec;
+
+ /*
+ * Re-read until the seconds field is unchanged, which ensures all
+ * fields are in a consistent state. Note that MTK_SEC must be read
+ * first: if two consecutive reads of MTK_SEC return the same value,
+ * the other fields are guaranteed not to have changed in between.
+ */
+ do {
+ sec = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC));
+ min = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN));
+ hour = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU));
+ wday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW));
+ mday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM));
+ mon = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MON));
+ year = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA));
+ } while (sec != mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC)));
+
+ tm->tm_sec = sec;
+ tm->tm_min = min;
+ tm->tm_hour = hour;
+ tm->tm_wday = wday;
+ tm->tm_mday = mday;
+ tm->tm_mon = mon - 1;
+
+ /* Rebase to the absolute year which userspace queries */
+ tm->tm_year = year + MTK_RTC_TM_YR_OFFSET;
+}
+
+static void mtk_rtc_set_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year;
+
+ /* Rebase to the relative year which RTC hardware requires */
+ year = tm->tm_year - MTK_RTC_TM_YR_OFFSET;
+
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA), year);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MON), tm->tm_mon + 1);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW), tm->tm_wday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM), tm->tm_mday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU), tm->tm_hour);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN), tm->tm_min);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC), tm->tm_sec);
+}
+
+static irqreturn_t mtk_rtc_alarmirq(int irq, void *id)
+{
+ struct mtk_rtc *hw = (struct mtk_rtc *)id;
+ u32 irq_sta;
+
+ irq_sta = mtk_r32(hw, MTK_RTC_INT);
+ if (irq_sta & RTC_INT_AL_STA) {
+ /* Stopping the alarm also implicitly disables the alarm interrupt */
+ mtk_w32(hw, MTK_RTC_AL_CTL, 0);
+ rtc_update_irq(hw->rtc, 1, RTC_IRQF | RTC_AF);
+
+ /* Ack alarm interrupt status */
+ mtk_w32(hw, MTK_RTC_INT, RTC_INT_AL_STA);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_rtc_gettime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ mtk_rtc_get_alarm_or_time(hw, tm, MTK_TC);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mtk_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (!MTK_RTC_TM_YR_VALID(tm->tm_year))
+ return -EINVAL;
+
+ /* Stop the time counter before setting the new time */
+ mtk_set(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ mtk_rtc_set_alarm_or_time(hw, tm, MTK_TC);
+
+ /* Restart the time counter */
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ return 0;
+}
+
+static int mtk_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ mtk_rtc_get_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ wkalrm->enabled = !!(mtk_r32(hw, MTK_RTC_AL_CTL) & RTC_AL_EN);
+ wkalrm->pending = !!(mtk_r32(hw, MTK_RTC_INT) & RTC_INT_AL_STA);
+
+ return 0;
+}
+
+static int mtk_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ if (!MTK_RTC_TM_YR_VALID(alrm_tm->tm_year))
+ return -EINVAL;
+
+ /*
+ * Stop the alarm, which also implicitly disables the alarm interrupt,
+ * before setting a new one.
+ */
+ mtk_clr(hw, MTK_RTC_AL_CTL, RTC_AL_EN);
+
+ /*
+ * Avoid contention between mtk_rtc_setalarm and the IRQ handler:
+ * with the interrupt disabled above, wait for any pending IRQ
+ * handler to complete.
+ */
+ synchronize_irq(hw->irq);
+
+ mtk_rtc_set_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ /* Restart the alarm with the new setup */
+ mtk_w32(hw, MTK_RTC_AL_CTL, RTC_AL_ALL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops mtk_rtc_ops = {
+ .read_time = mtk_rtc_gettime,
+ .set_time = mtk_rtc_settime,
+ .read_alarm = mtk_rtc_getalarm,
+ .set_alarm = mtk_rtc_setalarm,
+};
+
+static const struct of_device_id mtk_rtc_match[] = {
+ { .compatible = "mediatek,mt7622-rtc" },
+ { .compatible = "mediatek,soc-rtc" },
+ {},
+};
+
+static int mtk_rtc_probe(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw;
+ struct resource *res;
+ int ret;
+
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base))
+ return PTR_ERR(hw->base);
+
+ hw->clk = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock\n");
+ return PTR_ERR(hw->clk);
+ }
+
+ ret = clk_prepare_enable(hw->clk);
+ if (ret)
+ return ret;
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ ret = hw->irq;
+ goto err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, hw->irq, mtk_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't request IRQ\n");
+ goto err;
+ }
+
+ mtk_rtc_hw_init(hw);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ hw->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &mtk_rtc_ops, THIS_MODULE);
+ if (IS_ERR(hw->rtc)) {
+ ret = PTR_ERR(hw->rtc);
+ dev_err(&pdev->dev, "Unable to register device\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ clk_disable_unprepare(hw->clk);
+
+ return ret;
+}
+
+static int mtk_rtc_remove(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(hw->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_rtc_suspend(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static int mtk_rtc_resume(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_rtc_pm_ops, mtk_rtc_suspend, mtk_rtc_resume);
+
+#define MTK_RTC_PM_OPS (&mtk_rtc_pm_ops)
+#else /* !CONFIG_PM_SLEEP */
+#define MTK_RTC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver mtk_rtc_driver = {
+ .probe = mtk_rtc_probe,
+ .remove = mtk_rtc_remove,
+ .driver = {
+ .name = MTK_RTC_DEV,
+ .of_match_table = mtk_rtc_match,
+ .pm = MTK_RTC_PM_OPS,
+ },
+};
+
+module_platform_driver(mtk_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek SoC based RTC Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 13f7cd11c07e..1d666ac9ef70 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,10 @@
#define OMAP_RTC_COMP_MSB_REG 0x50
#define OMAP_RTC_OSC_REG 0x54
+#define OMAP_RTC_SCRATCH0_REG 0x60
+#define OMAP_RTC_SCRATCH1_REG 0x64
+#define OMAP_RTC_SCRATCH2_REG 0x68
+
#define OMAP_RTC_KICK0_REG 0x6c
#define OMAP_RTC_KICK1_REG 0x70
@@ -667,6 +671,45 @@ static struct pinctrl_desc rtc_pinctrl_desc = {
.owner = THIS_MODULE,
};
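+/*
+ * The three scratch registers (SCRATCH0..SCRATCH2, 12 bytes in total) are
+ * exposed to userspace as an nvmem device, accessed one 32-bit word at a
+ * time.
+ */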
+static int omap_rtc_scratch_read(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes / 4; i++)
+ val[i] = rtc_readl(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4));
+
+ return 0;
+}
+
+static int omap_rtc_scratch_write(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ for (i = 0; i < bytes / 4; i++)
+ rtc_writel(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4), val[i]);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static struct nvmem_config omap_rtc_nvmem_config = {
+ .name = "omap_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG,
+ .reg_read = omap_rtc_scratch_read,
+ .reg_write = omap_rtc_scratch_write,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -797,13 +840,16 @@ static int omap_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, true);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &omap_rtc_ops, THIS_MODULE);
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
goto err;
}
+ rtc->rtc->ops = &omap_rtc_ops;
+ omap_rtc_nvmem_config.priv = rtc;
+ rtc->rtc->nvmem_config = &omap_rtc_nvmem_config;
+
/* handle periodic and alarm irqs */
ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0,
dev_name(&rtc->rtc->dev), rtc);
@@ -830,9 +876,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
if (IS_ERR(rtc->pctldev)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
- return PTR_ERR(rtc->pctldev);
+ ret = PTR_ERR(rtc->pctldev);
+ goto err;
}
+ ret = rtc_register_device(rtc->rtc);
+ if (ret)
+ goto err;
+
return 0;
err:
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 28c48b3c1946..c312af0db729 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,6 +35,9 @@
#define REG_MONTHS 0x08
#define REG_YEARS 0x09
+#define REG_OFFSET 0x0e
+#define REG_OFFSET_MODE BIT(7)
+
struct pcf8523 {
struct rtc_device *rtc;
};
@@ -272,10 +275,47 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
#define pcf8523_rtc_ioctl NULL
#endif
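+/*
+ * The offset register holds a signed 7-bit correction value; bit 7 selects
+ * the correction mode. As used below, one step corresponds to 4340 ppb in
+ * the default mode and 4069 ppb when REG_OFFSET_MODE is set, matching the
+ * parts-per-billion unit expected by the RTC core.
+ */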
+static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+ u8 value;
+ s8 val;
+
+ err = pcf8523_read(client, REG_OFFSET, &value);
+ if (err < 0)
+ return err;
+
+ /* sign extend the 7-bit offset value */
+ val = value << 1;
+ *offset = (value & REG_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
+
+ return 0;
+}
+
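+/*
+ * Compute the nearest register value in both correction modes and program
+ * whichever mode reproduces the requested offset with the smaller error.
+ */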
+static int pcf8523_rtc_set_offset(struct device *dev, long offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ long reg_m0, reg_m1;
+ u8 value;
+
+ reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
+ reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
+
+ if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
+ value = reg_m0 & 0x7f;
+ else
+ value = (reg_m1 & 0x7f) | REG_OFFSET_MODE;
+
+ return pcf8523_write(client, REG_OFFSET, value);
+}
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
.ioctl = pcf8523_rtc_ioctl,
+ .read_offset = pcf8523_rtc_read_offset,
+ .set_offset = pcf8523_rtc_set_offset,
};
static int pcf8523_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
new file mode 100644
index 000000000000..ea04e9f0930b
--- /dev/null
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/rtc/rtc-pcf85363.c
+ *
+ * Driver for NXP PCF85363 real-time clock.
+ *
+ * Copyright (C) 2017 Eric Nelson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/*
+ * Date/Time registers
+ */
+#define DT_100THS 0x00
+#define DT_SECS 0x01
+#define DT_MINUTES 0x02
+#define DT_HOURS 0x03
+#define DT_DAYS 0x04
+#define DT_WEEKDAYS 0x05
+#define DT_MONTHS 0x06
+#define DT_YEARS 0x07
+
+/*
+ * Alarm registers
+ */
+#define DT_SECOND_ALM1 0x08
+#define DT_MINUTE_ALM1 0x09
+#define DT_HOUR_ALM1 0x0a
+#define DT_DAY_ALM1 0x0b
+#define DT_MONTH_ALM1 0x0c
+#define DT_MINUTE_ALM2 0x0d
+#define DT_HOUR_ALM2 0x0e
+#define DT_WEEKDAY_ALM2 0x0f
+#define DT_ALARM_EN 0x10
+
+/*
+ * Time stamp registers
+ */
+#define DT_TIMESTAMP1 0x11
+#define DT_TIMESTAMP2 0x17
+#define DT_TIMESTAMP3 0x1d
+#define DT_TS_MODE 0x23
+
+/*
+ * control registers
+ */
+#define CTRL_OFFSET 0x24
+#define CTRL_OSCILLATOR 0x25
+#define CTRL_BATTERY 0x26
+#define CTRL_PIN_IO 0x27
+#define CTRL_FUNCTION 0x28
+#define CTRL_INTA_EN 0x29
+#define CTRL_INTB_EN 0x2a
+#define CTRL_FLAGS 0x2b
+#define CTRL_RAMBYTE 0x2c
+#define CTRL_WDOG 0x2d
+#define CTRL_STOP_EN 0x2e
+#define CTRL_RESETS 0x2f
+#define CTRL_RAM 0x40
+
+#define NVRAM_SIZE 0x40
+
+static struct i2c_driver pcf85363_driver;
+
+struct pcf85363 {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct nvmem_config nvmem_cfg;
+ struct regmap *regmap;
+};
+
+static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int ret, len = sizeof(buf);
+
+ /* read the RTC date and time registers all at once */
+ ret = regmap_bulk_read(pcf85363->regmap, DT_100THS, buf, len);
+ if (ret) {
+ dev_err(dev, "%s: error %d\n", __func__, ret);
+ return ret;
+ }
+
+ tm->tm_year = bcd2bin(buf[DT_YEARS]);
+ /* adjust for 1900 base of rtc_time */
+ tm->tm_year += 100;
+
+ tm->tm_wday = buf[DT_WEEKDAYS] & 7;
+ buf[DT_SECS] &= 0x7F;
+ tm->tm_sec = bcd2bin(buf[DT_SECS]);
+ buf[DT_MINUTES] &= 0x7F;
+ tm->tm_min = bcd2bin(buf[DT_MINUTES]);
+ tm->tm_hour = bcd2bin(buf[DT_HOURS]);
+ tm->tm_mday = bcd2bin(buf[DT_DAYS]);
+ tm->tm_mon = bcd2bin(buf[DT_MONTHS]) - 1;
+
+ return 0;
+}
+
+static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int len = sizeof(buf);
+
+ buf[DT_100THS] = 0;
+ buf[DT_SECS] = bin2bcd(tm->tm_sec);
+ buf[DT_MINUTES] = bin2bcd(tm->tm_min);
+ buf[DT_HOURS] = bin2bcd(tm->tm_hour);
+ buf[DT_DAYS] = bin2bcd(tm->tm_mday);
+ buf[DT_WEEKDAYS] = tm->tm_wday;
+ buf[DT_MONTHS] = bin2bcd(tm->tm_mon + 1);
+ buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+ return regmap_bulk_write(pcf85363->regmap, DT_100THS,
+ buf, len);
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = pcf85363_rtc_read_time,
+ .set_time = pcf85363_rtc_set_time,
+};
+
+static int pcf85363_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_read(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static int pcf85363_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_write(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int pcf85363_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf85363 *pcf85363;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363),
+ GFP_KERNEL);
+ if (!pcf85363)
+ return -ENOMEM;
+
+ pcf85363->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(pcf85363->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(pcf85363->regmap);
+ }
+
+ pcf85363->dev = &client->dev;
+ i2c_set_clientdata(client, pcf85363);
+
+ pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev);
+ if (IS_ERR(pcf85363->rtc))
+ return PTR_ERR(pcf85363->rtc);
+
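+ /* expose the 64 bytes of RAM at CTRL_RAM as an nvmem device */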
+ pcf85363->nvmem_cfg.name = "pcf85363-";
+ pcf85363->nvmem_cfg.word_size = 1;
+ pcf85363->nvmem_cfg.stride = 1;
+ pcf85363->nvmem_cfg.size = NVRAM_SIZE;
+ pcf85363->nvmem_cfg.reg_read = pcf85363_nvram_read;
+ pcf85363->nvmem_cfg.reg_write = pcf85363_nvram_write;
+ pcf85363->nvmem_cfg.priv = pcf85363;
+ pcf85363->rtc->nvmem_config = &pcf85363->nvmem_cfg;
+ pcf85363->rtc->ops = &rtc_ops;
+
+ return rtc_register_device(pcf85363->rtc);
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "nxp,pcf85363" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver pcf85363_driver = {
+ .driver = {
+ .name = "pcf85363",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = pcf85363_probe,
+};
+
+module_i2c_driver(pcf85363_driver);
+
+MODULE_AUTHOR("Eric Nelson");
+MODULE_DESCRIPTION("pcf85363 I2C RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index cea6ea4df970..3efc86c25d27 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -387,7 +387,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err)
return err;
- return pcf8563_set_alarm_mode(client, 1);
+ return pcf8563_set_alarm_mode(client, !!tm->enabled);
}
static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
return 0;
buf &= PCF8563_REG_CLKO_F_MASK;
- return clkout_rates[ret];
+ return clkout_rates[buf];
}
static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e1687e19c59f..82eb7da2c478 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -308,10 +308,9 @@ static int pl031_remove(struct amba_device *adev)
dev_pm_clear_wake_irq(&adev->dev);
device_init_wakeup(&adev->dev, false);
- free_irq(adev->irq[0], ldata);
+ if (adev->irq[0])
+ free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
- iounmap(ldata->base);
- kfree(ldata);
amba_release_regions(adev);
return 0;
@@ -322,25 +321,28 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
- struct rtc_class_ops *ops = &vendor->ops;
+ struct rtc_class_ops *ops;
unsigned long time, data;
ret = amba_request_regions(adev, NULL);
if (ret)
goto err_req;
- ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL);
- if (!ldata) {
+ ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local),
+ GFP_KERNEL);
+ ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
+ GFP_KERNEL);
+ if (!ldata || !ops) {
ret = -ENOMEM;
goto out;
}
- ldata->vendor = vendor;
-
- ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
+ ldata->vendor = vendor;
+ ldata->base = devm_ioremap(&adev->dev, adev->res.start,
+ resource_size(&adev->res));
if (!ldata->base) {
ret = -ENOMEM;
- goto out_no_remap;
+ goto out;
}
amba_set_drvdata(adev, ldata);
@@ -373,28 +375,32 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ if (!adev->irq[0]) {
+ /* When there's no interrupt, no point in exposing the alarm */
+ ops->read_alarm = NULL;
+ ops->set_alarm = NULL;
+ ops->alarm_irq_enable = NULL;
+ }
+
device_init_wakeup(&adev->dev, true);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
- goto out_no_rtc;
+ goto out;
}
- if (request_irq(adev->irq[0], pl031_interrupt,
- vendor->irqflags, "rtc-pl031", ldata)) {
- ret = -EIO;
- goto out_no_irq;
+ if (adev->irq[0]) {
+ ret = request_irq(adev->irq[0], pl031_interrupt,
+ vendor->irqflags, "rtc-pl031", ldata);
+ if (ret)
+ goto out_no_irq;
+ dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
return 0;
out_no_irq:
rtc_device_unregister(ldata->rtc);
-out_no_rtc:
- iounmap(ldata->base);
-out_no_remap:
- kfree(ldata);
out:
amba_release_regions(adev);
err_req:
@@ -446,7 +452,7 @@ static struct pl031_vendor_data stv2_pl031 = {
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
};
-static struct amba_id pl031_ids[] = {
+static const struct amba_id pl031_ids[] = {
{
.id = 0x00041031,
.mask = 0x000fffff,
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index aa09771de04f..3d6174eb32f6 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg,
static int rv3029_eeprom_write(struct device *dev, u8 reg,
u8 const buf[], size_t len)
{
- int ret, err;
+ int ret;
size_t i;
u8 tmp;
- err = rv3029_eeprom_enter(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_enter(dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < len; i++, reg++) {
ret = rv3029_read_regs(dev, reg, &tmp, 1);
@@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg,
break;
}
- err = rv3029_eeprom_exit(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_exit(dev);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
static int rv3029_eeprom_update_bits(struct device *dev,
@@ -876,6 +876,8 @@ static const struct i2c_device_id rv3029_id[] = {
MODULE_DEVICE_TABLE(i2c, rv3029_id);
static const struct of_device_id rv3029_of_match[] = {
+ { .compatible = "microcrystal,rv3029" },
+ /* Backward compatibility only, do not use compatibles below: */
{ .compatible = "rv3029" },
{ .compatible = "rv3029c2" },
{ .compatible = "mc,rv3029c2" },
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 1ed3403ff8ac..5c5938ab3d86 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -24,7 +24,6 @@
#define RX8010_MDAY 0x14
#define RX8010_MONTH 0x15
#define RX8010_YEAR 0x16
-#define RX8010_YEAR 0x16
#define RX8010_RESV17 0x17
#define RX8010_ALMIN 0x18
#define RX8010_ALHOUR 0x19
@@ -36,7 +35,7 @@
#define RX8010_CTRL 0x1F
/* 0x20 to 0x2F are user registers */
#define RX8010_RESV30 0x30
-#define RX8010_RESV31 0x32
+#define RX8010_RESV31 0x31
#define RX8010_IRQ 0x32
#define RX8010_EXT_WADA BIT(3)
@@ -248,7 +247,7 @@ static int rx8010_init_client(struct i2c_client *client)
rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST);
- return err;
+ return 0;
}
static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -277,7 +276,7 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
- return err;
+ return 0;
}
static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
new file mode 100644
index 000000000000..d544d5268757
--- /dev/null
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define SPRD_RTC_SEC_CNT_VALUE 0x0
+#define SPRD_RTC_MIN_CNT_VALUE 0x4
+#define SPRD_RTC_HOUR_CNT_VALUE 0x8
+#define SPRD_RTC_DAY_CNT_VALUE 0xc
+#define SPRD_RTC_SEC_CNT_UPD 0x10
+#define SPRD_RTC_MIN_CNT_UPD 0x14
+#define SPRD_RTC_HOUR_CNT_UPD 0x18
+#define SPRD_RTC_DAY_CNT_UPD 0x1c
+#define SPRD_RTC_SEC_ALM_UPD 0x20
+#define SPRD_RTC_MIN_ALM_UPD 0x24
+#define SPRD_RTC_HOUR_ALM_UPD 0x28
+#define SPRD_RTC_DAY_ALM_UPD 0x2c
+#define SPRD_RTC_INT_EN 0x30
+#define SPRD_RTC_INT_RAW_STS 0x34
+#define SPRD_RTC_INT_CLR 0x38
+#define SPRD_RTC_INT_MASK_STS 0x3C
+#define SPRD_RTC_SEC_ALM_VALUE 0x40
+#define SPRD_RTC_MIN_ALM_VALUE 0x44
+#define SPRD_RTC_HOUR_ALM_VALUE 0x48
+#define SPRD_RTC_DAY_ALM_VALUE 0x4c
+#define SPRD_RTC_SPG_VALUE 0x50
+#define SPRD_RTC_SPG_UPD 0x54
+#define SPRD_RTC_SEC_AUXALM_UPD 0x60
+#define SPRD_RTC_MIN_AUXALM_UPD 0x64
+#define SPRD_RTC_HOUR_AUXALM_UPD 0x68
+#define SPRD_RTC_DAY_AUXALM_UPD 0x6c
+
+/* BIT & MASK definition for SPRD_RTC_INT_* registers */
+#define SPRD_RTC_SEC_EN BIT(0)
+#define SPRD_RTC_MIN_EN BIT(1)
+#define SPRD_RTC_HOUR_EN BIT(2)
+#define SPRD_RTC_DAY_EN BIT(3)
+#define SPRD_RTC_ALARM_EN BIT(4)
+#define SPRD_RTC_HRS_FORMAT_EN BIT(5)
+#define SPRD_RTC_AUXALM_EN BIT(6)
+#define SPRD_RTC_SPG_UPD_EN BIT(7)
+#define SPRD_RTC_SEC_UPD_EN BIT(8)
+#define SPRD_RTC_MIN_UPD_EN BIT(9)
+#define SPRD_RTC_HOUR_UPD_EN BIT(10)
+#define SPRD_RTC_DAY_UPD_EN BIT(11)
+#define SPRD_RTC_ALMSEC_UPD_EN BIT(12)
+#define SPRD_RTC_ALMMIN_UPD_EN BIT(13)
+#define SPRD_RTC_ALMHOUR_UPD_EN BIT(14)
+#define SPRD_RTC_ALMDAY_UPD_EN BIT(15)
+#define SPRD_RTC_INT_MASK GENMASK(15, 0)
+
+#define SPRD_RTC_TIME_INT_MASK \
+ (SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN | \
+ SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN)
+
+#define SPRD_RTC_ALMTIME_INT_MASK \
+ (SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN | \
+ SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN)
+
+#define SPRD_RTC_ALM_INT_MASK \
+ (SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN | \
+ SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN | \
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN)
+
+/* second/minute/hour/day values mask definition */
+#define SPRD_RTC_SEC_MASK GENMASK(5, 0)
+#define SPRD_RTC_MIN_MASK GENMASK(5, 0)
+#define SPRD_RTC_HOUR_MASK GENMASK(4, 0)
+#define SPRD_RTC_DAY_MASK GENMASK(15, 0)
+
+/* alarm lock definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_ALMLOCK_MASK GENMASK(7, 0)
+#define SPRD_RTC_ALM_UNLOCK 0xa5
+#define SPRD_RTC_ALM_LOCK (~SPRD_RTC_ALM_UNLOCK & \
+ SPRD_RTC_ALMLOCK_MASK)
+
+/* SPG values definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8)
+#define SPRD_RTC_POWER_RESET_FLAG BIT(9)
+
+/* timeout of synchronizing time and alarm registers (us) */
+#define SPRD_RTC_POLL_TIMEOUT 200000
+#define SPRD_RTC_POLL_DELAY_US 20000
+
+struct sprd_rtc {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ struct device *dev;
+ u32 base;
+ int irq;
+ bool valid;
+};
+
+/*
+ * The Spreadtrum RTC controller has three register groups: time, normal alarm
+ * and auxiliary alarm. The time group sets the RTC time and the two alarm
+ * groups set their respective alarms. Both the normal and the auxiliary alarm
+ * event can wake the system from deep sleep, but only the normal alarm event
+ * can power the system up from the powered-down state.
+ */
+enum sprd_rtc_reg_types {
+ SPRD_RTC_TIME,
+ SPRD_RTC_ALARM,
+ SPRD_RTC_AUX_ALARM,
+};
+
+static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc)
+{
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALM_INT_MASK);
+}
+
+static int sprd_rtc_disable_ints(struct sprd_rtc *rtc)
+{
+ int ret;
+
+ ret = regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_INT_MASK, 0);
+ if (ret)
+ return ret;
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_INT_MASK);
+}
+
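+/*
+ * Lock or unlock the normal alarm via the SPG register; only an unlocked
+ * alarm is allowed to power the system up from the powered-down state.
+ */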
+static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ val &= ~(SPRD_RTC_ALMLOCK_MASK | SPRD_RTC_POWEROFF_ALM_FLAG);
+ if (lock)
+ val |= SPRD_RTC_ALM_LOCK;
+ else
+ val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG;
+
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val);
+ if (ret)
+ return ret;
+
+ /* wait until the SPG value is updated successfully */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t *secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg;
+ u32 val, sec, min, hour, day;
+ int ret;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_VALUE;
+ min_reg = SPRD_RTC_MIN_CNT_VALUE;
+ hour_reg = SPRD_RTC_HOUR_CNT_VALUE;
+ day_reg = SPRD_RTC_DAY_CNT_VALUE;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_VALUE;
+ min_reg = SPRD_RTC_MIN_ALM_VALUE;
+ hour_reg = SPRD_RTC_HOUR_ALM_VALUE;
+ day_reg = SPRD_RTC_DAY_ALM_VALUE;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val);
+ if (ret)
+ return ret;
+
+ sec = val & SPRD_RTC_SEC_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val);
+ if (ret)
+ return ret;
+
+ min = val & SPRD_RTC_MIN_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val);
+ if (ret)
+ return ret;
+
+ hour = val & SPRD_RTC_HOUR_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val);
+ if (ret)
+ return ret;
+
+ day = val & SPRD_RTC_DAY_MASK;
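+ /* combine the day, hour, minute and second counters into a second count */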
+ *secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec;
+ return 0;
+}
+
+static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask;
+ u32 sec, min, hour, day, val;
+ int ret, rem;
+
+ /* convert seconds to RTC time format */
+ day = div_s64_rem(secs, 86400, &rem);
+ hour = rem / 3600;
+ rem -= hour * 3600;
+ min = rem / 60;
+ sec = rem - min * 60;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_UPD;
+ min_reg = SPRD_RTC_MIN_CNT_UPD;
+ hour_reg = SPRD_RTC_HOUR_CNT_UPD;
+ day_reg = SPRD_RTC_DAY_CNT_UPD;
+ sts_mask = SPRD_RTC_TIME_INT_MASK;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_UPD;
+ min_reg = SPRD_RTC_MIN_ALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_ALM_UPD;
+ day_reg = SPRD_RTC_DAY_ALM_UPD;
+ sts_mask = SPRD_RTC_ALMTIME_INT_MASK;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ sts_mask = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + min_reg, min);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + day_reg, day);
+ if (ret)
+ return ret;
+
+ if (type == SPRD_RTC_AUX_ALARM)
+ return 0;
+
+ /*
+ * The time and normal alarm registers sit in the always-powered-on
+ * region supplied by VDDRTC, so updating them takes a long time,
+ * about 125ms. Poll here until all values have been latched
+ * successfully.
+ */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ ((val & sts_mask) == sts_mask),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret < 0) {
+ dev_err(rtc->dev, "set time/alarm values timeout\n");
+ return ret;
+ }
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ sts_mask);
+}
+
+static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ int ret;
+
+ /* clear the auxiliary alarm interrupt status */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_AUXALM_EN);
+ } else {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN, 0);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+
+ if (!rtc->valid) {
+ dev_warn(dev, "RTC values are invalid\n");
+ return -EINVAL;
+ }
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(tm);
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
+ if (ret)
+ return ret;
+
+ if (!rtc->valid) {
+ /*
+ * Set SPRD_RTC_POWER_RESET_FLAG to record that the RTC now holds
+ * valid time values.
+ */
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_SPG_UPD,
+ SPRD_RTC_POWER_RESET_FLAG,
+ SPRD_RTC_POWER_RESET_FLAG);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS,
+ val, (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n",
+ ret);
+ return ret;
+ }
+
+ rtc->valid = true;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+ u32 val;
+
+ /*
+ * If the aie_timer is enabled, read the normal alarm time;
+ * otherwise read the auxiliary alarm time.
+ */
+ if (rtc->rtc && rtc->rtc->aie_timer.enabled == 0)
+ return sprd_rtc_read_aux_alarm(dev, alrm);
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_ALARM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_ALARM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ struct rtc_time aie_time =
+ rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
+ int ret;
+
+ /*
+ * We have two alarms: the normal alarm and the auxiliary alarm. Both
+ * alarm events can wake the system from deep sleep, but only the
+ * normal alarm event can power the system up from the powered-down
+ * state. Moreover, updating the auxiliary alarm registers does not
+ * need the ~125ms polling. Thus we usually use the auxiliary alarm
+ * for waking the system from deep sleep and the normal alarm, with
+ * its polled update, for the other scenarios.
+ *
+ * So check whether the alarm time was set by the aie_timer: if yes,
+ * set the normal alarm; if not, it is just a wake event and the
+ * auxiliary alarm is set instead.
+ */
+ if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time))
+ return sprd_rtc_set_aux_alarm(dev, alrm);
+
+ /* clear the alarm interrupt status firstly */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ /* unlock the alarm to enable the alarm function. */
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN, 0);
+
+ /*
+ * Lock the alarm function so that a spurious alarm event cannot
+ * power the system up.
+ */
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
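+/*
+ * Enable or disable both the normal and the auxiliary alarm interrupt and
+ * unlock or lock the alarm accordingly.
+ */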
+static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ if (enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0);
+
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static const struct rtc_class_ops sprd_rtc_ops = {
+ .read_time = sprd_rtc_read_time,
+ .set_time = sprd_rtc_set_time,
+ .read_alarm = sprd_rtc_read_alarm,
+ .set_alarm = sprd_rtc_set_alarm,
+ .alarm_irq_enable = sprd_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sprd_rtc_handler(int irq, void *dev_id)
+{
+ struct sprd_rtc *rtc = dev_id;
+ int ret;
+
+ ret = sprd_rtc_clear_alarm_ints(rtc);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If SPRD_RTC_POWER_RESET_FLAG is not set, the RTC has been powered
+ * down and its time values are invalid.
+ */
+ rtc->valid = (val & SPRD_RTC_POWER_RESET_FLAG) ? true : false;
+ return 0;
+}
+
+static int sprd_rtc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct sprd_rtc *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!rtc->regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(node, "reg", &rtc->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RTC base address\n");
+ return ret;
+ }
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get RTC irq number\n");
+ return rtc->irq;
+ }
+
+ rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rtc);
+
+ /* clear all RTC interrupts and disable all RTC interrupts */
+ ret = sprd_rtc_disable_ints(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to disable RTC interrupts\n");
+ return ret;
+ }
+
+ /* check if RTC time values are valid */
+ ret = sprd_rtc_check_power_down(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to check RTC time values\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ sprd_rtc_handler,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ pdev->name, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request RTC irq\n");
+ return ret;
+ }
+
+ rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &sprd_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
+
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+}
+
+static int sprd_rtc_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+ return 0;
+}
+
+static const struct of_device_id sprd_rtc_of_match[] = {
+ { .compatible = "sprd,sc2731-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_rtc_of_match);
+
+static struct platform_driver sprd_rtc_driver = {
+ .driver = {
+ .name = "sprd-rtc",
+ .of_match_table = sprd_rtc_of_match,
+ },
+ .probe = sprd_rtc_probe,
+ .remove = sprd_rtc_remove,
+};
+module_platform_driver(sprd_rtc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum RTC Device Driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index e364550eb9a7..92ff2edb86a6 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -72,9 +72,10 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
retval = rtc_read_time(to_rtc_device(dev), &tm);
if (retval == 0) {
- unsigned long time;
- rtc_tm_to_time(&tm, &time);
- retval = sprintf(buf, "%lu\n", time);
+ time64_t time;
+
+ time = rtc_tm_to_time64(&tm);
+ retval = sprintf(buf, "%lld\n", time);
}
return retval;
@@ -132,7 +133,7 @@ static ssize_t
wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
- unsigned long alarm;
+ time64_t alarm;
struct rtc_wkalrm alm;
/* Don't show disabled alarms. For uniformity, RTC alarms are
@@ -145,8 +146,8 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
if (retval == 0 && alm.enabled) {
- rtc_tm_to_time(&alm.time, &alarm);
- retval = sprintf(buf, "%lu\n", alarm);
+ alarm = rtc_tm_to_time64(&alm.time);
+ retval = sprintf(buf, "%lld\n", alarm);
}
return retval;
@@ -157,8 +158,8 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
ssize_t retval;
- unsigned long now, alarm;
- unsigned long push = 0;
+ time64_t now, alarm;
+ time64_t push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
const char *buf_ptr;
@@ -170,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
retval = rtc_read_time(rtc, &alm.time);
if (retval < 0)
return retval;
- rtc_tm_to_time(&alm.time, &now);
+ now = rtc_tm_to_time64(&alm.time);
buf_ptr = buf;
if (*buf_ptr == '+') {
@@ -181,7 +182,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
} else
adjust = 1;
}
- retval = kstrtoul(buf_ptr, 0, &alarm);
+ retval = kstrtos64(buf_ptr, 0, &alarm);
if (retval)
return retval;
if (adjust) {
@@ -197,7 +198,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
if (alm.enabled) {
if (push) {
- rtc_tm_to_time(&alm.time, &push);
+ push = rtc_tm_to_time64(&alm.time);
alarm += push;
} else
return -EBUSY;
@@ -212,7 +213,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
*/
alarm = now + 300;
}
- rtc_time_to_tm(alarm, &alm.time);
+ rtc_time64_to_tm(alarm, &alm.time);
retval = rtc_set_alarm(rtc, &alm);
return (retval < 0) ? retval : n;
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 65b432a096fe..0c34d3b81279 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -52,6 +52,7 @@ struct xgene_rtc_dev {
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
+ unsigned int irq_enabled;
};
static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -104,15 +105,19 @@ static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
return 0;
}
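+/* Report whether the alarm interrupt is currently enabled in RTC_CCR */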
+static int xgene_rtc_alarm_irq_enabled(struct device *dev)
+{
+ struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
+
+ return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 1 : 0;
+}
+
static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- unsigned long rtc_time;
unsigned long alarm_time;
- rtc_time = readl(pdata->csr_base + RTC_CCVR);
rtc_tm_to_time(&alrm->time, &alarm_time);
-
pdata->alarm_time = alarm_time;
writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR);
@@ -180,12 +185,18 @@ static int xgene_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Couldn't get the clock for RTC\n");
return -ENODEV;
}
- clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
/* Turn on the clock and the crystal */
writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
- device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk);
+ return ret;
+ }
pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&xgene_rtc_ops, THIS_MODULE);
@@ -210,45 +221,55 @@ static int xgene_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int xgene_rtc_suspend(struct device *dev)
+static int __maybe_unused xgene_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq(pdev, 0);
+
+ /*
+ * If this RTC alarm will be used to wake the system up, leave it
+ * enabled. Otherwise remember its state, disable the alarm and
+ * gate the clock until resume.
+ */
if (device_may_wakeup(&pdev->dev)) {
if (!enable_irq_wake(irq))
pdata->irq_wake = 1;
} else {
+ pdata->irq_enabled = xgene_rtc_alarm_irq_enabled(dev);
xgene_rtc_alarm_irq_enable(dev, 0);
- clk_disable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
}
-
return 0;
}
-static int xgene_rtc_resume(struct device *dev)
+static int __maybe_unused xgene_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
+ int rc;
irq = platform_get_irq(pdev, 0);
+
if (device_may_wakeup(&pdev->dev)) {
if (pdata->irq_wake) {
disable_irq_wake(irq);
pdata->irq_wake = 0;
}
} else {
- clk_enable(pdata->clk);
- xgene_rtc_alarm_irq_enable(dev, 1);
+ rc = clk_prepare_enable(pdata->clk);
+ if (rc) {
+ dev_err(dev, "Unable to enable clock error %d\n", rc);
+ return rc;
+ }
+ xgene_rtc_alarm_irq_enable(dev, pdata->irq_enabled);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fdb3b133e5e0..66e008f7adb6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -71,8 +71,8 @@ static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
-static void dasd_device_timeout(unsigned long);
-static void dasd_block_timeout(unsigned long);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
@@ -120,9 +120,7 @@ struct dasd_device *dasd_alloc_device(void)
(void (*)(unsigned long)) dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
- init_timer(&device->timer);
- device->timer.function = dasd_device_timeout;
- device->timer.data = (unsigned long) device;
+ timer_setup(&device->timer, dasd_device_timeout, 0);
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
@@ -164,9 +162,7 @@ struct dasd_block *dasd_alloc_block(void)
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
- init_timer(&block->timer);
- block->timer.function = dasd_block_timeout;
- block->timer.data = (unsigned long) block;
+ timer_setup(&block->timer, dasd_block_timeout, 0);
spin_lock_init(&block->profile.lock);
return block;
@@ -1561,12 +1557,12 @@ EXPORT_SYMBOL(dasd_start_IO);
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
* DASD_CQR_QUEUED for 2) and 3).
*/
-static void dasd_device_timeout(unsigned long ptr)
+static void dasd_device_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_device *device;
- device = (struct dasd_device *) ptr;
+ device = from_timer(device, t, timer);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
@@ -2629,12 +2625,12 @@ EXPORT_SYMBOL(dasd_cancel_req);
* is waiting for something that may not come reliably, (e.g. a state
* change interrupt)
*/
-static void dasd_block_timeout(unsigned long ptr)
+static void dasd_block_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_block *block;
- block = (struct dasd_block *) ptr;
+ block = from_timer(block, t, timer);
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 9b4c61c1e309..e4e2df7a478e 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -158,7 +158,7 @@ static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
del_timer(&sclp_request_timer);
- sclp_request_timer.function = (TIMER_FUNC_TYPE)cb;
+ sclp_request_timer.function = cb;
sclp_request_timer.expires = jiffies + time;
add_timer(&sclp_request_timer);
}
@@ -566,7 +566,7 @@ sclp_sync_wait(void)
if (timer_pending(&sclp_request_timer) &&
get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
- sclp_request_timer.function((TIMER_DATA_TYPE)&sclp_request_timer);
+ sclp_request_timer.function(&sclp_request_timer);
cpu_relax();
}
local_irq_disable();
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index f0c7c182b077..eb07862bd36a 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -130,8 +130,9 @@ fsm_getstate_str(fsm_instance *fi)
}
static void
-fsm_expire_timer(fsm_timer *this)
+fsm_expire_timer(struct timer_list *t)
{
+ fsm_timer *this = from_timer(this, t, tl);
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
@@ -143,13 +144,11 @@ void
fsm_settimer(fsm_instance *fi, fsm_timer *this)
{
this->fi = fi;
- this->tl.function = (void *)fsm_expire_timer;
- this->tl.data = (long)this;
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
this);
#endif
- init_timer(&this->tl);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
}
void
@@ -171,7 +170,7 @@ fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
this->fi->name, this, millisec);
#endif
- setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -190,7 +189,7 @@ fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
#endif
del_timer(&this->tl);
- setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 51b81c0a0652..b12cb81ad8a2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -34,7 +34,7 @@ static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
unsigned long timeout)
{
- fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_fsf_request_timeout_handler;
+ fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
fsf_req->timer.expires = jiffies + timeout;
add_timer(&fsf_req->timer);
}
@@ -42,7 +42,7 @@ static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
BUG_ON(!fsf_req->erp_action);
- fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_erp_timeout_handler;
+ fsf_req->timer.function = zfcp_erp_timeout_handler;
fsf_req->timer.expires = jiffies + 30 * HZ;
add_timer(&fsf_req->timer);
}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 5402b85b0bdc..2dbc8330d7d3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1175,7 +1175,7 @@ static void asd_start_scb_timers(struct list_head *list)
struct asd_ascb *ascb;
list_for_each_entry(ascb, list, list) {
if (!ascb->uldd_timer) {
- ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout;
+ ascb->timer.function = asd_ascb_timedout;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 4637119c09d8..2a01702d5ba7 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -42,7 +42,7 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
ascb->tasklet_complete = tasklet_complete;
ascb->uldd_timer = 1;
- ascb->timer.function = (TIMER_FUNC_TYPE)timed_out;
+ ascb->timer.function = timed_out;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index af032c46ec0e..21f6421536a0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -101,7 +101,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
-static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
@@ -837,10 +837,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
@@ -930,10 +928,8 @@ static int arcmsr_resume(struct pci_dev *pdev)
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
return 0;
controller_stop:
@@ -3459,9 +3455,9 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
}
}
-static void arcmsr_request_device_map(unsigned long pacb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
arcmsr_hbaA_request_device_map(acb);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 24388795ee9a..f4775ca70bab 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2318,9 +2318,9 @@ DEF_SCSI_QCMD(fas216_noqueue_command)
* Error handler timeout function. Indicate that we timed out,
* and wake up any error handler process so it can continue.
*/
-static void fas216_eh_timer(unsigned long data)
+static void fas216_eh_timer(struct timer_list *t)
{
- FAS216_Info *info = (FAS216_Info *)data;
+ FAS216_Info *info = from_timer(info, t, eh_timer);
fas216_log(info, LOG_ERROR, "error handling timed out\n");
@@ -2849,9 +2849,7 @@ int fas216_init(struct Scsi_Host *host)
info->rst_dev_status = -1;
info->rst_bus_status = -1;
init_waitqueue_head(&info->eh_wait);
- init_timer(&info->eh_timer);
- info->eh_timer.data = (unsigned long)info;
- info->eh_timer.function = fas216_eh_timer;
+ timer_setup(&info->eh_timer, fas216_eh_timer, 0);
spin_lock_init(&info->host_lock);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index be96aa1e5077..b3cfdd5f4d1c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5279,7 +5279,7 @@ static void beiscsi_hw_health_check(struct timer_list *t)
if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
return;
/* modify this timer to check TPE */
- phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check;
+ phba->hw_check.function = beiscsi_hw_tpe_check;
}
mod_timer(&phba->hw_check,
@@ -5367,7 +5367,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
* Timer function gets modified for TPE detection.
* Always reinit to do health check first.
*/
- phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check;
+ phba->hw_check.function = beiscsi_hw_health_check;
mod_timer(&phba->hw_check,
jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
return 0;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 5caf5f3ff642..cf0466686804 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -692,9 +692,9 @@ ext:
}
void
-bfad_bfa_tmo(unsigned long data)
+bfad_bfa_tmo(struct timer_list *t)
{
- struct bfad_s *bfad = (struct bfad_s *) data;
+ struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);
unsigned long flags;
struct list_head doneq;
@@ -719,9 +719,7 @@ bfad_bfa_tmo(unsigned long data)
void
bfad_init_timer(struct bfad_s *bfad)
{
- init_timer(&bfad->hal_tmo);
- bfad->hal_tmo.function = bfad_bfa_tmo;
- bfad->hal_tmo.data = (unsigned long)bfad;
+ timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
mod_timer(&bfad->hal_tmo,
jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index cfcfff48e8e1..4fe980a6441f 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -314,7 +314,7 @@ int bfad_setup_intr(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
-void bfad_bfa_tmo(unsigned long data);
+void bfad_bfa_tmo(struct timer_list *t);
void bfad_init_timer(struct bfad_s *bfad);
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 5b6153f23f01..8e2f767147cb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1084,24 +1084,35 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
int rc = SUCCESS;
+ unsigned int time_left;
io_req->wait_for_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
-
+ /*
+ * Can't wait forever on cleanup response lest we let the SCSI error
+ * handler wait forever
+ */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
io_req->wait_for_comp = 0;
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
+ __func__);
+
/*
- * release the reference taken in eh_abort to allow the
- * target to re-login after flushing IOs
+ * Release the reference held by the SCSI command; the cleanup
+ * completion hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl()
+ * and thus the SCSI command is not returned by bnx2fc_scsi_done().
*/
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_lock_bh(&tgt->tgt_lock);
return rc;
}
+
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@@ -1118,6 +1129,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_lport *lport;
struct bnx2fc_rport *tgt;
int rc;
+ unsigned int time_left;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
@@ -1194,6 +1206,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
+ /*
+ * We don't want to hold off the upper layer timer, so simply
+ * clean up the command and return that the I/O was successfully
+ * aborted.
+ */
rc = bnx2fc_abts_cleanup(io_req);
/* This only occurs when an task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
@@ -1201,7 +1218,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
was a result from the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
- goto out;
+ goto done;
}
/* Cancel the current timer running on this io_req */
@@ -1221,7 +1238,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
+ /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ (2 * rp->r_a_tov + 1) * HZ);
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done\n");
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
@@ -1233,8 +1254,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
+ /*
+ * Cleanup firmware residuals before returning control back
+ * to SCSI ML.
+ */
rc = bnx2fc_abts_cleanup(io_req);
- goto out;
+ goto done;
} else {
/*
* We come here even when there was a race condition
@@ -1249,7 +1274,6 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
-out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
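
The bnx2fc changes above replace unbounded wait_for_completion() calls in the error-handler path with timed waits, so a lost firmware response can no longer hang the SCSI EH thread; the abort path also converges on a single done: label. A minimal sketch of the bounded-wait part, assuming a hypothetical request structure and timeout:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    struct my_io_req {
    	struct completion tm_done;
    };

    static int my_wait_for_cleanup(struct my_io_req *io_req, unsigned long r_a_tov)
    {
    	unsigned long time_left;

    	/* returns 0 on timeout, otherwise the jiffies remaining */
    	time_left = wait_for_completion_timeout(&io_req->tm_done,
    						(2 * r_a_tov + 1) * HZ);
    	if (!time_left) {
    		pr_debug("cleanup response timed out\n");
    		return -ETIMEDOUT;
    	}
    	return 0;
    }
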
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 59a2dfbcbc69..a8ae1a019eea 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -14,8 +14,8 @@
*/
#include "bnx2fc.h"
-static void bnx2fc_upld_timer(unsigned long data);
-static void bnx2fc_ofld_timer(unsigned long data);
+static void bnx2fc_upld_timer(struct timer_list *t);
+static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
struct fcoe_port *port,
struct fc_rport_priv *rdata);
@@ -27,10 +27,10 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
-static void bnx2fc_upld_timer(unsigned long data)
+static void bnx2fc_upld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
@@ -40,10 +40,10 @@ static void bnx2fc_upld_timer(unsigned long data)
wake_up_interruptible(&tgt->upld_wait);
}
-static void bnx2fc_ofld_timer(unsigned long data)
+static void bnx2fc_ofld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
/* NOTE: This function should never be called, as
@@ -65,7 +65,7 @@ static void bnx2fc_ofld_timer(unsigned long data)
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->ofld_wait,
@@ -277,7 +277,7 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->upld_wait,
(test_bit(
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index babd79361a46..bf07735275a4 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -586,8 +586,8 @@ static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
if (rpl->status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) {
- csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer;
+ csk->retry_timer.function != act_open_retry_timer) {
+ csk->retry_timer.function = act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 266eddf17a99..406e94312d4e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -963,8 +963,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
spin_lock_bh(&csk->lock);
if (status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) {
- csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer;
+ csk->retry_timer.function != csk_act_open_retry_timer) {
+ csk->retry_timer.function = csk_act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 81f226be3e3b..4eb14301a497 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -1631,23 +1631,21 @@ void esas2r_adapter_tasklet(unsigned long context)
}
}
-static void esas2r_timer_callback(unsigned long context);
+static void esas2r_timer_callback(struct timer_list *t);
void esas2r_kickoff_timer(struct esas2r_adapter *a)
{
- init_timer(&a->timer);
+ timer_setup(&a->timer, esas2r_timer_callback, 0);
- a->timer.function = esas2r_timer_callback;
- a->timer.data = (unsigned long)a;
a->timer.expires = jiffies +
msecs_to_jiffies(100);
add_timer(&a->timer);
}
-static void esas2r_timer_callback(unsigned long context)
+static void esas2r_timer_callback(struct timer_list *t)
{
- struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+ struct esas2r_adapter *a = from_timer(a, t, timer);
set_bit(AF2_TIMER_TICK, &a->flags2);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index fff6f1851dc1..097f37de6ce9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -49,7 +49,7 @@
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
-static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timeout(struct timer_list *);
static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
@@ -156,7 +156,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
mutex_init(&fip->ctlr_mutex);
spin_lock_init(&fip->ctlr_lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
- setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+ timer_setup(&fip->timer, fcoe_ctlr_timeout, 0);
INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
@@ -1786,9 +1786,9 @@ unlock:
* fcoe_ctlr_timeout() - FIP timeout handler
* @arg: The FCoE controller that timed out
*/
-static void fcoe_ctlr_timeout(unsigned long arg)
+static void fcoe_ctlr_timeout(struct timer_list *t)
{
- struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+ struct fcoe_ctlr *fip = from_timer(fip, t, timer);
schedule_work(&fip->timer_work);
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index aacadbf20b69..e52599f44170 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -407,18 +407,18 @@ static int fnic_notify_set(struct fnic *fnic)
return err;
}
-static void fnic_notify_timer(unsigned long data)
+static void fnic_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, notify_timer);
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(unsigned long data)
+static void fnic_fip_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, fip_timer);
fnic_handle_fip_timer(fnic);
}
@@ -777,8 +777,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
fnic->set_vlan = fnic_set_vlan;
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
- (unsigned long)fnic);
+ timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
@@ -809,8 +808,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notify timer when using MSI interrupts */
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- setup_timer(&fnic->notify_timer,
- fnic_notify_timer, (unsigned long)fnic);
+ timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
/* allocate RQ buffers and post them to RQ*/
for (i = 0; i < fnic->rq_count; i++) {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 61a85ff8e459..5f503cb09508 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -839,7 +839,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
}
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -1451,7 +1451,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->dev = device;
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index d02c2a791981..5d3467fd728d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1268,7 +1268,7 @@ static void link_timeout_enable_link(struct timer_list *t)
}
}
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
@@ -1289,13 +1289,13 @@ static void link_timeout_disable_link(struct timer_list *t)
}
}
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link;
+ hisi_hba->timer.function = link_timeout_enable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
}
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&hisi_hba->timer);
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d53429371127..cc0187965eee 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -997,7 +997,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
ipr_cmd->done = done;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+ ipr_cmd->timer.function = timeout_func;
add_timer(&ipr_cmd->timer);
@@ -8312,7 +8312,7 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done;
+ ipr_cmd->timer.function = ipr_reset_timer_done;
add_timer(&ipr_cmd->timer);
}
@@ -8397,7 +8397,7 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
}
ipr_cmd->timer.expires = jiffies + stage_time * HZ;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
@@ -8468,7 +8468,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
}
ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 1a4e701a8449..4fae253d4f3d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1214,7 +1214,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
fsp->seq_ptr = seq;
fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+ fsp->timer.function = fc_fcp_timeout;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
@@ -1307,7 +1307,7 @@ static void fc_lun_reset_send(struct timer_list *t)
return;
if (fc_fcp_lock_pkt(fsp))
return;
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send;
+ fsp->timer.function = fc_lun_reset_send;
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
fc_fcp_unlock_pkt(fsp);
}
@@ -1445,7 +1445,7 @@ static void fc_fcp_timeout(struct timer_list *t)
if (fsp->lp->qfull) {
FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
fsp->timer_delay);
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+ fsp->timer.function = fc_fcp_timeout;
fc_fcp_timer_set(fsp, fsp->timer_delay);
goto unlock;
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 174e5eff6155..ca1566237ae7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -92,7 +92,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
task->task_done = smp_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout;
+ task->slow_task->timer.function = smp_task_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 91795eb56206..58476b728c57 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -919,7 +919,7 @@ void sas_task_abort(struct sas_task *task)
return;
if (!del_timer(&slow->timer))
return;
- slow->timer.function((TIMER_DATA_TYPE)&slow->timer);
+ slow->timer.function(&slow->timer);
return;
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff1c37b8d2e..cff43bd9f675 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1310,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = mvs_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout;
+ task->slow_task->timer.function = mvs_tmf_timedout;
task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -2020,7 +2020,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
tmp | PHYEV_SIG_FIS);
if (phy->timer.function == NULL) {
- phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out;
+ phy->timer.function = mvs_sig_time_out;
phy->timer.expires = jiffies + 5*HZ;
add_timer(&phy->timer);
}
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 5b93ed810f6e..dc4e801b2cef 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8093,9 +8093,9 @@ irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ncr53c8xx_timeout(unsigned long npref)
+static void ncr53c8xx_timeout(struct timer_list *t)
{
- struct ncb *np = (struct ncb *) npref;
+ struct ncb *np = from_timer(np, t, timer);
unsigned long flags;
struct scsi_cmnd *done_list;
@@ -8357,9 +8357,7 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
if (!np->scripth0)
goto attach_error;
- init_timer(&np->timer);
- np->timer.data = (unsigned long) np;
- np->timer.function = ncr53c8xx_timeout;
+ timer_setup(&np->timer, ncr53c8xx_timeout, 0);
/* Try to map the controller chip to virtual and physical memory. */
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 0e294e80c169..947d6017d004 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -695,7 +695,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
task->task_proto = dev->tproto;
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = pm8001_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -781,7 +781,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
task->dev = dev;
task->task_proto = dev->tproto;
task->task_done = pm8001_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4f9f115fb6a0..e58be98430b0 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -604,7 +604,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done;
+ cmd->timer.function = pmcraid_bist_done;
add_timer(&cmd->timer);
}
@@ -636,7 +636,7 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
/* restart timer if some more time is available to wait */
cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
}
}
@@ -673,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
*/
cmd->time_left = PMCRAID_RESET_TIMEOUT;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
iowrite32(DOORBELL_IOA_RESET_ALERT,
@@ -923,7 +923,7 @@ static void pmcraid_send_cmd(
if (timeout_func) {
/* setup timeout handler */
cmd->timer.expires = jiffies + timeout;
- cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+ cmd->timer.function = timeout_func;
add_timer(&cmd->timer);
}
@@ -1951,7 +1951,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
cmd->cmd_done = pmcraid_ioa_reset;
cmd->timer.expires = jiffies +
msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler;
+ cmd->timer.function = pmcraid_timeout_handler;
if (!timer_pending(&cmd->timer))
add_timer(&cmd->timer);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index fe5a9ea27b5e..78d4aa8df675 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -22,7 +22,7 @@ struct scsi_dev_info_list {
struct list_head dev_info_list;
char vendor[8];
char model[16];
- unsigned flags;
+ blist_flags_t flags;
unsigned compatible; /* for use with scsi_static_device_list entries */
};
@@ -35,7 +35,7 @@ struct scsi_dev_info_list_table {
static const char spaces[] = " "; /* 16 of them */
-static unsigned scsi_default_dev_flags;
+static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -52,7 +52,7 @@ static struct {
char *vendor;
char *model;
char *revision; /* revision known to be bad, unused */
- unsigned flags;
+ blist_flags_t flags;
} scsi_static_device_list[] __initdata = {
/*
* The following devices are known not to tolerate a lun != 0 scan
@@ -335,7 +335,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
* Returns: 0 OK, -error on failure.
**/
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
- char *strflags, int flags)
+ char *strflags, blist_flags_t flags)
{
return scsi_dev_info_list_add_keyed(compatible, vendor, model,
strflags, flags,
@@ -361,7 +361,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, int flags, int key)
+ char *strflags, blist_flags_t flags, int key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -571,9 +571,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
* matching flags value, else return the host or global default
* settings. Called during scan time.
**/
-int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model)
+blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model)
{
return scsi_get_device_flags_keyed(sdev, vendor, model,
SCSI_DEVINFO_GLOBAL);
@@ -593,7 +593,7 @@ int scsi_get_device_flags(struct scsi_device *sdev,
* flags value, else return the host or global default settings.
* Called during scan time.
**/
-int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
int key)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index df1368aea9a3..a5946cd64caa 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -50,15 +50,16 @@ enum {
SCSI_DEVINFO_SPI,
};
-extern int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model);
-extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model, int key);
+extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model);
+extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- int flags, int key);
+ blist_flags_t flags, int key);
extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a0f2a20ea9e9..be5e919db0e8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -566,7 +566,7 @@ EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
* are copied to the scsi_device any flags value is stored in *@bflags.
**/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int result_len, int *bflags)
+ int result_len, blist_flags_t *bflags)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
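
The scsi_devinfo/scsi_priv/scsi_scan hunks switch the BLIST flag plumbing from bare int/unsigned to a dedicated blist_flags_t, so the compiler and sparse can catch places that mix device-list flags with unrelated integers. A sketch of the idea using a hypothetical flag typedef (the real one lives with the BLIST_* definitions in scsi_devinfo.h):

    /* hypothetical stand-in for the kernel's blist_flags_t */
    typedef unsigned int my_blist_flags_t;

    #define MY_BLIST_NOLUN		((my_blist_flags_t)(1 << 0))
    #define MY_BLIST_FORCELUN	((my_blist_flags_t)(1 << 1))

    struct my_dev_info {
    	char vendor[8];
    	char model[16];
    	my_blist_flags_t flags;		/* was: unsigned flags */
    };

    /* callers now receive the typed value, not a plain int */
    static my_blist_flags_t my_get_device_flags(const struct my_dev_info *di)
    {
    	return di->flags;
    }
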
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d32e3ba8863e..791a2182de53 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -565,9 +565,9 @@ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
/*
* Linux entry point of the timer handler
*/
-static void sym53c8xx_timer(unsigned long npref)
+static void sym53c8xx_timer(struct timer_list *t)
{
- struct sym_hcb *np = (struct sym_hcb *)npref;
+ struct sym_hcb *np = from_timer(np, t, s.timer);
unsigned long flags;
spin_lock_irqsave(np->s.host->host_lock, flags);
@@ -1351,9 +1351,7 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
/*
* Start the timer daemon
*/
- init_timer(&np->s.timer);
- np->s.timer.data = (unsigned long) np;
- np->s.timer.function = sym53c8xx_timer;
+ timer_setup(&np->s.timer, sym53c8xx_timer, 0);
np->s.lasttime=0;
sym_timer (np);
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index 609332b3e15b..c462b1c046cd 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -293,9 +293,9 @@ static void gb_operation_work(struct work_struct *work)
gb_operation_put(operation);
}
-static void gb_operation_timeout(unsigned long arg)
+static void gb_operation_timeout(struct timer_list *t)
{
- struct gb_operation *operation = (void *)arg;
+ struct gb_operation *operation = from_timer(operation, t, timer);
if (gb_operation_result_set(operation, -ETIMEDOUT)) {
/*
@@ -540,8 +540,7 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
goto err_request;
}
- setup_timer(&operation->timer, gb_operation_timeout,
- (unsigned long)operation);
+ timer_setup(&operation->timer, gb_operation_timeout, 0);
}
operation->flags = op_flags;
diff --git a/drivers/staging/irda/include/net/irda/timer.h b/drivers/staging/irda/include/net/irda/timer.h
index a6635f0afae9..6dab15f5dae1 100644
--- a/drivers/staging/irda/include/net/irda/timer.h
+++ b/drivers/staging/irda/include/net/irda/timer.h
@@ -75,7 +75,7 @@ struct lap_cb;
static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
void (*callback)(struct timer_list *))
{
- ptimer->function = (TIMER_FUNC_TYPE) callback;
+ ptimer->function = callback;
/* Set new value for timer (update or add timer).
* We use mod_timer() because it's more efficient and also
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 3c83aa31e2c2..5a5d1811ffbe 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -700,9 +700,9 @@ lnet_delay_rule_daemon(void *arg)
}
static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(struct timer_list *t)
{
- struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+ struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
spin_lock_bh(&delay_dd.dd_lock);
if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
@@ -762,7 +762,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
+ timer_setup(&rule->dl_timer, delay_timer_cb, 0);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 2d6e64dea266..938b859b6650 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1016,7 +1016,7 @@ static bool file_is_noatime(const struct file *file)
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
return false;
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 65ac5128f005..8666f1e81ade 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -313,11 +313,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
sbi->ll_flags |= LL_SBI_ACL;
} else {
LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
sbi->ll_flags &= ~LL_SBI_ACL;
}
@@ -660,7 +660,7 @@ void ll_kill_super(struct super_block *sb)
struct ll_sb_info *sbi;
/* not init sb ?*/
- if (!(sb->s_flags & MS_ACTIVE))
+ if (!(sb->s_flags & SB_ACTIVE))
return;
sbi = ll_s2sbi(sb);
@@ -2039,8 +2039,8 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
int err;
__u32 read_only;
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
- read_only = *flags & MS_RDONLY;
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+ read_only = *flags & SB_RDONLY;
err = obd_set_info_async(NULL, sbi->ll_md_exp,
sizeof(KEY_READ_ONLY),
KEY_READ_ONLY, sizeof(read_only),
@@ -2053,9 +2053,9 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
}
if (read_only)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
else
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (sbi->ll_flags & LL_SBI_VERBOSE)
LCONSOLE_WARN("Remounted %s %s\n", profilenm,
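
The lustre llite hunks swap the mount-syscall MS_* constants for the kernel-internal SB_* superblock flags; the numeric values are the same at this point, but s_flags is meant to carry SB_* while the MS_* names are kept for the user-facing mount API. A short, generic sketch of testing and updating s_flags with the SB_* names:

    #include <linux/fs.h>

    static bool my_sb_is_active(const struct super_block *sb)
    {
    	return sb->s_flags & SB_ACTIVE;
    }

    static void my_apply_read_only(struct super_block *sb, bool read_only)
    {
    	if (read_only)
    		sb->s_flags |= SB_RDONLY;
    	else
    		sb->s_flags &= ~SB_RDONLY;
    }
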
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 23cdb7c4476c..63be6e7273f3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -329,11 +329,11 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
return -1;
}
-static void ptlrpc_at_timer(unsigned long castmeharder)
+static void ptlrpc_at_timer(struct timer_list *t)
{
struct ptlrpc_service_part *svcpt;
- svcpt = (struct ptlrpc_service_part *)castmeharder;
+ svcpt = from_timer(svcpt, t, scp_at_timer);
svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current();
@@ -506,8 +506,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
if (!array->paa_reqs_count)
goto free_reqs_array;
- setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
- (unsigned long)svcpt);
+ timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
/* At SOW, service time should be quick; 10s seems generous. If client
* timeout is less than this, we'll be sending an early reply.
@@ -926,7 +925,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
- ptlrpc_at_timer((unsigned long)svcpt);
+ ptlrpc_at_timer(&svcpt->scp_at_timer);
} else {
mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
CDEBUG(D_INFO, "armed %s at %+ds\n",
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 0790b3d9e255..143038c6c403 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -293,9 +293,9 @@ static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void prp_eof_timeout(unsigned long data)
+static void prp_eof_timeout(struct timer_list *t)
{
- struct prp_priv *priv = (struct prp_priv *)data;
+ struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
struct imx_ic_priv *ic_priv = priv->ic_priv;
@@ -1292,8 +1292,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
priv->ic_priv = ic_priv;
spin_lock_init(&priv->irqlock);
- setup_timer(&priv->eof_timeout_timer, prp_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
PRPENCVF_SRC_PAD);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 6d856118c223..bb1d6dafca83 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -254,9 +254,9 @@ static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void csi_idmac_eof_timeout(unsigned long data)
+static void csi_idmac_eof_timeout(struct timer_list *t)
{
- struct csi_priv *priv = (struct csi_priv *)data;
+ struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
v4l2_err(&priv->sd, "EOF timeout\n");
@@ -1739,8 +1739,7 @@ static int imx_csi_probe(struct platform_device *pdev)
priv->csi_id = pdata->csi;
priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
- setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
spin_lock_init(&priv->irqlock);
v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 85775da293fb..667dacac81f0 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -744,9 +744,9 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
* The handler runs in interrupt context. That's why we need to defer the
* tasks to a work queue.
*/
-static void link_stat_timer_handler(unsigned long data)
+static void link_stat_timer_handler(struct timer_list *t)
{
- struct most_dev *mdev = (struct most_dev *)data;
+ struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
@@ -1138,8 +1138,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
- setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
- (unsigned long)mdev);
+ timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
mdev->usb_device = usb_dev;
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 4e7908322d77..f56fdc7a4b61 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -391,10 +391,10 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-static void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(struct timer_list *t)
{
struct ieee80211_device *ieee =
- (struct ieee80211_device *) _ieee;
+ from_timer(ieee, t, beacon_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1251,9 +1251,11 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(struct timer_list *t)
{
- ieee80211_associate_abort((struct ieee80211_device *) dev);
+ struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
+
+ ieee80211_associate_abort(dev);
}
@@ -2718,11 +2720,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->enable_rx_imm_BA = true;
ieee->tx_pending.txb = NULL;
- setup_timer(&ieee->associate_timer, ieee80211_associate_abort_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
- setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 576c15d25a0f..986a55bb9877 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -138,17 +138,16 @@ _recv_indicatepkt_drop:
precvpriv->rx_drop++;
}
-static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
+static void _r8712_reordering_ctrl_timeout_handler (struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl =
- (struct recv_reorder_ctrl *)data;
+ from_timer(preorder_ctrl, t, reordering_ctrl_timer);
r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
}
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- _r8712_reordering_ctrl_timeout_handler,
- (unsigned long)preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ _r8712_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index da1d4a641dcd..455fba721135 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -74,7 +74,7 @@ enum _LED_STATE_871x {
* Prototype of protected function.
*===========================================================================
*/
-static void BlinkTimerCallback(unsigned long data);
+static void BlinkTimerCallback(struct timer_list *t);
static void BlinkWorkItemCallback(struct work_struct *work);
/*===========================================================================
@@ -99,8 +99,7 @@ static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
pLed->bLedBlinkInProgress = false;
pLed->BlinkTimes = 0;
pLed->BlinkingLedState = LED_UNKNOWN;
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
- (unsigned long)pLed);
+ timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
@@ -825,9 +824,9 @@ static void SwLedBlink6(struct LED_871x *pLed)
* Callback function of LED BlinkTimer,
* it just schedules to corresponding BlinkWorkItem.
*/
-static void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
/* This fixed the crash problem on Fedora 12 when trying to do the
* insmod;ifconfig up;rmmod commands.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 16497202473f..aae868509e13 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1164,7 +1164,7 @@ static void spkup_write(const u16 *in_buf, int count)
static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
static void read_all_doc(struct vc_data *vc);
-static void cursor_done(u_long data);
+static void cursor_done(struct timer_list *unused);
static DEFINE_TIMER(cursor_timer, cursor_done);
static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
@@ -1682,7 +1682,7 @@ static int speak_highlight(struct vc_data *vc)
return 0;
}
-static void cursor_done(u_long data)
+static void cursor_done(struct timer_list *unused)
{
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
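
speakup's cursor_timer uses the two-argument DEFINE_TIMER() form, which the new API provides for statically allocated timers whose callback needs no per-instance data (the argument is conventionally named unused, as above). A minimal sketch with hypothetical names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static void my_cursor_done(struct timer_list *unused)
    {
    	/* single static timer, so nothing to look up via from_timer() */
    	pr_debug("cursor timer fired\n");
    }

    /* new form: DEFINE_TIMER(name, callback), no .expires/.data arguments */
    static DEFINE_TIMER(my_cursor_timer, my_cursor_done);

    static void my_arm_cursor_timer(void)
    {
    	mod_timer(&my_cursor_timer, jiffies + msecs_to_jiffies(500));
    }
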
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 6ddd3fc3f08d..aac29c816d09 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -153,7 +153,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
}
EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
-static void thread_wake_up(u_long data)
+static void thread_wake_up(struct timer_list *unused)
{
wake_up_interruptible_all(&speakup_event);
}
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index b604d0cccef1..6cb6eb0673c6 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -493,9 +493,9 @@ static const struct file_operations bus_info_debugfs_fops = {
.release = single_release,
};
-static void dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(struct timer_list *t)
{
- struct visor_device *dev = (struct visor_device *)__opaque;
+ struct visor_device *dev = from_timer(dev, t, timer);
struct visor_driver *drv = to_visor_driver(dev->device.driver);
drv->channel_interrupt(dev);
@@ -667,7 +667,7 @@ int create_visor_device(struct visor_device *dev)
dev->device.release = visorbus_release_device;
/* keep a reference just for us (now 2) */
get_device(&dev->device);
- setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
+ timer_setup(&dev->timer, dev_periodic_work, 0);
/*
* bus_id must be a unique name with respect to this bus TYPE (NOT bus
* instance). That's why we need to include the bus number within the
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 735d7e5fa86b..6d8239163ba5 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1766,9 +1766,10 @@ static int visornic_poll(struct napi_struct *napi, int budget)
* Main function of the vnic_incoming thread. Periodically check the response
* queue and drain it if needed.
*/
-static void poll_for_irq(unsigned long v)
+static void poll_for_irq(struct timer_list *t)
{
- struct visornic_devdata *devdata = (struct visornic_devdata *)v;
+ struct visornic_devdata *devdata = from_timer(devdata, t,
+ irq_poll_timer);
if (!visorchannel_signalempty(
devdata->dev->visorchannel,
@@ -1899,8 +1900,7 @@ static int visornic_probe(struct visor_device *dev)
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- setup_timer(&devdata->irq_poll_timer, poll_for_irq,
- (unsigned long)devdata);
+ timer_setup(&devdata->irq_poll_timer, poll_for_irq, 0);
/* Note: This time has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 8a275996d4e6..028da1dc1b81 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -267,7 +267,7 @@ static void update_scan_time(void)
last_scanned_shadow[i].time_scan = jiffies;
}
-static void remove_network_from_shadow(unsigned long unused)
+static void remove_network_from_shadow(struct timer_list *unused)
{
unsigned long now = jiffies;
int i, j;
@@ -292,7 +292,7 @@ static void remove_network_from_shadow(unsigned long unused)
}
}
-static void clear_duringIP(unsigned long arg)
+static void clear_duringIP(struct timer_list *unused)
{
wilc_optaining_ip = false;
}
@@ -2278,8 +2278,8 @@ int wilc_init_host_int(struct net_device *net)
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
- setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
- setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
+ timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
+ timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
}
op_ifcs++;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 90388698c222..417b9e66b0cd 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
CSK_LOGIN_PDU_DONE,
CSK_LOGIN_DONE,
CSK_DDP_ENABLE,
+ CSK_ABORT_RPL_WAIT,
};
struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index d4fa41be80f9..92eb57e2adaf 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ __kfree_skb(skb);
+
+ if (csk->com.state != CSK_STATE_ESTABLISHED)
+ goto no_abort;
+
+ set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+ csk->com.state = CSK_STATE_ABORTING;
+
+ cxgbit_send_abort_req(csk);
+
+ return;
+
+no_abort:
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+ __skb_queue_tail(&csk->backlogq, skb);
+ } else {
+ __cxgbit_abort_conn(csk, skb);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+ csk->tid, 600, __func__);
+}
+
void cxgbit_free_conn(struct iscsi_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ABORTING:
csk->com.state = CSK_STATE_DEAD;
+ if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+ cxgbit_wake_up(&csk->com.wr_wait, __func__,
+ rpl->status);
cxgbit_put_csk(csk);
break;
default:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 5fdb57cac968..768cce0ccb80 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ /* Abort the TCP conn if DDP is not complete to
+ * avoid any possibility of DDP after freeing
+ * the cmd.
+ */
+ if (unlikely(cmd->write_data_done !=
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 4fd775ace541..f3f8856bfb68 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
+ /* fall through */
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
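
The /* fall through */ comment added in cxgbit_main.c records that the DDP and CPL_FW4_ACK cases intentionally continue into the shared handling below, which keeps -Wimplicit-fallthrough quiet. A small generic sketch of the convention (the opcode values are hypothetical):

    #include <linux/errno.h>

    static int my_classify(int opcode, bool *flush)
    {
    	switch (opcode) {
    	case 1:		/* e.g. a DDP completion */
    	case 2:		/* e.g. a firmware ACK */
    		*flush = false;
    		/* fall through */
    	case 3:		/* remaining opcodes share the handling below */
    	case 4:
    		return 0;	/* queue for the common handler */
    	default:
    		return -EINVAL;
    	}
    }
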
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9e67c7678c86..9eb10d34682c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
EXPORT_SYMBOL(iscsit_aborted_task);
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, u8 *, u8 *);
+ u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
data_buf, data_buf_len,
- padding,
- (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
+ padding, &cmd->pad_bytes,
+ &cmd->data_crc);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
unsigned char *buf)
{
struct iscsi_conn *conn;
+ const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
* Perform the kref_put now if se_cmd has already been setup by
* scsit_setup_scsi_cmd()
*/
- if (cmd->se_cmd.se_tfo != NULL) {
+ if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
return data_crc;
}
-static void iscsit_do_crypto_hash_buf(
- struct ahash_request *hash,
- const void *buf,
- u32 payload_length,
- u32 padding,
- u8 *pad_bytes,
- u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+ const void *buf, u32 payload_length, u32 padding,
+ const void *pad_bytes, void *data_crc)
{
struct scatterlist sg[2];
@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
- pr_err("DataOut Offset: %u, Length %u greater than"
- " iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->se_cmd.data_length);
+ pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+ be32_to_cpu(hdr->offset), payload_length,
+ cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- ping_data, payload_length,
- padding, cmd->pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+ payload_length, padding,
+ cmd->pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
- bool sess_ref = false;
u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
- if (!cmd->tmr_req)
+ if (!cmd->tmr_req) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
+ }
+
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+ target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
- target_get_sess_cmd(&cmd->se_cmd, true);
- sess_ref = true;
tcm_function = iscsit_convert_tmf(function);
if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ attach:
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -2126,12 +2123,8 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
- if (sess_ref) {
- pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(&cmd->se_cmd);
- }
-
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, payload_length,
- padding, (u8 *)&pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+ payload_length, padding,
+ &pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
return;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- buffer, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&checksum);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+ ISCSI_HDR_LEN, 0, NULL,
+ &checksum);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0dd4c45f7575..0ebc4818e132 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
- return NULL;
+ goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
kfree(tpg);
return NULL;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 76184094a0cf..5efa42b939a1 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -34,7 +34,7 @@
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
-#define OFFLOAD_BUF_SIZE 32768
+#define OFFLOAD_BUF_SIZE 32768U
/*
* Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
- length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+ length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
- size = ((offset + length) > buf_len) ?
- (buf_len - offset) : length;
+ size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
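
Making OFFLOAD_BUF_SIZE an unsigned constant (32768U) lets it be passed to the kernel's type-checked min() alongside the u32 buf_len; min() warns at build time when its operands differ in type or signedness. A tiny sketch of why the suffix matters (names are hypothetical):

    #include <linux/kernel.h>	/* min() */
    #include <linux/types.h>

    #define MY_BUF_SIZE	32768U	/* unsigned, so it matches a u32 operand */

    static u32 my_chunk_len(u32 buf_len, u32 offset)
    {
    	u32 length = min(buf_len, MY_BUF_SIZE);

    	/* each pass handles at most 'length' of whatever remains */
    	return min(buf_len - offset, length);
    }
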
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index caab1045742d..29a37b242d30 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
char *key, *value;
struct iscsi_param *param;
- if (iscsi_extract_key_value(start, &key, &value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_extract_key_value(start, &key, &value) < 0)
+ goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
- if (iscsi_add_notunderstood_response(key,
- value, param_list) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_add_notunderstood_response(key, value,
+ param_list) < 0)
+ goto free_buffer;
+
start += strlen(key) + strlen(value) + 2;
continue;
}
- if (iscsi_check_value(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_value(param, value) < 0)
+ goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
- if (iscsi_check_proposer_state(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_proposer_state(param, value) < 0)
+ goto free_buffer;
+
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value, conn) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_acceptor_state(param, value, conn) < 0)
+ goto free_buffer;
+
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
+
+free_buffer:
+ kfree(tmpbuf);
+ return -1;
}
int iscsi_encode_text_output(
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index e446a09c886b..f65e5e584212 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -25,8 +25,6 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
-#define OFFLOAD_BUF_SIZE 32768
-
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 594d07a1e995..4b34f71547c6 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
- goto out;
+ goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
- goto out;
+ goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
+free_pl_out:
+ iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
if (!tpg)
return;
+ iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 54f20f184dd6..4435bf374d2d 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
struct iscsi_session *sess;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->conn)
sess = cmd->conn->sess;
else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
struct iscsi_conn *conn = cmd->conn;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
iscsit_free_r2ts_from_list(cmd);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 928127642574..e46ca968009c 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
- char path[ALUA_METADATA_PATH_LEN];
+ char *path;
int len, rc;
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
return -ENOMEM;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
- snprintf(path, ALUA_METADATA_PATH_LEN,
- "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
- config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
- rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = -ENOMEM;
+ path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+ &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+ if (path) {
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+ }
kfree(md_buf);
return rc;
}
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
- char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ char *path;
int len, rc;
mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
goto out_unlock;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
- memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
- len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
- if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
- snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- lun->unpacked_lun);
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ lun->unpacked_lun);
+ } else {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ lun->unpacked_lun);
+ }
+ if (!path) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+out_free:
kfree(md_buf);
-
out_unlock:
mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 1902cb5c3b52..fc9637cce825 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -72,15 +72,6 @@
*/
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN 512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN 256
/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bd87cc26c6e5..72b1cd1bf9d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%lld"},
+ {Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%lld"},
+ {Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
}
break;
case Opt_sa_res_key:
- ret = kstrtoull(args->from, 0, &tmp_ll);
+ ret = match_u64(args, &tmp_ll);
if (ret < 0) {
pr_err("kstrtoull() failed for sa_res_key=\n");
goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- mapped_lun = (u64)arg;
+ mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
goto out;
break;
case Opt_target_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- target_lun = (u64)arg;
+ target_lun = (u64)tmp_ll;
break;
default:
break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e9e917cc6441..e1416b007aa4 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
NULL,
};
-extern struct configfs_item_operations target_core_dev_item_ops;
-
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c629817a8854..9b2c0c773022 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;
+ if (!nolb) {
+ return 0;
+ }
+
if (cmd->se_dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_unmap(cmd, lba, nolb);
if (ret)
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18e3eb16e756..9384d19a7326 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -89,6 +89,7 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data);
/* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
void target_setup_backend_cits(struct target_backend *);
/* target_core_fabric_configfs.c */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index dd2cd8048582..b024613f9217 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!pr_reg->isid_present_at_reg)
+ if (!pr_reg->isid_present_at_reg) {
buf[0] = '\0';
+ return;
+ }
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
- char path[512];
+ char *path;
u32 pr_aptpl_buf_len;
int ret;
loff_t pos = 0;
- memset(path, 0, 512);
-
- if (strlen(&wwn->unit_serial[0]) >= 512) {
- pr_err("WWN value for struct se_device does not fit"
- " into path buffer\n");
- return -EMSGSIZE;
- }
+ path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+ &wwn->unit_serial[0]);
+ if (!path)
+ return -ENOMEM;
- snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
+ kfree(path);
return PTR_ERR(file);
}
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
+ kfree(path);
return (ret < 0) ? -EIO : 0;
}
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
} else {
/*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
*/
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
- transport_kunmap_data_sg(cmd);
-
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
/*
* Size of full descriptor header minus TransportID
* containing $FABRIC_MOD specific) initiator device/port
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e22847bd79b9..9c7bc1ca341a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
spin_unlock(&se_cmd->t_state_lock);
return false;
}
+ if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+ if (se_cmd->scsi_status) {
+ pr_debug("Attempted to abort io tag: %llu early failure"
+ " status: 0x%02x\n", se_cmd->tag,
+ se_cmd->scsi_status);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ }
if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del_init(&tmr->tmr_list);
+ if (tmr)
+ list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
cmd = tmr_p->task_cmd;
if (!cmd) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 836d552b0385..58caacd54a3b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
if (remove && ack_kref)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
return ret;
}
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
int ret = 0, post_ret = 0;
- if (transport_check_aborted_status(cmd, 1))
- return;
-
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
* callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (transport_check_aborted_status(cmd, 1))
+ return;
+
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
- sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
+ cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto queue_full;
- goto check_stop;
+
+ goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
transport_cmd_check_stop_to_fabric(cmd);
return;
+queue_status:
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (!ret)
+ goto check_stop;
queue_full:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
+ cmd->transport_state |= CMD_T_SENT;
+
__target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
restart:
target_restart_delayed_cmds(dev);
}
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
goto queue_full;
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret;
+ bool stop;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (stop) {
+ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
+ }
ret = cmd->se_tfo->write_pending(cmd);
if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
} else {
if (wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
transport_lun_remove_cmd(cmd);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
}
/*
* If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
ret = -ESHUTDOWN;
goto out;
}
+ se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
.key = NOT_READY,
.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
},
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+ * From spc4r22 sections 5.7.7, 5.7.8
+ * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+ * or a REGISTER AND IGNORE EXISTING KEY service action or
+ * REGISTER AND MOVE service action is attempted,
+ * but there are insufficient device server resources to complete the
+ * operation, then the command shall be terminated with CHECK CONDITION
+ * status, with the sense key set to ILLEGAL REQUEST, and the additional
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
};
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9469695f5871..a415d87f22d2 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -150,6 +150,8 @@ struct tcmu_dev {
wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
+
+ int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int cmd_id;
tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- if (udev->cmd_time_out)
- tcmu_cmd->deadline = jiffies +
- msecs_to_jiffies(udev->cmd_time_out);
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
return NULL;
}
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&udev->commands_lock);
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
- USHRT_MAX, GFP_NOWAIT);
- spin_unlock_irq(&udev->commands_lock);
- idr_preload_end();
-
- if (cmd_id < 0) {
- tcmu_free_cmd(tcmu_cmd);
- return NULL;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
return tcmu_cmd;
}
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned long tmo = udev->cmd_time_out;
+ int cmd_id;
+
+ if (tcmu_cmd->cmd_id)
+ return 0;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ return cmd_id;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
+
+ if (!tmo)
+ return 0;
+
+ tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+ mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ return 0;
+}
+
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
}
entry->req.iov_bidi_cnt = iov_cnt;
+ ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ if (ret) {
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ mutex_unlock(&udev->cmdr_lock);
+ return TCM_OUT_OF_RESOURCES;
+ }
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
/*
* Recalculate the command's base size and size according
* to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
- struct se_device *se_dev = se_cmd->se_dev;
- struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
sense_reason_t ret;
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
ret = tcmu_queue_cmd_ring(tcmu_cmd);
if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
- spin_lock_irq(&udev->commands_lock);
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
- spin_unlock_irq(&udev->commands_lock);
tcmu_free_cmd(tcmu_cmd);
}
@@ -1044,9 +1055,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
return 0;
}
-static void tcmu_device_timedout(unsigned long data)
+static void tcmu_device_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = (struct tcmu_dev *)data;
+ struct tcmu_dev *udev = from_timer(udev, t, timeout);
unsigned long flags;
spin_lock_irqsave(&udev->commands_lock, flags);
@@ -1106,12 +1117,13 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
idr_init(&udev->commands);
spin_lock_init(&udev->commands_lock);
- setup_timer(&udev->timeout, tcmu_device_timedout,
- (unsigned long)udev);
+ timer_setup(&udev->timeout, tcmu_device_timedout, 0);
init_waitqueue_head(&udev->nl_cmd_wq);
spin_lock_init(&udev->nl_cmd_lock);
+ INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
return &udev->se_dev;
}
@@ -1280,10 +1292,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+ int i;
+ struct page *page;
+
+ /* Try to release all block pages */
+ mutex_lock(&udev->cmdr_lock);
+ for (i = 0; i <= udev->dbi_max; i++) {
+ page = radix_tree_delete(&udev->data_blocks, i);
+ if (page) {
+ __free_page(page);
+ atomic_dec(&global_db_count);
+ }
+ }
+ mutex_unlock(&udev->cmdr_lock);
+}
+
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ int i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ /* Upper layer should drain all requests before calling this */
+ spin_lock_irq(&udev->commands_lock);
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ idr_destroy(&udev->commands);
+ spin_unlock_irq(&udev->commands_lock);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
@@ -1306,6 +1362,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
if (!tcmu_kern_cmd_reply_supported)
return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
relock:
spin_lock(&udev->nl_cmd_lock);
@@ -1332,6 +1392,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
if (!tcmu_kern_cmd_reply_supported)
return 0;
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
@@ -1476,8 +1539,6 @@ static int tcmu_configure_device(struct se_device *dev)
WARN_ON(udev->data_size % PAGE_SIZE);
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1567,12 @@ static int tcmu_configure_device(struct se_device *dev)
dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
+ /* If user didn't explicitly disable netlink reply support, use
+ * module scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
/*
* Get a ref in case userspace does a close on the uio device before
* LIO has initiated tcmu_free_device.
@@ -1527,6 +1594,7 @@ err_netlink:
uio_unregister_device(&udev->uio_info);
err_register:
vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
err_vzalloc:
kfree(info->name);
info->name = NULL;
@@ -1534,37 +1602,11 @@ err_vzalloc:
return ret;
}
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return 0;
- }
- return -EINVAL;
-}
-
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
return udev->uio_info.uio_dev ? true : false;
}
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
- int i;
- struct page *page;
-
- /* Try to release all block pages */
- mutex_lock(&udev->cmdr_lock);
- for (i = 0; i <= udev->dbi_max; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
- mutex_unlock(&udev->cmdr_lock);
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1618,6 @@ static void tcmu_free_device(struct se_device *dev)
static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- struct tcmu_cmd *cmd;
- bool all_expired = true;
- int i;
del_timer_sync(&udev->timeout);
@@ -1586,20 +1625,6 @@ static void tcmu_destroy_device(struct se_device *dev)
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- vfree(udev->mb_addr);
-
- /* Upper layer should drain all requests before calling this */
- spin_lock_irq(&udev->commands_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- if (tcmu_check_and_free_pending_cmd(cmd) != 0)
- all_expired = false;
- }
- idr_destroy(&udev->commands);
- spin_unlock_irq(&udev->commands_lock);
- WARN_ON(!all_expired);
-
- tcmu_blocks_release(udev);
-
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1635,7 @@ static void tcmu_destroy_device(struct se_device *dev)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_err,
+ Opt_nl_reply_supported, Opt_err,
};
static match_table_t tokens = {
@@ -1618,6 +1643,7 @@ static match_table_t tokens = {
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_err, NULL}
};
@@ -1692,6 +1718,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_max_sectors));
break;
+ case Opt_nl_reply_supported:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+ kfree(arg_p);
+ if (ret < 0)
+ pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ break;
default:
break;
}
@@ -1734,8 +1771,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
- struct tcmu_dev *udev = container_of(da->da_dev,
- struct tcmu_dev, se_dev);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
@@ -1842,6 +1878,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR(tcmu_, dev_size);
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s8 val;
+ int ret;
+
+ ret = kstrtos8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->nl_reply_supported = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
char *page)
{
@@ -1884,6 +1948,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
+ &tcmu_attr_nl_reply_supported,
NULL,
};
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 5d442469c95e..cf0bde3bb927 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -279,7 +279,7 @@ static unsigned detect_isa_irq(void __iomem *);
#endif /* CONFIG_ISA */
#ifndef CONFIG_CYZ_INTR
-static void cyz_poll(unsigned long);
+static void cyz_poll(struct timer_list *);
/* The Cyclades-Z polling cycle is defined by this variable */
static long cyz_polling_cycle = CZ_DEF_POLL;
@@ -1214,7 +1214,7 @@ static void cyz_rx_restart(struct timer_list *t)
#else /* CONFIG_CYZ_INTR */
-static void cyz_poll(unsigned long arg)
+static void cyz_poll(struct timer_list *unused)
{
struct cyclades_card *cinfo;
struct cyclades_port *info;
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index a6b8240af6cd..b0baa4ce10f9 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -33,7 +33,7 @@ static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
unsigned int address,
const unsigned char *data, int len,
int is_last);
-static void ipwireless_setup_timer(unsigned long data);
+static void ipwireless_setup_timer(struct timer_list *t);
static void handle_received_CTRL_packet(struct ipw_hardware *hw,
unsigned int channel_idx, const unsigned char *data, int len);
@@ -1635,8 +1635,7 @@ struct ipw_hardware *ipwireless_hardware_create(void)
spin_lock_init(&hw->lock);
tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
INIT_WORK(&hw->work_rx, ipw_receive_data_work);
- setup_timer(&hw->setup_timer, ipwireless_setup_timer,
- (unsigned long) hw);
+ timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
return hw;
}
@@ -1670,12 +1669,12 @@ void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
hw->init_loops = 0;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": waiting for card to start up...\n");
- ipwireless_setup_timer((unsigned long) hw);
+ ipwireless_setup_timer(&hw->setup_timer);
}
-static void ipwireless_setup_timer(unsigned long data)
+static void ipwireless_setup_timer(struct timer_list *t)
{
- struct ipw_hardware *hw = (struct ipw_hardware *) data;
+ struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
hw->init_loops++;
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index ee7958ab269f..015686ff4825 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -170,7 +170,7 @@ static struct pci_driver isicom_driver = {
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
-static void isicom_tx(unsigned long _data);
+static void isicom_tx(struct timer_list *unused);
static void isicom_start(struct tty_struct *tty);
static DEFINE_TIMER(tx, isicom_tx);
@@ -394,7 +394,7 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
* will do the rest of the work for us.
*/
-static void isicom_tx(unsigned long _data)
+static void isicom_tx(struct timer_list *unused)
{
unsigned long flags, base;
unsigned int retries;
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 65a70f3c7cde..68cbc03aab4b 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -198,7 +198,7 @@ static void moxa_hangup(struct tty_struct *);
static int moxa_tiocmget(struct tty_struct *tty);
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static void moxa_poll(unsigned long);
+static void moxa_poll(struct timer_list *);
static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
@@ -1429,7 +1429,7 @@ put:
return 0;
}
-static void moxa_poll(unsigned long ignored)
+static void moxa_poll(struct timer_list *unused)
{
struct moxa_board_conf *brd;
u16 __iomem *ip;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 3a39eb685c69..5131bdc9e765 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1310,9 +1310,9 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
* gsm->pending_cmd will be NULL and we just let the timer expire.
*/
-static void gsm_control_retransmit(unsigned long data)
+static void gsm_control_retransmit(struct timer_list *t)
{
- struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
struct gsm_control *ctrl;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
@@ -1453,9 +1453,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* end will get a DM response)
*/
-static void gsm_dlci_t1(unsigned long data)
+static void gsm_dlci_t1(struct timer_list *t)
{
- struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_dlci *dlci = from_timer(dlci, t, t1);
struct gsm_mux *gsm = dlci->gsm;
switch (dlci->state) {
@@ -1634,7 +1634,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
}
skb_queue_head_init(&dlci->skb_list);
- setup_timer(&dlci->t1, gsm_dlci_t1, (unsigned long)dlci);
+ timer_setup(&dlci->t1, gsm_dlci_t1, 0);
tty_port_init(&dlci->port);
dlci->port.ops = &gsm_port_ops;
dlci->gsm = gsm;
@@ -2088,7 +2088,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
struct gsm_dlci *dlci;
int i = 0;
- setup_timer(&gsm->t2_timer, gsm_control_retransmit, (unsigned long)gsm);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 9f246d4db3ca..30bb0900cd2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -115,7 +115,7 @@ static void retry_transmit(struct r3964_info *pInfo);
static void transmit_block(struct r3964_info *pInfo);
static void receive_char(struct r3964_info *pInfo, const unsigned char c);
static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(unsigned long priv);
+static void on_timeout(struct timer_list *t);
static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
unsigned char __user * buf);
@@ -688,9 +688,9 @@ static void receive_error(struct r3964_info *pInfo, const char flag)
}
}
-static void on_timeout(unsigned long priv)
+static void on_timeout(struct timer_list *t)
{
- struct r3964_info *pInfo = (void *)priv;
+ struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
switch (pInfo->state) {
case R3964_TX_REQUEST:
@@ -993,7 +993,7 @@ static int r3964_open(struct tty_struct *tty)
tty->disc_data = pInfo;
tty->receive_room = 65536;
- setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+ timer_setup(&pInfo->tmr, on_timeout, 0);
return 0;
}
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index f7dc9b1ea806..bdd17d2aaafd 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -86,7 +86,7 @@
/****** RocketPort Local Variables ******/
-static void rp_do_poll(unsigned long dummy);
+static void rp_do_poll(struct timer_list *unused);
static struct tty_driver *rocket_driver;
@@ -525,7 +525,7 @@ static void rp_handle_port(struct r_port *info)
/*
* The top level polling routine. Repeats every 1/100 HZ (10ms).
*/
-static void rp_do_poll(unsigned long dummy)
+static void rp_do_poll(struct timer_list *unused)
{
CONTROLLER_t *ctlp;
int ctrl, aiop, ch, line;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index d64afdd93872..9342fc2ee7df 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -325,7 +325,7 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
if (up->bugs & UART_BUG_THRE) {
pr_debug("ttyS%d - using backup timer\n", serial_index(port));
- up->timer.function = (TIMER_FUNC_TYPE)serial8250_backup_timeout;
+ up->timer.function = serial8250_backup_timeout;
mod_timer(&up->timer, jiffies +
uart_poll_timeout(port) + HZ / 5);
}
@@ -348,7 +348,7 @@ static void univ8250_release_irq(struct uart_8250_port *up)
struct uart_port *port = &up->port;
del_timer_sync(&up->timer);
- up->timer.function = (TIMER_FUNC_TYPE)serial8250_timeout;
+ up->timer.function = serial8250_timeout;
if (port->irq)
serial_unlink_irq_chain(up);
}
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 1421804975e0..c9458a033e3c 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -2059,7 +2059,7 @@ static void flush_timeout_function(unsigned long data)
static struct timer_list flush_timer;
static void
-timed_flush_handler(unsigned long ptr)
+timed_flush_handler(struct timer_list *unused)
{
struct e100_serial *info;
int i;
@@ -4137,7 +4137,7 @@ static int __init rs_init(void)
/* Setup the timed flush handler system */
#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
- setup_timer(&flush_timer, timed_flush_handler, 0);
+ timer_setup(&flush_timer, timed_flush_handler, 0);
mod_timer(&flush_timer, jiffies + 5);
#endif
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c84e6f0db54e..1c4d3f387138 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -966,9 +966,9 @@ static void lpuart_dma_rx_complete(void *arg)
lpuart_copy_rx_to_tty(sport);
}
-static void lpuart_timer_func(unsigned long data)
+static void lpuart_timer_func(struct timer_list *t)
{
- struct lpuart_port *sport = (struct lpuart_port *)data;
+ struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
lpuart_copy_rx_to_tty(sport);
}
@@ -1263,8 +1263,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
static void rx_dma_timer_init(struct lpuart_port *sport)
{
- setup_timer(&sport->lpuart_timer, lpuart_timer_func,
- (unsigned long)sport);
+ timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
add_timer(&sport->lpuart_timer);
}
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 473f4f81d690..ffefd218761e 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -263,9 +263,9 @@ static void mrdy_assert(struct ifx_spi_device *ifx_dev)
* The SPI has timed out: hang up the tty. Users will then see a hangup
* and error events.
*/
-static void ifx_spi_timeout(unsigned long arg)
+static void ifx_spi_timeout(struct timer_list *t)
{
- struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+ struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
tty_port_tty_hangup(&ifx_dev->tty_port, false);
@@ -1016,8 +1016,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
spin_lock_init(&ifx_dev->write_lock);
spin_lock_init(&ifx_dev->power_lock);
ifx_dev->power_status = 0;
- setup_timer(&ifx_dev->spi_timer, ifx_spi_timeout,
- (unsigned long)ifx_dev);
+ timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
ifx_dev->modem = pl_data->modem_type;
ifx_dev->use_dma = pl_data->use_dma;
ifx_dev->max_hz = pl_data->max_hz;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a67a606c38eb..e4b3d9123a03 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -906,9 +906,9 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void imx_timeout(unsigned long data)
+static void imx_timeout(struct timer_list *t)
{
- struct imx_port *sport = (struct imx_port *)data;
+ struct imx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -2082,7 +2082,7 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.rs485_config = imx_rs485_config;
sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
sport->port.flags = UPF_BOOT_AUTOCONF;
- setup_timer(&sport->timer, imx_timeout, (unsigned long)sport);
+ timer_setup(&sport->timer, imx_timeout, 0);
sport->gpios = mctrl_gpio_init(&sport->port, 0);
if (IS_ERR(sport->gpios))
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index ed2b03058627..4029272891f9 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -188,9 +188,9 @@ bool kgdb_nmi_poll_knock(void)
* The tasklet is cheap, it does not cause wakeups when it reschedules itself,
* instead it waits for the next tick.
*/
-static void kgdb_nmi_tty_receiver(unsigned long data)
+static void kgdb_nmi_tty_receiver(struct timer_list *t)
{
- struct kgdb_nmi_tty_priv *priv = (void *)data;
+ struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
char ch;
priv->timer.expires = jiffies + (HZ/100);
@@ -241,7 +241,7 @@ static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
return -ENOMEM;
INIT_KFIFO(priv->fifo);
- setup_timer(&priv->timer, kgdb_nmi_tty_receiver, (unsigned long)priv);
+ timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
tty_port_init(&priv->port);
priv->port.ops = &kgdb_nmi_tty_port_ops;
tty->driver_data = priv;
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 27d6049eb6a9..371569a0fd00 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -178,9 +178,9 @@ static void max3100_dowork(struct max3100_port *s)
queue_work(s->workqueue, &s->work);
}
-static void max3100_timeout(unsigned long data)
+static void max3100_timeout(struct timer_list *t)
{
- struct max3100_port *s = (struct max3100_port *)data;
+ struct max3100_port *s = from_timer(s, t, timer);
if (s->port.state) {
max3100_dowork(s);
@@ -780,8 +780,7 @@ static int max3100_probe(struct spi_device *spi)
max3100s[i]->poll_time = 1;
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
max3100s[i]->minor = i;
- setup_timer(&max3100s[i]->timer, max3100_timeout,
- (unsigned long)max3100s[i]);
+ timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
max3100s[i]->port.irq = max3100s[i]->irq;
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 3b74369c262f..00ce31e8d19a 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -371,7 +371,7 @@ static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
*
* This function periodically polls the Serial MUX to check for new data.
*/
-static void mux_poll(unsigned long unused)
+static void mux_poll(struct timer_list *unused)
{
int i;
@@ -572,7 +572,7 @@ static int __init mux_init(void)
if(port_cnt > 0) {
/* Start the Mux timer */
- setup_timer(&mux_timer, mux_poll, 0UL);
+ timer_setup(&mux_timer, mux_poll, 0);
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index f8812389b8a8..223a9499104e 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -103,9 +103,9 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void pnx8xxx_timeout(unsigned long data)
+static void pnx8xxx_timeout(struct timer_list *t)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+ struct pnx8xxx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -662,8 +662,7 @@ static void __init pnx8xxx_init_ports(void)
first = 0;
for (i = 0; i < NR_PORTS; i++) {
- setup_timer(&pnx8xxx_ports[i].timer, pnx8xxx_timeout,
- (unsigned long)&pnx8xxx_ports[i]);
+ timer_setup(&pnx8xxx_ports[i].timer, pnx8xxx_timeout, 0);
pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
}
}
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 4e3f169b30cf..a399772be3fc 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -110,9 +110,9 @@ static void sa1100_mctrl_check(struct sa1100_port *sport)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void sa1100_timeout(unsigned long data)
+static void sa1100_timeout(struct timer_list *t)
{
- struct sa1100_port *sport = (struct sa1100_port *)data;
+ struct sa1100_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -627,8 +627,7 @@ static void __init sa1100_init_ports(void)
sa1100_ports[i].port.fifosize = 8;
sa1100_ports[i].port.line = i;
sa1100_ports[i].port.iotype = UPIO_MEM;
- setup_timer(&sa1100_ports[i].timer, sa1100_timeout,
- (unsigned long)&sa1100_ports[i]);
+ timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
}
/*
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 31fcc7072a90..d9f399c4e90c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1058,9 +1058,9 @@ static int scif_rtrg_enabled(struct uart_port *port)
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
}
-static void rx_fifo_timer_fn(unsigned long arg)
+static void rx_fifo_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_fifo_timer);
struct uart_port *port = &s->port;
dev_dbg(port->dev, "Rx timed out\n");
@@ -1138,8 +1138,7 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
sci->rx_fifo_timeout = r;
scif_set_rtrg(port, 1);
if (r > 0)
- setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)sci);
+ timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
}
return count;
@@ -1392,9 +1391,9 @@ static void work_fn_tx(struct work_struct *work)
dma_async_issue_pending(chan);
}
-static void rx_timer_fn(unsigned long arg)
+static void rx_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_timer);
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
struct dma_tx_state state;
@@ -1572,7 +1571,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+ timer_setup(&s->rx_timer, rx_timer_fn, 0);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
@@ -2238,8 +2237,7 @@ static void sci_reset(struct uart_port *port)
if (s->rx_trigger > 1) {
if (s->rx_fifo_timeout) {
scif_set_rtrg(port, 1);
- setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)s);
+ timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
} else {
if (port->type == PORT_SCIFA ||
port->type == PORT_SCIFB)
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index ed78542c4c37..42b9aded4eb1 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -612,9 +612,9 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
* Obviously not used in interrupt mode
*
*/
-static void sn_sal_timer_poll(unsigned long data)
+static void sn_sal_timer_poll(struct timer_list *t)
{
- struct sn_cons_port *port = (struct sn_cons_port *)data;
+ struct sn_cons_port *port = from_timer(port, t, sc_timer);
unsigned long flags;
if (!port)
@@ -668,7 +668,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
* timer to poll for input and push data from the console
* buffer.
*/
- setup_timer(&port->sc_timer, sn_sal_timer_poll, (unsigned long)port);
+ timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index f2c34d656144..3c4ad71f261d 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -700,7 +700,7 @@ static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_loopback_frame( struct mgsl_struct *info );
-static void mgsl_tx_timeout(unsigned long context);
+static void mgsl_tx_timeout(struct timer_list *t);
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
@@ -1768,7 +1768,7 @@ static int startup(struct mgsl_struct * info)
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = mgsl_claim_resources(info);
@@ -7517,9 +7517,9 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
* Arguments: context pointer to device instance data
* Return Value: None
*/
-static void mgsl_tx_timeout(unsigned long context)
+static void mgsl_tx_timeout(struct timer_list *t)
{
- struct mgsl_struct *info = (struct mgsl_struct*)context;
+ struct mgsl_struct *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 06a03731bba7..255c49687877 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -493,8 +493,8 @@ static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
static int alloc_tmp_rbuf(struct slgt_info *info);
static void free_tmp_rbuf(struct slgt_info *info);
-static void tx_timeout(unsigned long context);
-static void rx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void rx_timeout(struct timer_list *t);
/*
* ioctl handlers
@@ -3597,8 +3597,8 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
info->adapter_num = adapter_num;
info->port_num = port_num;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->rx_timer, rx_timeout, 0);
/* Copy configuration info to device instance data */
info->pdev = pdev;
@@ -5112,9 +5112,9 @@ static int adapter_test(struct slgt_info *info)
/*
* transmit timeout handler
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, tx_timer);
unsigned long flags;
DBGINFO(("%s tx_timeout\n", info->device_name));
@@ -5136,9 +5136,9 @@ static void tx_timeout(unsigned long context)
/*
* receive buffer polling timer
*/
-static void rx_timeout(unsigned long context)
+static void rx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, rx_timer);
unsigned long flags;
DBGINFO(("%s rx_timeout\n", info->device_name));
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index d45f234e1914..75f11ce1f0a1 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -615,8 +615,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info);
static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
-static void tx_timeout(unsigned long context);
-static void status_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void status_timeout(struct timer_list *t);
static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
@@ -3782,9 +3782,8 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
info->bus_type = MGSL_BUS_TYPE_PCI;
info->irq_flags = IRQF_SHARED;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->status_timer, status_timeout,
- (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->status_timer, status_timeout, 0);
/* Store the PCI9050 misc control register value because a flaw
* in the PCI9050 prevents LCR registers from being read if
@@ -5468,9 +5467,9 @@ static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
/* called when HDLC frame times out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5495,10 +5494,10 @@ static void tx_timeout(unsigned long context)
/* called to periodically check the DSR/RI modem signal input status
*/
-static void status_timeout(unsigned long context)
+static void status_timeout(struct timer_list *t)
{
u16 status = 0;
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, status_timer);
unsigned long flags;
unsigned char delta;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c8d90d7e7e37..5d412df8e943 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -244,7 +244,7 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
return 0;
}
-static void kd_nosound(unsigned long ignored)
+static void kd_nosound(struct timer_list *unused)
{
static unsigned int zero;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index bce4c71cb338..88b902c525d7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -158,7 +158,7 @@ static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
-static void blank_screen_t(unsigned long dummy);
+static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
@@ -3929,7 +3929,7 @@ void unblank_screen(void)
* (console operations can still happen at irq time, but only from printk which
* has the console mutex. Not perfect yet, but better than no locking
*/
-static void blank_screen_t(unsigned long dummy)
+static void blank_screen_t(struct timer_list *unused)
{
blank_timer_expired = 1;
schedule_work(&console_work);
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 6470d259b7d8..8af797252af2 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -547,21 +547,30 @@ static void cxacru_blocking_completion(struct urb *urb)
complete(urb->context);
}
-static void cxacru_timeout_kill(unsigned long data)
+struct cxacru_timer {
+ struct timer_list timer;
+ struct urb *urb;
+};
+
+static void cxacru_timeout_kill(struct timer_list *t)
{
- usb_unlink_urb((struct urb *) data);
+ struct cxacru_timer *timer = from_timer(timer, t, timer);
+
+ usb_unlink_urb(timer->urb);
}
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
int *actual_length)
{
- struct timer_list timer;
+ struct cxacru_timer timer = {
+ .urb = urb,
+ };
- setup_timer(&timer, cxacru_timeout_kill, (unsigned long)urb);
- timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
- add_timer(&timer);
+ timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
+ mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
wait_for_completion(done);
- del_timer_sync(&timer);
+ del_timer_sync(&timer.timer);
+ destroy_timer_on_stack(&timer.timer);
if (actual_length)
*actual_length = urb->actual_length;
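
cxacru is the one case here where the timer lives on the stack and still has to carry a pointer (the URB), so the conversion wraps both in a small on-stack struct. The idiom, sketched with illustrative my_ctx/my_expire names rather than the driver's:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {
	struct timer_list timer;
	void *payload;			/* whatever the expiry handler needs */
};

static void my_expire(struct timer_list *t)
{
	struct my_ctx *ctx = from_timer(ctx, t, timer);

	/* act on ctx->payload, e.g. cancel the pending operation */
	(void)ctx;
}

static void my_wait_with_timeout(void *payload)
{
	struct my_ctx ctx = { .payload = payload };

	timer_setup_on_stack(&ctx.timer, my_expire, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(100));
	/* ... block until the operation completes ... */
	del_timer_sync(&ctx.timer);
	destroy_timer_on_stack(&ctx.timer);
}

destroy_timer_on_stack() pairs with timer_setup_on_stack() so that timer debug-object tracking is torn down before the stack frame is reused.
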
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 5a5e8c0aaa39..973548b5c15c 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -557,9 +557,10 @@ static void speedtch_check_status(struct work_struct *work)
}
}
-static void speedtch_status_poll(unsigned long data)
+static void speedtch_status_poll(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ status_check_timer);
schedule_work(&instance->status_check_work);
@@ -570,9 +571,10 @@ static void speedtch_status_poll(unsigned long data)
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
}
-static void speedtch_resubmit_int(unsigned long data)
+static void speedtch_resubmit_int(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ resubmit_timer);
struct urb *int_urb = instance->int_urb;
int ret;
@@ -860,13 +862,11 @@ static int speedtch_bind(struct usbatm_data *usbatm,
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
- setup_timer(&instance->status_check_timer, speedtch_status_poll,
- (unsigned long)instance);
+ timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
- setup_timer(&instance->resubmit_timer, speedtch_resubmit_int,
- (unsigned long)instance);
+ timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 044264aa1f96..dbea28495e1d 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -989,18 +989,18 @@ static int usbatm_heavy_init(struct usbatm_data *instance)
return 0;
}
-static void usbatm_tasklet_schedule(unsigned long data)
+static void usbatm_tasklet_schedule(struct timer_list *t)
{
- tasklet_schedule((struct tasklet_struct *) data);
+ struct usbatm_channel *channel = from_timer(channel, t, delay);
+
+ tasklet_schedule(&channel->tasklet);
}
static void usbatm_init_channel(struct usbatm_channel *channel)
{
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->list);
- channel->delay.function = usbatm_tasklet_schedule;
- channel->delay.data = (unsigned long) &channel->tasklet;
- init_timer(&channel->delay);
+ timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
}
int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 19b5c4afeef2..fc32391a34d5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -788,9 +788,11 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
/* timer callback */
-static void rh_timer_func (unsigned long _hcd)
+static void rh_timer_func (struct timer_list *t)
{
- usb_hcd_poll_rh_status((struct usb_hcd *) _hcd);
+ struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+
+ usb_hcd_poll_rh_status(_hcd);
}
/*-------------------------------------------------------------------------*/
@@ -2545,7 +2547,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
hcd->self.bus_name = bus_name;
hcd->self.uses_dma = (sysdev->dma_mask != NULL);
- setup_timer(&hcd->rh_timer, rh_timer_func, (unsigned long)hcd);
+ timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 69eb40cd1b47..7b6eb0ad513b 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3314,9 +3314,9 @@ host:
}
}
-static void dwc2_wakeup_detected(unsigned long data)
+static void dwc2_wakeup_detected(struct timer_list *t)
{
- struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+ struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
u32 hprt0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
@@ -5155,8 +5155,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
- setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
- (unsigned long)hsotg);
+ timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
/* Initialize the non-periodic schedule */
INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index f472de238ac2..fcd1676c7f0b 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1275,9 +1275,9 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*
* @work: Pointer to a qh unreserve_work.
*/
-static void dwc2_unreserve_timer_fn(unsigned long data)
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
- struct dwc2_qh *qh = (struct dwc2_qh *)data;
+ struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
@@ -1467,8 +1467,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
- setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
- (unsigned long)qh);
+ timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index bfe278294e88..ad743a8493be 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1550,9 +1550,9 @@ static void at91_vbus_timer_work(struct work_struct *work)
mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
}
-static void at91_vbus_timer(unsigned long data)
+static void at91_vbus_timer(struct timer_list *t)
{
- struct at91_udc *udc = (struct at91_udc *)data;
+ struct at91_udc *udc = from_timer(udc, t, vbus_timer);
/*
* If we are polling vbus it is likely that the gpio is on an
@@ -1918,8 +1918,7 @@ static int at91udc_probe(struct platform_device *pdev)
if (udc->board.vbus_polled) {
INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
- setup_timer(&udc->vbus_timer, at91_vbus_timer,
- (unsigned long)udc);
+ timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
mod_timer(&udc->vbus_timer,
jiffies + VBUS_POLL_TIMEOUT);
} else {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4f1b1809472c..d0128f92ec5a 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1771,9 +1771,9 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
/* drive both sides of the transfers; looks like irq handlers to
* both drivers except the callbacks aren't in_irq().
*/
-static void dummy_timer(unsigned long _dum_hcd)
+static void dummy_timer(struct timer_list *t)
{
- struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+ struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
struct dummy *dum = dum_hcd->dum;
struct urbp *urbp, *tmp;
unsigned long flags;
@@ -2445,7 +2445,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2474,7 +2474,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index f19e6282a688..a8288df6aadf 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1259,9 +1259,9 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
return IRQ_HANDLED;
}
-static void m66592_timer(unsigned long _m66592)
+static void m66592_timer(struct timer_list *t)
{
- struct m66592 *m66592 = (struct m66592 *)_m66592;
+ struct m66592 *m66592 = from_timer(m66592, t, timer);
unsigned long flags;
u16 tmp;
@@ -1589,7 +1589,7 @@ static int m66592_probe(struct platform_device *pdev)
m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.name = udc_name;
- setup_timer(&m66592->timer, m66592_timer, (unsigned long)m66592);
+ timer_setup(&m66592->timer, m66592_timer, 0);
m66592->reg = reg;
ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index fc7f810baef7..dc35a54bad90 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1854,9 +1854,9 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
-static void pio_out_timer(unsigned long _ep)
+static void pio_out_timer(struct timer_list *t)
{
- struct omap_ep *ep = (void *) _ep;
+ struct omap_ep *ep = from_timer(ep, t, timer);
unsigned long flags;
u16 stat_flg;
@@ -2542,9 +2542,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
}
if (dbuf && addr)
epn_rxtx |= UDC_EPN_RX_DB;
- init_timer(&ep->timer);
- ep->timer.function = pio_out_timer;
- ep->timer.data = (unsigned long) ep;
+ timer_setup(&ep->timer, pio_out_timer, 0);
}
if (addr)
epn_rxtx |= UDC_EPN_RX_VALID;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 8f135d9fa245..0e3f5faa000e 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1624,9 +1624,9 @@ static inline void clear_ep_state (struct pxa25x_udc *dev)
nuke(&dev->ep[i], -ECONNABORTED);
}
-static void udc_watchdog(unsigned long _dev)
+static void udc_watchdog(struct timer_list *t)
{
- struct pxa25x_udc *dev = (void *)_dev;
+ struct pxa25x_udc *dev = from_timer(dev, t, timer);
local_irq_disable();
if (dev->ep0state == EP0_STALL
@@ -2413,7 +2413,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
gpio_direction_output(dev->mach->gpio_pullup, 0);
}
- setup_timer(&dev->timer, udc_watchdog, (unsigned long)dev);
+ timer_setup(&dev->timer, udc_watchdog, 0);
the_controller = dev;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 143122ed3c66..a3ecce62662b 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1514,9 +1514,9 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
return IRQ_HANDLED;
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
unsigned long flags;
u16 tmp;
@@ -1874,7 +1874,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.name = udc_name;
- setup_timer(&r8a66597->timer, r8a66597_timer, (unsigned long)r8a66597);
+ timer_setup(&r8a66597->timer, r8a66597_timer, 0);
r8a66597->reg = reg;
if (r8a66597->pdata->on_chip) {
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 10887e09e9bc..ee9676349333 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -80,7 +80,7 @@ static const char hcd_name [] = "ohci_hcd";
static void ohci_dump(struct ohci_hcd *ohci);
static void ohci_stop(struct usb_hcd *hcd);
-static void io_watchdog_func(unsigned long _ohci);
+static void io_watchdog_func(struct timer_list *t);
#include "ohci-hub.c"
#include "ohci-dbg.c"
@@ -500,8 +500,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (ohci->hcca)
return 0;
- setup_timer(&ohci->io_watchdog, io_watchdog_func,
- (unsigned long) ohci);
+ timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -723,9 +722,9 @@ static int ohci_start(struct usb_hcd *hcd)
* the unlink list. As a result, URBs could never be dequeued and
* endpoints could never be released.
*/
-static void io_watchdog_func(unsigned long _ohci)
+static void io_watchdog_func(struct timer_list *t)
{
- struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
+ struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
bool takeback_all_pending = false;
u32 status;
u32 head;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 0bf7759aae78..c5e6e8d0b5ef 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2539,9 +2539,9 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd)
return ret;
}
-static void oxu_watchdog(unsigned long param)
+static void oxu_watchdog(struct timer_list *t)
{
- struct oxu_hcd *oxu = (struct oxu_hcd *) param;
+ struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
@@ -2577,7 +2577,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
spin_lock_init(&oxu->lock);
- setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
+ timer_setup(&oxu->watchdog, oxu_watchdog, 0);
/*
* hw default: 1K periodic list heads, one per frame.
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f3d9ba420a97..984892dd72f5 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1798,9 +1798,9 @@ static void r8a66597_td_timer(struct timer_list *t)
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
unsigned long flags;
int port;
@@ -2472,8 +2472,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->max_root_hub = 2;
spin_lock_init(&r8a66597->lock);
- setup_timer(&r8a66597->rh_timer, r8a66597_timer,
- (unsigned long)r8a66597);
+ timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
r8a66597->reg = reg;
/* make sure no interrupts are pending */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 601fb00603cc..fa88a903fa2e 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1119,9 +1119,9 @@ sl811h_hub_descriptor (
}
static void
-sl811h_timer(unsigned long _sl811)
+sl811h_timer(struct timer_list *t)
{
- struct sl811 *sl811 = (void *) _sl811;
+ struct sl811 *sl811 = from_timer(sl811, t, timer);
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
@@ -1692,7 +1692,7 @@ sl811h_probe(struct platform_device *dev)
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
sl811->board = dev_get_platdata(&dev->dev);
- setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
+ timer_setup(&sl811->timer, sl811h_timer, 0);
sl811->addr_reg = addr_reg;
sl811->data_reg = data_reg;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index babeefd84ffd..f5c90217777a 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -585,8 +585,7 @@ static int uhci_start(struct usb_hcd *hcd)
hcd->self.sg_tablesize = ~0;
spin_lock_init(&uhci->lock);
- setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
- (unsigned long) uhci);
+ timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 49d4edc03cc2..d40438238938 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -90,9 +90,9 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
}
}
-static void uhci_fsbr_timeout(unsigned long _uhci)
+static void uhci_fsbr_timeout(struct timer_list *t)
{
- struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+ struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
unsigned long flags;
spin_lock_irqsave(&uhci->lock, flags);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 327ba8b8a98b..2424d3020ca3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -395,14 +395,14 @@ static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
#endif
-static void compliance_mode_recovery(unsigned long arg)
+static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
u32 temp;
int i;
- xhci = (struct xhci_hcd *)arg;
+ xhci = from_timer(xhci, t, comp_mode_recovery_timer);
for (i = 0; i < xhci->num_usb3_ports; i++) {
temp = readl(xhci->usb3_ports[i]);
@@ -443,8 +443,8 @@ static void compliance_mode_recovery(unsigned long arg)
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
xhci->port_status_u0 = 0;
- setup_timer(&xhci->comp_mode_recovery_timer,
- compliance_mode_recovery, (unsigned long)xhci);
+ timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+ 0);
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a859c2d33c29..fdceb46d9fc6 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -555,9 +555,9 @@ static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}
-static void mos7840_led_off(unsigned long arg)
+static void mos7840_led_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
@@ -565,9 +565,9 @@ static void mos7840_led_off(unsigned long arg)
jiffies + msecs_to_jiffies(LED_OFF_MS));
}
-static void mos7840_led_flag_off(unsigned long arg)
+static void mos7840_led_flag_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
@@ -2289,12 +2289,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
goto error;
}
- setup_timer(&mos7840_port->led_timer1, mos7840_led_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
mos7840_port->led_timer1.expires =
jiffies + msecs_to_jiffies(LED_ON_MS);
- setup_timer(&mos7840_port->led_timer2, mos7840_led_flag_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
+ 0);
mos7840_port->led_timer2.expires =
jiffies + msecs_to_jiffies(LED_OFF_MS);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 48e2e32c97e8..31b024441938 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -751,9 +751,9 @@ static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
}
-static void rts51x_suspend_timer_fn(unsigned long data)
+static void rts51x_suspend_timer_fn(struct timer_list *t)
{
- struct rts51x_chip *chip = (struct rts51x_chip *)data;
+ struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
struct us_data *us = chip->us;
switch (rts51x_get_stat(chip)) {
@@ -917,8 +917,7 @@ static int realtek_cr_autosuspend_setup(struct us_data *us)
us->proto_handler = rts51x_invoke_transport;
chip->timer_expires = 0;
- setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
- (unsigned long)chip);
+ timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
fw5895_init(us);
/* enable autosuspend function of the usb device */
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 38d0504a1bbc..625f706b8160 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -603,9 +603,9 @@ static void uwb_cnflt_update_work(struct work_struct *work)
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_cnflt_timer(unsigned long arg)
+static void uwb_cnflt_timer(struct timer_list *t)
{
- struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+ struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}
@@ -642,7 +642,7 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
}
INIT_LIST_HEAD(&cnflt->rc_node);
- setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
+ timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
cnflt->rc = rc;
INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index 36b5cb62c15d..fbdca728bd9f 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -115,7 +115,7 @@ struct uwb_rc_neh {
struct list_head list_node;
};
-static void uwb_rc_neh_timer(unsigned long arg);
+static void uwb_rc_neh_timer(struct timer_list *t);
static void uwb_rc_neh_release(struct kref *kref)
{
@@ -223,7 +223,7 @@ struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
kref_init(&neh->kref);
INIT_LIST_HEAD(&neh->list_node);
- setup_timer(&neh->timer, uwb_rc_neh_timer, (unsigned long)neh);
+ timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
neh->rc = rc;
neh->evt_type = expected_type;
@@ -565,9 +565,9 @@ void uwb_rc_neh_error(struct uwb_rc *rc, int error)
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
-static void uwb_rc_neh_timer(unsigned long arg)
+static void uwb_rc_neh_timer(struct timer_list *t)
{
- struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+ struct uwb_rc_neh *neh = from_timer(neh, t, timer);
struct uwb_rc *rc = neh->rc;
unsigned long flags;
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index f5e27247a38f..fe25a8cc6fa1 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -23,7 +23,7 @@
#include "uwb-internal.h"
-static void uwb_rsv_timer(unsigned long arg);
+static void uwb_rsv_timer(struct timer_list *t);
static const char *rsv_states[] = {
[UWB_RSV_STATE_NONE] = "none ",
@@ -198,9 +198,9 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
dev_dbg(dev, "put stream %d\n", rsv->stream);
}
-void uwb_rsv_backoff_win_timer(unsigned long arg)
+void uwb_rsv_backoff_win_timer(struct timer_list *t)
{
- struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+ struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
struct device *dev = &rc->uwb_dev.dev;
@@ -470,7 +470,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
INIT_LIST_HEAD(&rsv->rc_node);
INIT_LIST_HEAD(&rsv->pal_node);
kref_init(&rsv->kref);
- setup_timer(&rsv->timer, uwb_rsv_timer, (unsigned long)rsv);
+ timer_setup(&rsv->timer, uwb_rsv_timer, 0);
rsv->rc = rc;
INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
@@ -939,9 +939,9 @@ static void uwb_rsv_alien_bp_work(struct work_struct *work)
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_rsv_timer(unsigned long arg)
+static void uwb_rsv_timer(struct timer_list *t)
{
- struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+ struct uwb_rsv *rsv = from_timer(rsv, t, timer);
queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
@@ -987,8 +987,7 @@ void uwb_rsv_init(struct uwb_rc *rc)
rc->bow.can_reserve_extra_mases = true;
rc->bow.total_expired = 0;
rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
- setup_timer(&rc->bow.timer, uwb_rsv_backoff_win_timer,
- (unsigned long)&rc->bow);
+ timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
index 353c0555a1f5..91326ce093a7 100644
--- a/drivers/uwb/uwb-internal.h
+++ b/drivers/uwb/uwb-internal.h
@@ -329,7 +329,7 @@ void uwb_rsv_put(struct uwb_rsv *rsv);
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(unsigned long arg);
+void uwb_rsv_backoff_win_timer(struct timer_list *t);
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
int uwb_rsv_status(struct uwb_rsv *rsv);
int uwb_rsv_companion_status(struct uwb_rsv *rsv);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5e58f5ec0a28..2f615b7f1c9f 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -905,16 +905,6 @@ config FB_LEO
This is the frame buffer device driver for the SBUS-based Sun ZX
(leo) frame buffer cards.
-config FB_IGA
- bool "IGA 168x display support"
- depends on (FB = y) && SPARC32
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the framebuffer device for the INTERGRAPHICS 1680 and
- successor frame buffer cards.
-
config FB_XVR500
bool "Sun XVR-500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 8895536a20d6..115961e0721b 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -65,7 +65,6 @@ obj-$(CONFIG_FB_HGA) += hgafb.o
obj-$(CONFIG_FB_XVR500) += sunxvr500.o
obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
-obj-$(CONFIG_FB_IGA) += igafb.o
obj-$(CONFIG_FB_APOLLO) += dnfb.o
obj-$(CONFIG_FB_Q40) += q40fb.o
obj-$(CONFIG_FB_TGA) += tgafb.o
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 3ec72f19114b..a9a8272f7a6e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2272,10 +2272,10 @@ static void aty_bl_exit(struct backlight_device *bd)
static void aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
- const int ragepro_tbl[] = {
+ static const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
};
- const int ragexl_tbl[] = {
+ static const int ragexl_tbl[] = {
50, 66, 75, 83, 90, 95, 100, 105,
110, 115, 120, 125, 133, 143, 166
};
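
Marking these function-local lookup tables static const (here and in the radeon change below) moves them into .rodata instead of rebuilding them on the stack on every call. Roughly, as a hypothetical helper reusing the ragepro values above:

#include <linux/kernel.h>

static int closest_refresh(int xclk)
{
	static const int tbl[] = { 44, 50, 55, 66, 75, 80, 100 };
	unsigned int i;

	/* Pick the first entry at or above xclk, or the last one. */
	for (i = 0; i < ARRAY_SIZE(tbl) - 1 && xclk > tbl[i]; i++)
		;
	return tbl[i];
}
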
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 1e2ec360f8c1..4d77daeecf99 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1454,9 +1454,9 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
/*
* Timer function for delayed LVDS panel power up/down
*/
-static void radeon_lvds_timer_func(unsigned long data)
+static void radeon_lvds_timer_func(struct timer_list *t)
{
- struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
+ struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer);
radeon_engine_idle();
@@ -1534,7 +1534,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs *regs,
unsigned long freq)
{
- const struct {
+ static const struct {
int divider;
int bitvalue;
} *post_div,
@@ -2291,9 +2291,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
rinfo->pdev = pdev;
spin_lock_init(&rinfo->reg_lock);
- init_timer(&rinfo->lvds_timer);
- rinfo->lvds_timer.function = radeon_lvds_timer_func;
- rinfo->lvds_timer.data = (unsigned long)rinfo;
+ timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0);
c1 = ent->device >> 8;
c2 = ent->device & 0xff;
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index f7c253dd5899..7137c12cbcee 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -1208,9 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (mc & 0x4)
break;
+ /* fall through */
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
+ /* fall through */
case 0:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
@@ -1219,6 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (!(mc & 0x4))
break;
+ /* fall through */
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
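
The added /* fall through */ comments (also in the cirrusfb hunk further down) document that the missing break statements in these cascading cases are intentional, and GCC's -Wimplicit-fallthrough accepts the comment as an annotation. In isolation the construct looks like this, with illustrative values only:

static unsigned int select_sleep_mask(int channels)
{
	unsigned int mask = 0;

	switch (channels) {
	case 2:
		mask |= 0x2;	/* second channel */
		/* fall through */
	case 1:
		mask |= 0x1;	/* first channel */
		break;
	default:
		break;
	}
	return mask;
}
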
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 5f04b4096c42..87d5a62bf6ca 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1518,7 +1518,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
{
struct fb_info *fbi = fbdev->fb_info;
- int bpp;
+ int bpp, ret;
fbi->fbops = &au1200fb_fb_ops;
@@ -1546,15 +1546,14 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
}
fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
- if (!fbi->pseudo_palette) {
+ if (!fbi->pseudo_palette)
return -ENOMEM;
- }
- if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
+ ret = fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0);
+ if (ret < 0) {
print_err("Fail to allocate colormap (%d entries)",
- AU1200_LCD_NBR_PALETTE_ENTRIES);
- kfree(fbi->pseudo_palette);
- return -EFAULT;
+ AU1200_LCD_NBR_PALETTE_ENTRIES);
+ return ret;
}
strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
@@ -1668,10 +1667,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
- /* shut gcc up */
- ret = 0;
- fbdev = NULL;
-
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
@@ -1681,8 +1676,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
&dev->dev);
- if (!fbi)
+ if (!fbi) {
+ ret = -ENOMEM;
goto failed;
+ }
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
@@ -1701,7 +1698,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto failed;
}
/*
@@ -1718,7 +1716,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Init FB data */
- if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
+ ret = au1200fb_init_fbinfo(fbdev);
+ if (ret < 0)
goto failed;
/* Register new framebuffer */
@@ -1758,21 +1757,26 @@ static int au1200fb_drv_probe(struct platform_device *dev)
return 0;
failed:
- /* NOTE: This only does the current plane/window that failed; others are still active */
- if (fbi) {
+ for (plane = 0; plane < device_count; ++plane) {
+ fbi = _au1200fb_infos[plane];
+ if (!fbi)
+ break;
+
+ /* Clean up all probe data */
+ unregister_framebuffer(fbi);
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
+
+ framebuffer_release(fbi);
+ _au1200fb_infos[plane] = NULL;
}
- if (plane == 0)
- free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
static int au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
- struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
@@ -1781,7 +1785,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
- fbdev = fbi->par;
/* Clean up all probe data */
unregister_framebuffer(fbi);
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index d992aa5eb3f0..b3be06dd2908 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1477,10 +1477,12 @@ static void init_vgachip(struct fb_info *info)
mdelay(100);
/* mode */
vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
- case BT_GD5480: /* fall through */
+ /* fall through */
+ case BT_GD5480:
/* from Klaus' NetBSD driver: */
vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
- case BT_ALPINE: /* fall through */
+ /* fall through */
+ case BT_ALPINE:
/* put blitter into 542x compat */
vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
break;
diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
index 6026c60fc100..261522fabdac 100644
--- a/drivers/video/fbdev/controlfb.h
+++ b/drivers/video/fbdev/controlfb.h
@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
{{ 1, 2}}, /* 1152x870, 75Hz */
{{ 0, 1}}, /* 1280x960, 75Hz */
{{ 0, 1}}, /* 1280x1024, 75Hz */
+ {{ 1, 2}}, /* 1152x768, 60Hz */
+ {{ 0, 1}}, /* 1600x1024, 60Hz */
};
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 04612f938bab..929ca472c524 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -395,10 +395,10 @@ static void fb_flashcursor(struct work_struct *work)
console_unlock();
}
-static void cursor_timer_handler(unsigned long dev_addr)
+static void cursor_timer_handler(struct timer_list *t)
{
- struct fb_info *info = (struct fb_info *) dev_addr;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
+ struct fb_info *info = ops->info;
queue_work(system_power_efficient_wq, &info->queue);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
@@ -414,8 +414,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
if (!info->queue.func)
INIT_WORK(&info->queue, fb_flashcursor);
- setup_timer(&ops->cursor_timer, cursor_timer_handler,
- (unsigned long) info);
+ timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
}
@@ -714,6 +713,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
if (!err) {
ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
info->fbcon_par = ops;
if (vc)
@@ -962,6 +962,7 @@ static const char *fbcon_startup(void)
ops->graphics = 1;
ops->cur_rotate = -1;
ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
info->fbcon_par = ops;
if (initial_rotation != -1)
p->con_rotate = initial_rotation;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 18f3ac144237..9f7744fbc962 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -69,6 +69,7 @@ struct fbcon_ops {
struct timer_list cursor_timer; /* Cursor timer */
struct fb_cursor cursor_state;
struct display *p;
+ struct fb_info *info;
int currcon; /* Current VC. */
int cur_blink_jiffies;
int cursor_flash;
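
In fbcon, from_timer() hands back the fbcon_ops that embeds cursor_timer, but the callback also needs the owning fb_info, which previously rode in the data cookie; hence the new back-pointer field set wherever fbcon_par is installed. The shape of that idiom, with hypothetical names:

#include <linux/timer.h>

struct parent;				/* hypothetical owning object */

struct child_ops {
	struct timer_list blink;
	struct parent *owner;		/* back-pointer, set once at init */
};

static void child_blink(struct timer_list *t)
{
	struct child_ops *ops = from_timer(ops, t, blink);
	struct parent *p = ops->owner;	/* what the data cookie used to carry */

	(void)p;			/* use p for the real work */
}

static void child_init(struct child_ops *ops, struct parent *p)
{
	ops->owner = p;
	timer_setup(&ops->blink, child_blink, 0);
}

Setting ops->info in both con2fb_acquire_newinfo() and fbcon_startup() keeps that back-pointer valid on every path that installs fbcon_par.
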
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 7b1492d34e98..5505fa00c634 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -115,7 +115,7 @@ static struct fb_ops dn_fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-struct fb_var_screeninfo dnfb_var = {
+static const struct fb_var_screeninfo dnfb_var = {
.xres = 1280,
.yres = 1024,
.xres_virtual = 2048,
@@ -242,16 +242,13 @@ static int dnfb_probe(struct platform_device *dev)
info->screen_base = (u_char *) info->fix.smem_start;
err = fb_alloc_cmap(&info->cmap, 2, 0);
- if (err < 0) {
- framebuffer_release(info);
- return err;
- }
+ if (err < 0)
+ goto release_framebuffer;
err = register_framebuffer(info);
if (err < 0) {
fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- return err;
+ goto release_framebuffer;
}
platform_set_drvdata(dev, info);
@@ -265,6 +262,10 @@ static int dnfb_probe(struct platform_device *dev)
printk("apollo frame buffer alive and kicking !\n");
return err;
+
+release_framebuffer:
+ framebuffer_release(info);
+ return err;
}
static struct platform_driver dnfb_driver = {
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 7f6c9e6cfc6c..3b70044773b6 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -304,12 +304,18 @@ static int goldfish_fb_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_fb_of_match[] = {
+ { .compatible = "google,goldfish-fb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
.remove = goldfish_fb_remove,
.driver = {
- .name = "goldfish_fb"
+ .name = "goldfish_fb",
+ .of_match_table = goldfish_fb_of_match,
}
};
diff --git a/drivers/video/fbdev/igafb.c b/drivers/video/fbdev/igafb.c
deleted file mode 100644
index 486f18897414..000000000000
--- a/drivers/video/fbdev/igafb.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * linux/drivers/video/igafb.c -- Frame buffer device for IGA 1682
- *
- * Copyright (C) 1998 Vladimir Roganov and Gleb Raiko
- *
- * This driver is partly based on the Frame buffer device for ATI Mach64
- * and partially on VESA-related code.
- *
- * Copyright (C) 1997-1998 Geert Uytterhoeven
- * Copyright (C) 1998 Bernd Harries
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-/******************************************************************************
-
- TODO:
- Despite of IGA Card has advanced graphic acceleration,
- initial version is almost dummy and does not support it.
- Support for video modes and acceleration must be added
- together with accelerated X-Windows driver implementation.
-
- Most important thing at this moment is that we have working
- JavaEngine1 console & X with new console interface.
-
-******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/nvram.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_SPARC
-#include <asm/prom.h>
-#include <asm/pcic.h>
-#endif
-
-#include <video/iga.h>
-
-struct pci_mmap_map {
- unsigned long voff;
- unsigned long poff;
- unsigned long size;
- unsigned long prot_flag;
- unsigned long prot_mask;
-};
-
-struct iga_par {
- struct pci_mmap_map *mmap_map;
- unsigned long frame_buffer_phys;
- unsigned long io_base;
-};
-
-struct fb_info fb_info;
-
-struct fb_fix_screeninfo igafb_fix __initdata = {
- .id = "IGA 1682",
- .type = FB_TYPE_PACKED_PIXELS,
- .mmio_len = 1000
-};
-
-struct fb_var_screeninfo default_var = {
- /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 39722,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 33,
- .lower_margin = 10,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-#ifdef CONFIG_SPARC
-struct fb_var_screeninfo default_var_1024x768 __initdata = {
- /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
- .xres = 1024,
- .yres = 768,
- .xres_virtual = 1024,
- .yres_virtual = 768,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 12699,
- .left_margin = 176,
- .right_margin = 16,
- .upper_margin = 28,
- .lower_margin = 1,
- .hsync_len = 96,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1152x900 __initdata = {
- /* 1152x900, 76 Hz, Non-Interlaced (110.0 MHz dotclock) */
- .xres = 1152,
- .yres = 900,
- .xres_virtual = 1152,
- .yres_virtual = 900,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 9091,
- .left_margin = 234,
- .right_margin = 24,
- .upper_margin = 34,
- .lower_margin = 3,
- .hsync_len = 100,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1280x1024 __initdata = {
- /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */
- .xres = 1280,
- .yres = 1024,
- .xres_virtual = 1280,
- .yres_virtual = 1024,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .pixclock = 7408,
- .left_margin = 248,
- .right_margin = 16,
- .upper_margin = 38,
- .lower_margin = 1,
- .hsync_len = 144,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-/*
- * Memory-mapped I/O functions for Sparc PCI
- *
- * On sparc we happen to access I/O with memory mapped functions too.
- */
-#define pci_inb(par, reg) readb(par->io_base+(reg))
-#define pci_outb(par, val, reg) writeb(val, par->io_base+(reg))
-
-static inline unsigned int iga_inb(struct iga_par *par, unsigned int reg,
- unsigned int idx)
-{
- pci_outb(par, idx, reg);
- return pci_inb(par, reg + 1);
-}
-
-static inline void iga_outb(struct iga_par *par, unsigned char val,
- unsigned int reg, unsigned int idx )
-{
- pci_outb(par, idx, reg);
- pci_outb(par, val, reg+1);
-}
-
-#endif /* CONFIG_SPARC */
-
-/*
- * Very important functionality for the JavaEngine1 computer:
- * make screen border black (usign special IGA registers)
- */
-static void iga_blank_border(struct iga_par *par)
-{
- int i;
-#if 0
- /*
- * PROM does this for us, so keep this code as a reminder
- * about required read from 0x3DA and writing of 0x20 in the end.
- */
- (void) pci_inb(par, 0x3DA); /* required for every access */
- pci_outb(par, IGA_IDX_VGA_OVERSCAN, IGA_ATTR_CTL);
- (void) pci_inb(par, IGA_ATTR_CTL+1);
- pci_outb(par, 0x38, IGA_ATTR_CTL);
- pci_outb(par, 0x20, IGA_ATTR_CTL); /* re-enable visual */
-#endif
- /*
- * This does not work as it was designed because the overscan
- * color is looked up in the palette. Therefore, under X11
- * overscan changes color.
- */
- for (i=0; i < 3; i++)
- iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i);
-}
-
-#ifdef CONFIG_SPARC
-static int igafb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct iga_par *par = (struct iga_par *)info->par;
- unsigned int size, page, map_size = 0;
- unsigned long map_offset = 0;
- int i;
-
- if (!par->mmap_map)
- return -ENXIO;
-
- size = vma->vm_end - vma->vm_start;
-
- /* Each page, see which map applies */
- for (page = 0; page < size; ) {
- map_size = 0;
- for (i = 0; par->mmap_map[i].size; i++) {
- unsigned long start = par->mmap_map[i].voff;
- unsigned long end = start + par->mmap_map[i].size;
- unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT) + page;
-
- if (start > offset)
- continue;
- if (offset >= end)
- continue;
-
- map_size = par->mmap_map[i].size - (offset - start);
- map_offset = par->mmap_map[i].poff + (offset - start);
- break;
- }
- if (!map_size) {
- page += PAGE_SIZE;
- continue;
- }
- if (page + map_size > size)
- map_size = size - page;
-
- pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
- pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
-
- if (remap_pfn_range(vma, vma->vm_start + page,
- map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
- return -EAGAIN;
-
- page += map_size;
- }
-
- if (!map_size)
- return -EINVAL;
-
- vma->vm_flags |= VM_IO;
- return 0;
-}
-#endif /* CONFIG_SPARC */
-
-static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- /*
- * Set a single color register. The values supplied are
- * already rounded down to the hardware's capabilities
- * (according to the entries in the `var' structure). Return
- * != 0 for invalid regno.
- */
- struct iga_par *par = (struct iga_par *)info->par;
-
- if (regno >= info->cmap.len)
- return 1;
-
- pci_outb(par, regno, DAC_W_INDEX);
- pci_outb(par, red, DAC_DATA);
- pci_outb(par, green, DAC_DATA);
- pci_outb(par, blue, DAC_DATA);
-
- if (regno < 16) {
- switch (info->var.bits_per_pixel) {
- case 16:
- ((u16*)(info->pseudo_palette))[regno] =
- (regno << 10) | (regno << 5) | regno;
- break;
- case 24:
- ((u32*)(info->pseudo_palette))[regno] =
- (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- { int i;
- i = (regno << 8) | regno;
- ((u32*)(info->pseudo_palette))[regno] = (i << 16) | i;
- }
- break;
- }
- }
- return 0;
-}
-
-/*
- * Framebuffer option structure
- */
-static struct fb_ops igafb_ops = {
- .owner = THIS_MODULE,
- .fb_setcolreg = igafb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-#ifdef CONFIG_SPARC
- .fb_mmap = igafb_mmap,
-#endif
-};
-
-static int __init iga_init(struct fb_info *info, struct iga_par *par)
-{
- char vramsz = iga_inb(par, IGA_EXT_CNTRL, IGA_IDX_EXT_BUS_CNTL)
- & MEM_SIZE_ALIAS;
- int video_cmap_len;
-
- switch (vramsz) {
- case MEM_SIZE_1M:
- info->fix.smem_len = 0x100000;
- break;
- case MEM_SIZE_2M:
- info->fix.smem_len = 0x200000;
- break;
- case MEM_SIZE_4M:
- case MEM_SIZE_RESERVED:
- info->fix.smem_len = 0x400000;
- break;
- }
-
- if (info->var.bits_per_pixel > 8)
- video_cmap_len = 16;
- else
- video_cmap_len = 256;
-
- info->fbops = &igafb_ops;
- info->flags = FBINFO_DEFAULT;
-
- fb_alloc_cmap(&info->cmap, video_cmap_len, 0);
-
- if (register_framebuffer(info) < 0)
- return 0;
-
- fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
- info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
-
- iga_blank_border(par);
- return 1;
-}
-
-static int __init igafb_init(void)
-{
- struct fb_info *info;
- struct pci_dev *pdev;
- struct iga_par *par;
- unsigned long addr;
- int size, iga2000 = 0;
-
- if (fb_get_options("igafb", NULL))
- return -ENODEV;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTERG,
- PCI_DEVICE_ID_INTERG_1682, 0);
- if (pdev == NULL) {
- /*
- * XXX We tried to use cyber2000fb.c for IGS 2000.
- * But it does not initialize the chip in JavaStation-E, alas.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_INTERG, 0x2000, 0);
- if(pdev == NULL) {
- return -ENXIO;
- }
- iga2000 = 1;
- }
- /* We leak a reference here but as it cannot be unloaded this is
- fine. If you write unload code remember to free it in unload */
-
- size = sizeof(struct iga_par) + sizeof(u32)*16;
-
- info = framebuffer_alloc(size, &pdev->dev);
- if (!info) {
- printk("igafb_init: can't alloc fb_info\n");
- pci_dev_put(pdev);
- return -ENOMEM;
- }
-
- par = info->par;
-
- if ((addr = pdev->resource[0].start) == 0) {
- printk("igafb_init: no memory start\n");
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) {
- printk("igafb_init: can't remap %lx[2M]\n", addr);
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
-
-#ifdef CONFIG_SPARC
- /*
- * The following is sparc specific and this is why:
- *
- * IGS2000 has its I/O memory mapped and we want
- * to generate memory cycles on PCI, e.g. do ioremap(),
- * then readb/writeb() as in Documentation/io-mapping.txt.
- *
- * IGS1682 is more traditional, it responds to PCI I/O
- * cycles, so we want to access it with inb()/outb().
- *
- * On sparc, PCIC converts CPU memory access within
- * phys window 0x3000xxxx into PCI I/O cycles. Therefore
- * we may use readb/writeb to access them with IGS1682.
- *
- * We do not take io_base_phys from resource[n].start
- * on IGS1682 because that chip is BROKEN. It does not
- * have a base register for I/O. We just "know" what its
- * I/O addresses are.
- */
- if (iga2000) {
- igafb_fix.mmio_start = par->frame_buffer_phys | 0x00800000;
- } else {
- igafb_fix.mmio_start = 0x30000000; /* XXX */
- }
- if ((par->io_base = (int) ioremap(igafb_fix.mmio_start, igafb_fix.smem_len)) == 0) {
- printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start);
- iounmap((void *)info->screen_base);
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- /*
- * Figure mmap addresses from PCI config space.
- * We need two regions: for video memory and for I/O ports.
- * Later one can add region for video coprocessor registers.
- * However, mmap routine loops until size != 0, so we put
- * one additional region with size == 0.
- */
-
- par->mmap_map = kzalloc(4 * sizeof(*par->mmap_map), GFP_ATOMIC);
- if (!par->mmap_map) {
- printk("igafb_init: can't alloc mmap_map\n");
- iounmap((void *)par->io_base);
- iounmap(info->screen_base);
- kfree(info);
- pci_dev_put(pdev);
- return -ENOMEM;
- }
-
- /*
- * Set default vmode and cmode from PROM properties.
- */
- {
- struct device_node *dp = pci_device_to_OF_node(pdev);
- int node = dp->node;
- int width = prom_getintdefault(node, "width", 1024);
- int height = prom_getintdefault(node, "height", 768);
- int depth = prom_getintdefault(node, "depth", 8);
- switch (width) {
- case 1024:
- if (height == 768)
- default_var = default_var_1024x768;
- break;
- case 1152:
- if (height == 900)
- default_var = default_var_1152x900;
- break;
- case 1280:
- if (height == 1024)
- default_var = default_var_1280x1024;
- break;
- default:
- break;
- }
-
- switch (depth) {
- case 8:
- default_var.bits_per_pixel = 8;
- break;
- case 16:
- default_var.bits_per_pixel = 16;
- break;
- case 24:
- default_var.bits_per_pixel = 24;
- break;
- case 32:
- default_var.bits_per_pixel = 32;
- break;
- default:
- break;
- }
- }
-
-#endif
- igafb_fix.smem_start = (unsigned long) info->screen_base;
- igafb_fix.line_length = default_var.xres*(default_var.bits_per_pixel/8);
- igafb_fix.visual = default_var.bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-
- info->var = default_var;
- info->fix = igafb_fix;
- info->pseudo_palette = (void *)(par + 1);
-
- if (!iga_init(info, par)) {
- iounmap((void *)par->io_base);
- iounmap(info->screen_base);
- kfree(par->mmap_map);
- kfree(info);
- return -ENODEV;
- }
-
-#ifdef CONFIG_SPARC
- /*
- * Add /dev/fb mmap values.
- */
-
- /* First region is for video memory */
- par->mmap_map[0].voff = 0x0;
- par->mmap_map[0].poff = par->frame_buffer_phys & PAGE_MASK;
- par->mmap_map[0].size = info->fix.smem_len & PAGE_MASK;
- par->mmap_map[0].prot_mask = SRMMU_CACHE;
- par->mmap_map[0].prot_flag = SRMMU_WRITE;
-
- /* Second region is for I/O ports */
- par->mmap_map[1].voff = par->frame_buffer_phys & PAGE_MASK;
- par->mmap_map[1].poff = info->fix.smem_start & PAGE_MASK;
- par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */
- par->mmap_map[1].prot_mask = SRMMU_CACHE;
- par->mmap_map[1].prot_flag = SRMMU_WRITE;
-#endif /* CONFIG_SPARC */
-
- return 0;
-}
-
-static int __init igafb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- }
- return 0;
-}
-
-module_init(igafb_init);
-MODULE_LICENSE("GPL");
-static struct pci_device_id igafb_pci_tbl[] = {
- { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, igafb_pci_tbl);
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
index d31ed4e2c46f..83fec573cceb 100644
--- a/drivers/video/fbdev/intelfb/intelfbhw.c
+++ b/drivers/video/fbdev/intelfb/intelfbhw.c
@@ -937,15 +937,11 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
{
u32 m1, m2, n, p1, p2, n1, testm;
u32 f_vco, p, p_best = 0, m, f_out = 0;
- u32 err_max, err_target, err_best = 10000000;
- u32 n_best = 0, m_best = 0, f_best, f_err;
+ u32 err_best = 10000000;
+ u32 n_best = 0, m_best = 0, f_err;
u32 p_min, p_max, p_inc, div_max;
struct pll_min_max *pll = &plls[index];
- /* Accept 0.5% difference, but aim for 0.1% */
- err_max = 5 * clock / 1000;
- err_target = clock / 1000;
-
DBG_MSG("Clock is %d\n", clock);
div_max = pll->max_vco / clock;
@@ -992,7 +988,6 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
m_best = testm;
n_best = n;
p_best = p;
- f_best = f_out;
err_best = f_err;
}
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index b9b284d79631..838869c6490c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2056,7 +2056,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
if (!minfo)
- return -1;
+ return -ENOMEM;
minfo->pcidev = pdev;
minfo->dead = 0;
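
The matroxfb change above is small but worth noting: probe paths should fail with a real errno rather than a bare -1, because the driver core and callers log and propagate that value. A minimal sketch of the pattern, using hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct foo_device {                     /* hypothetical driver state */
        struct pci_dev *pdev;
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return -ENOMEM;         /* meaningful errno, not a bare -1 */

        foo->pdev = pdev;
        pci_set_drvdata(pdev, foo);
        return 0;
}
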
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 7846f0e8bbbb..79b1dc7f042b 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -150,7 +150,7 @@
#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negtive edge sampling */
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
enum mxsfb_devtype {
MXSFB_V3,
@@ -788,7 +788,16 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+
+ /*
+ * The PIXDATA flags of the display_flags enum are controller
+ * centric, e.g. NEGEDGE means drive data on negative edge.
+ * However, the drivers flag is display centric: Sample the
+ * data on negative (falling) edge. Therefore, check for the
+ * POSEDGE flag:
+ * drive on positive edge => sample on negative edge
+ */
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
put_display_node:
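
The comment added to mxsfb above explains the flag translation in prose; a minimal standalone sketch of the same mapping, assuming the MXSFB_SYNC_DOTCLK_FALLING_ACT bit and the videomode flags shown in the hunk (the demo_* helper name is hypothetical):

#include <linux/types.h>
#include <video/display_timing.h>
#include <video/videomode.h>

#define MXSFB_SYNC_DOTCLK_FALLING_ACT   (1 << 7)  /* display samples on falling edge */

/*
 * The videomode PIXDATA flags are controller centric (POSEDGE = drive data
 * on the rising edge), so driving on the rising edge means the panel samples
 * on the falling edge.
 */
static u32 demo_sync_from_videomode(const struct videomode *vm)
{
        u32 sync = 0;

        if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
                sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;

        return sync;
}
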
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index a4ee65b8f918..6199d4806193 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -474,7 +474,7 @@ static void auto_update_complete(void *data)
jiffies + HWA742_AUTO_UPDATE_TIME);
}
-static void hwa742_update_window_auto(unsigned long arg)
+static void hwa742_update_window_auto(struct timer_list *unused)
{
LIST_HEAD(req_list);
struct hwa742_request *last;
@@ -1002,9 +1002,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
hwa742.auto_update_window.height = fbdev->panel->y_res;
hwa742.auto_update_window.format = 0;
- init_timer(&hwa742.auto_update_timer);
- hwa742.auto_update_timer.function = hwa742_update_window_auto;
- hwa742.auto_update_timer.data = 0;
+ timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0);
hwa742.prev_color_mode = -1;
hwa742.prev_flags = 0;
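
The hwa742 hunk above (and the dsi.c one below) is the standard timer API conversion: init_timer() plus manual .function/.data assignment becomes a single timer_setup() call, and the callback now takes a struct timer_list * it can simply ignore when no context is needed. A self-contained sketch with hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_timeout(struct timer_list *unused)
{
        /* do the periodic work, then re-arm explicitly */
        mod_timer(&demo_timer, jiffies + HZ);
}

static void demo_start(void)
{
        timer_setup(&demo_timer, demo_timeout, 0);
        mod_timer(&demo_timer, jiffies + HZ);
}
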
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 30d49f3800b3..8e1d60d48dbb 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -3988,7 +3988,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5298,9 +5298,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 1d7c012f09db..e08e5664e330 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1477,7 +1477,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
static int omapfb_parse_vram_param(const char *param, int max_entries,
unsigned long *sizes, unsigned long *paddrs)
{
- int fbnum;
+ unsigned int fbnum;
unsigned long size;
unsigned long paddr = 0;
char *p, *start;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 933619da1a94..55fbb432c053 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -512,28 +512,26 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
+static struct pxa3xx_gcu_priv *debug_timer_priv;
-static void pxa3xx_gcu_debug_timedout(unsigned long ptr)
+static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
- struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr;
+ struct pxa3xx_gcu_priv *priv = debug_timer_priv;
QERROR("Timer DUMP");
- /* init the timer structure */
- init_timer(&pxa3xx_gcu_debug_timer);
- pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout;
- pxa3xx_gcu_debug_timer.data = ptr;
- pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* one second */
-
- add_timer(&pxa3xx_gcu_debug_timer);
+ mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}
-static void pxa3xx_gcu_init_debug_timer(void)
+static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
- pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer);
+ /* init the timer structure */
+ debug_timer_priv = priv;
+ timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
+ pxa3xx_gcu_debug_timedout(NULL);
}
#else
-static inline void pxa3xx_gcu_init_debug_timer(void) {}
+static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif
static int
@@ -670,7 +668,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->resource_mem = r;
pxa3xx_gcu_reset(priv);
- pxa3xx_gcu_init_debug_timer();
+ pxa3xx_gcu_init_debug_timer(priv);
dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
(void *) r->start, (void *) priv->shared_phys,
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index fc2aaa5aca23..15ae50063296 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -323,13 +323,11 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* according to the RGB bitfield information.
*/
if (regno < 16) {
- u32 *pal = fbi->fb.pseudo_palette;
-
val = chan_to_field(red, &fbi->fb.var.red);
val |= chan_to_field(green, &fbi->fb.var.green);
val |= chan_to_field(blue, &fbi->fb.var.blue);
- pal[regno] = val;
+ fbi->pseudo_palette[regno] = val;
ret = 0;
}
break;
@@ -1132,12 +1130,10 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
struct sa1100fb_info *fbi;
unsigned i;
- fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
- GFP_KERNEL);
+ fbi = devm_kzalloc(dev, sizeof(struct sa1100fb_info), GFP_KERNEL);
if (!fbi)
return NULL;
- memset(fbi, 0, sizeof(struct sa1100fb_info));
fbi->dev = dev;
strcpy(fbi->fb.fix.id, SA1100_NAME);
@@ -1159,7 +1155,7 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
fbi->fb.fbops = &sa1100fb_ops;
fbi->fb.flags = FBINFO_DEFAULT;
fbi->fb.monspecs = monspecs;
- fbi->fb.pseudo_palette = (fbi + 1);
+ fbi->fb.pseudo_palette = fbi->pseudo_palette;
fbi->rgb[RGB_4] = &rgb_4;
fbi->rgb[RGB_8] = &rgb_8;
@@ -1218,48 +1214,42 @@ static int sa1100fb_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (irq < 0 || !res)
+ if (irq < 0)
return -EINVAL;
- if (!request_mem_region(res->start, resource_size(res), "LCD"))
- return -EBUSY;
-
fbi = sa1100fb_init_fbinfo(&pdev->dev);
- ret = -ENOMEM;
if (!fbi)
- goto failed;
-
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);
- fbi->clk = NULL;
- goto failed;
- }
+ return -ENOMEM;
- fbi->base = ioremap(res->start, resource_size(res));
- if (!fbi->base)
- goto failed;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fbi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->base))
+ return PTR_ERR(fbi->base);
- /* Initialize video memory */
- ret = sa1100fb_map_video_memory(fbi);
- if (ret)
- goto failed;
+ fbi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fbi->clk))
+ return PTR_ERR(fbi->clk);
- ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
+ ret = devm_request_irq(&pdev->dev, irq, sa1100fb_handle_irq, 0,
+ "LCD", fbi);
if (ret) {
dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
- goto failed;
+ return ret;
}
if (machine_is_shannon()) {
- ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
+ ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
GPIOF_OUT_INIT_LOW, "display enable");
if (ret)
- goto err_free_irq;
+ return ret;
}
+ /* Initialize video memory */
+ ret = sa1100fb_map_video_memory(fbi);
+ if (ret)
+ return ret;
+
/*
* This makes sure that our colour bitfield
* descriptors are correctly initialised.
@@ -1269,8 +1259,11 @@ static int sa1100fb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fbi);
ret = register_framebuffer(&fbi->fb);
- if (ret < 0)
- goto err_reg_fb;
+ if (ret < 0) {
+ dma_free_wc(fbi->dev, fbi->map_size, fbi->map_cpu,
+ fbi->map_dma);
+ return ret;
+ }
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1281,20 +1274,6 @@ static int sa1100fb_probe(struct platform_device *pdev)
/* This driver cannot be unloaded at the moment */
return 0;
-
- err_reg_fb:
- if (machine_is_shannon())
- gpio_free(SHANNON_GPIO_DISP_EN);
- err_free_irq:
- free_irq(irq, fbi);
- failed:
- if (fbi)
- iounmap(fbi->base);
- if (fbi->clk)
- clk_put(fbi->clk);
- kfree(fbi);
- release_mem_region(res->start, resource_size(res));
- return ret;
}
static struct platform_driver sa1100fb_driver = {
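
The sa1100fb probe above is rebuilt around devm_* managed resources, which is why the hand-rolled err_*/failed unwind ladder disappears; the pseudo_palette also moves into the driver structure (next hunk) instead of relying on the old "allocate 16 extra words after the struct" trick. A condensed sketch of the resulting probe shape, with hypothetical demo_* names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_fb {                        /* hypothetical driver state */
        void __iomem    *base;
        struct clk      *clk;
        u32             pseudo_palette[16];
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        struct demo_fb *fbi;
        struct resource *res;
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        fbi = devm_kzalloc(&pdev->dev, sizeof(*fbi), GFP_KERNEL);
        if (!fbi)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fbi->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fbi->base))
                return PTR_ERR(fbi->base);

        fbi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(fbi->clk))
                return PTR_ERR(fbi->clk);

        ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo-fb", fbi);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, fbi);
        return 0;       /* everything acquired above is released automatically on unbind */
}
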
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index 0139d13377a5..7a1a9ca33cec 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -69,6 +69,8 @@ struct sa1100fb_info {
const struct sa1100fb_mach_info *inf;
struct clk *clk;
+
+ u32 pseudo_palette[16];
};
#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 1ec9c3e0e1d8..02ee752d5000 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -6486,7 +6486,7 @@ SiS_SetTVSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
if(!(SiS_Pr->SiS_TVMode & TVSetPAL)) {
if(SiS_Pr->SiS_TVMode & TVSetNTSC1024) {
- const unsigned char specialtv[] = {
+ static const unsigned char specialtv[] = {
0xa7,0x07,0xf2,0x6e,0x17,0x8b,0x73,0x53,
0x13,0x40,0x34,0xf4,0x63,0xbb,0xcc,0x7a,
0x58,0xe4,0x73,0xda,0x13
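
Making the NTSC-1024 register table static const, as above, places it in .rodata so it is not rebuilt on the stack on every call. The same idiom in a standalone sketch (the demo_* names are hypothetical, the table is truncated):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void demo_write_regs(void __iomem *base, const u8 *vals, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                writeb(vals[i], base + i);
}

static void demo_set_special_mode(void __iomem *base)
{
        /* static const: one read-only copy, no per-call stack initialisation */
        static const u8 specialtv[] = {
                0xa7, 0x07, 0xf2, 0x6e, 0x17,
        };

        demo_write_regs(base, specialtv, ARRAY_SIZE(specialtv));
}
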
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index e92303823a4b..ecdd054d8951 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1702,6 +1702,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_INFO: /* For communication with X driver */
ivideo->sisfb_infoblock.sisfb_id = SISFB_ID;
ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR;
@@ -1755,6 +1756,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_VBRSTATUS:
if(sisfb_CheckVBRetrace(ivideo))
return put_user((u32)1, argp);
@@ -1765,6 +1767,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_AUTOMAXIMIZE:
if(ivideo->sisfb_max)
return put_user((u32)1, argp);
@@ -1775,6 +1778,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_SET_AUTOMAXIMIZE:
if(get_user(gpu32, argp))
return -EFAULT;
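
The sisfb ioctl cases above intentionally fall through from the deprecated command numbers to their replacements, and the added comments make that explicit so -Wimplicit-fallthrough stays quiet. The shape of the idiom in isolation (hypothetical DEMO_* commands):

#include <linux/errno.h>
#include <linux/kernel.h>

enum demo_cmd { DEMO_GET_INFO_OLD, DEMO_GET_INFO, DEMO_RESET };

static int demo_ioctl(unsigned int cmd)
{
        switch (cmd) {
        case DEMO_GET_INFO_OLD:
                pr_info("demo: deprecated ioctl, update your application\n");
                /* fall through */
        case DEMO_GET_INFO:
                return 0;               /* old and new commands share this handling */
        case DEMO_RESET:
                return 1;
        default:
                return -EINVAL;
        }
}
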
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 076dd2711630..6f0a19501c6a 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1008,6 +1008,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
case FB_BLANK_POWERDOWN:
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
+ /* fall through */
case FB_BLANK_NORMAL:
ctrl |= SM501_DC_CRT_CONTROL_BLANK;
@@ -1889,6 +1890,9 @@ static void sm501_free_init_fb(struct sm501fb_info *info,
{
struct fb_info *fbi = info->fb[head];
+ if (!fbi)
+ return;
+
fb_dealloc_cmap(&fbi->cmap);
}
@@ -2076,8 +2080,10 @@ static int sm501fb_remove(struct platform_device *pdev)
sm501_free_init_fb(info, HEAD_CRT);
sm501_free_init_fb(info, HEAD_PANEL);
- unregister_framebuffer(fbinfo_crt);
- unregister_framebuffer(fbinfo_pnl);
+ if (fbinfo_crt)
+ unregister_framebuffer(fbinfo_crt);
+ if (fbinfo_pnl)
+ unregister_framebuffer(fbinfo_pnl);
sm501fb_stop(info);
kfree(info);
@@ -2094,8 +2100,12 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
- struct sm501fb_par *par = fbi->par;
+ struct sm501fb_par *par;
+
+ if (!fbi)
+ return 0;
+ par = fbi->par;
if (par->screen.size == 0)
return 0;
@@ -2141,8 +2151,12 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
- struct sm501fb_par *par = fbi->par;
+ struct sm501fb_par *par;
+
+ if (!fbi)
+ return;
+ par = fbi->par;
if (par->screen.size == 0)
return;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index ef08a104fb42..d44f14242016 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
for (i = 0; i < len; i++) {
ret = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0), (0x02),
- (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
- HZ);
- if (ret < 1) {
- pr_err("Read EDID byte %d failed err %x\n", i, ret);
+ usb_rcvctrlpipe(dev->udev, 0), 0x02,
+ (0x80 | (0x02 << 5)), i << 8, 0xA1,
+ rbuf, 2, USB_CTRL_GET_TIMEOUT);
+ if (ret < 2) {
+ pr_err("Read EDID byte %d failed: %d\n", i, ret);
i--;
break;
}
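
The udlfb hunk above does two things: it uses USB_CTRL_GET_TIMEOUT instead of a bare HZ for the control transfer, and it treats anything shorter than the two requested bytes as a failure. Roughly the same read pulled out into a sketch; the vendor request 0x02 and the rbuf[1] layout come from the hunk, the demo_* name is hypothetical:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

static int demo_read_edid_byte(struct usb_device *udev, int i, u8 *out)
{
        u8 *rbuf;
        int ret;

        rbuf = kmalloc(2, GFP_KERNEL);  /* USB transfer buffers must be heap memory */
        if (!rbuf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x02,
                              0x80 | (0x02 << 5), i << 8, 0xA1,
                              rbuf, 2, USB_CTRL_GET_TIMEOUT);
        if (ret < 2) {                  /* short reads count as errors too */
                kfree(rbuf);
                return ret < 0 ? ret : -EIO;
        }

        *out = rbuf[1];                 /* the EDID byte is the second byte returned */
        kfree(rbuf);
        return 0;
}
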
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 18e896eeca62..12f7ea62dddd 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -70,7 +70,7 @@ module_param(use_gpio, int, 0);
MODULE_PARM_DESC(use_gpio,
"Use the gpio watchdog (required by old cobalt boards).");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(nowayout,
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long unused)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
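
alim7101_wdt and the similar watchdogs below (sbc60xx, sc520, via_wdt, w83877f) keep their statically defined timers; the conversion only touches the callback prototype, since DEFINE_TIMER now takes just a name and a function. A minimal sketch with hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define DEMO_INTERVAL   (HZ / 4)

static void demo_timer_ping(struct timer_list *unused);
static DEFINE_TIMER(demo_timer, demo_timer_ping);       /* no .data argument any more */
static unsigned long demo_next_heartbeat;

static void demo_timer_ping(struct timer_list *unused)
{
        /* keep pinging only while userspace keeps the heartbeat alive */
        if (time_before(jiffies, demo_next_heartbeat))
                mod_timer(&demo_timer, jiffies + DEMO_INTERVAL);
}
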
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 7e6acaf3ece4..88c05d0448b2 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -120,9 +120,9 @@ static inline void at91_wdt_reset(struct at91wdt *wdt)
/*
* Timer tick
*/
-static void at91_ping(unsigned long data)
+static void at91_ping(struct timer_list *t)
{
- struct at91wdt *wdt = (struct at91wdt *)data;
+ struct at91wdt *wdt = from_timer(wdt, t, timer);
if (time_before(jiffies, wdt->next_heartbeat) ||
!watchdog_active(&wdt->wdd)) {
at91_wdt_reset(wdt);
@@ -222,7 +222,7 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
"watchdog already configured differently (mr = %x expecting %x)\n",
tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
- setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, at91_ping, 0);
/*
* Use min_heartbeat the first time to avoid spurious watchdog reset:
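
Where the timer callback does need its driver context, as in at91sam9_wdt above and the bcm47xx, mpc8xxx and shwdt hunks below, the timer is embedded in the private structure and the callback recovers the container with from_timer() instead of casting an unsigned long. A sketch with hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>

struct demo_wdt {
        struct timer_list timer;
        unsigned long next_heartbeat;
};

static void demo_ping(struct timer_list *t)
{
        /* from_timer() maps the timer_list back to its containing structure */
        struct demo_wdt *wdt = from_timer(wdt, t, timer);

        if (time_before(jiffies, wdt->next_heartbeat))
                mod_timer(&wdt->timer, jiffies + HZ);
}

static struct demo_wdt *demo_init(void)
{
        struct demo_wdt *wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);

        if (!wdt)
                return NULL;

        wdt->next_heartbeat = jiffies + 10 * HZ;
        timer_setup(&wdt->timer, demo_ping, 0); /* no (unsigned long)wdt cast needed */
        mod_timer(&wdt->timer, jiffies + HZ);
        return wdt;
}
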
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 236582809336..f41b756d6dd5 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -106,9 +106,9 @@ static const struct watchdog_ops bcm47xx_wdt_hard_ops = {
.restart = bcm47xx_wdt_restart,
};
-static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
+static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
{
- struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+ struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
if (!atomic_dec_and_test(&wdt->soft_ticks)) {
@@ -133,7 +133,7 @@ static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
bcm47xx_wdt_soft_keepalive(wdd);
- bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
+ bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer);
return 0;
}
@@ -190,8 +190,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
if (soft) {
wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
- setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
- (long unsigned int)wdt);
+ timer_setup(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, 0);
} else {
wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
}
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index ab26fd90729e..8555afc70f9b 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -77,7 +77,7 @@ static void bcm63xx_wdt_isr(void *data)
die(PFX " fire", regs);
}
-static void bcm63xx_timer_tick(unsigned long unused)
+static void bcm63xx_timer_tick(struct timer_list *unused)
{
if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
bcm63xx_wdt_hw_start();
@@ -240,7 +240,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
int ret;
struct resource *r;
- setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L);
+ timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 6c3f78e45c26..6cfb102c397c 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -69,7 +69,7 @@ static struct {
/* generic helper functions */
-static void cpu5wdt_trigger(unsigned long unused)
+static void cpu5wdt_trigger(struct timer_list *unused)
{
if (verbose > 2)
pr_debug("trigger at %i ticks\n", ticks);
@@ -224,7 +224,7 @@ static int cpu5wdt_init(void)
init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
- setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
+ timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 8a616a57bb90..88d823d87a4b 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -121,7 +121,7 @@ module_param(action, int, 0);
MODULE_PARM_DESC(action, "after watchdog resets, generate: "
"0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
-static void zf_ping(unsigned long data);
+static void zf_ping(struct timer_list *unused);
static int zf_action = GEN_RESET;
static unsigned long zf_is_open;
@@ -237,7 +237,7 @@ static void zf_timer_on(void)
}
-static void zf_ping(unsigned long data)
+static void zf_ping(struct timer_list *unused)
{
unsigned int ctrl_reg = 0;
unsigned long flags;
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index c9e38096ea91..3cc07447c655 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -99,7 +99,7 @@ static struct {
{0x0000, 0},
};
-static void mixcomwd_timerfun(unsigned long d);
+static void mixcomwd_timerfun(struct timer_list *unused);
static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
@@ -120,7 +120,7 @@ static void mixcomwd_ping(void)
return;
}
-static void mixcomwd_timerfun(unsigned long d)
+static void mixcomwd_timerfun(struct timer_list *unused)
{
mixcomwd_ping();
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 366e5c7e650b..6610e9217dbc 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -80,9 +80,9 @@ static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
spin_unlock(&ddata->lock);
}
-static void mpc8xxx_wdt_timer_ping(unsigned long arg)
+static void mpc8xxx_wdt_timer_ping(struct timer_list *t)
{
- struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
+ struct mpc8xxx_wdt_ddata *ddata = from_timer(ddata, t, timer);
mpc8xxx_wdt_keepalive(ddata);
/* We're pinging it twice faster than needed, just to be sure. */
@@ -173,8 +173,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
}
spin_lock_init(&ddata->lock);
- setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
- (unsigned long)ddata);
+ timer_setup(&ddata->timer, mpc8xxx_wdt_timer_ping, 0);
ddata->wdd.info = &mpc8xxx_wdt_info,
ddata->wdd.ops = &mpc8xxx_wdt_ops,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ff27c4ac96e4..ca360d204548 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -68,7 +68,7 @@ static struct {
unsigned int gstate;
} mtx1_wdt_device;
-static void mtx1_wdt_trigger(unsigned long unused)
+static void mtx1_wdt_trigger(struct timer_list *unused)
{
spin_lock(&mtx1_wdt_device.lock);
if (mtx1_wdt_device.running)
@@ -219,7 +219,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
init_completion(&mtx1_wdt_device.stop);
mtx1_wdt_device.queue = 0;
clear_bit(0, &mtx1_wdt_device.inuse);
- setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
+ timer_setup(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0);
mtx1_wdt_device.default_ticks = ticks;
ret = misc_register(&mtx1_wdt_misc);
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index d5bed78c4d9f..830bd04ff911 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -216,7 +216,7 @@ static ssize_t nuc900_wdt_write(struct file *file, const char __user *data,
return len;
}
-static void nuc900_wdt_timer_ping(unsigned long data)
+static void nuc900_wdt_timer_ping(struct timer_list *unused)
{
if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
nuc900_wdt_keepalive();
@@ -267,7 +267,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
clk_enable(nuc900_wdt->wdt_clock);
- setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
+ timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
ret = misc_register(&nuc900wdt_miscdev);
if (ret) {
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 3ad5206d7935..b72ce68eacd3 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -367,7 +367,7 @@ static void pcwd_show_card_info(void)
pr_info("No previous trip detected - Cold boot or reset\n");
}
-static void pcwd_timer_ping(unsigned long data)
+static void pcwd_timer_ping(struct timer_list *unused)
{
int wdrst_stat;
@@ -893,7 +893,7 @@ static int pcwd_isa_probe(struct device *dev, unsigned int id)
/* clear the "card caused reboot" flag */
pcwd_clear_status();
- setup_timer(&pcwd_private.timer, pcwd_timer_ping, 0);
+ timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
/* Disable the board */
pcwd_stop();
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index e35cf5e87907..e0a6f8c0f03c 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -85,7 +85,7 @@ static inline void pikawdt_reset(void)
/*
* Timer tick
*/
-static void pikawdt_ping(unsigned long data)
+static void pikawdt_ping(struct timer_list *unused)
{
if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
(!nowayout && !pikawdt_private.open)) {
@@ -269,7 +269,7 @@ static int __init pikawdt_init(void)
iounmap(fpga);
- setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+ timer_setup(&pikawdt_private.timer, pikawdt_ping, 0);
ret = misc_register(&pikawdt_miscdev);
if (ret) {
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 47a8f1b1087d..a281aa84bfb1 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -67,7 +67,7 @@ static struct {
/* generic helper functions */
-static void rdc321x_wdt_trigger(unsigned long unused)
+static void rdc321x_wdt_trigger(struct timer_list *unused)
{
unsigned long flags;
u32 val;
@@ -262,7 +262,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
clear_bit(0, &rdc321x_wdt_device.inuse);
- setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+ timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
rdc321x_wdt_device.default_ticks = ticks;
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 8d589939bc84..87333a41f753 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -122,7 +122,7 @@ static char wdt_expect_close;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 3e9bbaa37bf4..6aadb56e7faa 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(nowayout,
static __u16 __iomem *wdtmrctl;
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 517a733175ef..a7d6425db807 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -175,9 +175,9 @@ static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
return 0;
}
-static void sh_wdt_ping(unsigned long data)
+static void sh_wdt_ping(struct timer_list *t)
{
- struct sh_wdt *wdt = (struct sh_wdt *)data;
+ struct sh_wdt *wdt = from_timer(wdt, t, timer);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
@@ -275,7 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
return rc;
}
- setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, sh_wdt_ping, 0);
wdt->timer.expires = next_ping_period(clock_division_ratio);
dev_info(&pdev->dev, "initialized.\n");
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index ad3c3be13b40..b085ef1084ec 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -67,7 +67,7 @@ static struct watchdog_device wdt_dev;
static struct resource wdt_res;
static void __iomem *wdt_mem;
static unsigned int mmio;
-static void wdt_timer_tick(unsigned long data);
+static void wdt_timer_tick(struct timer_list *unused);
static DEFINE_TIMER(timer, wdt_timer_tick);
/* The timer that pings the watchdog */
static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
@@ -88,7 +88,7 @@ static inline void wdt_reset(void)
* then the external/userspace heartbeat).
* 2) the watchdog timer has been stopped by userspace.
*/
-static void wdt_timer_tick(unsigned long data)
+static void wdt_timer_tick(struct timer_list *unused)
{
if (time_before(jiffies, next_heartbeat) ||
(!watchdog_active(&wdt_dev))) {
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index ba6b680af100..05658ecc0aa4 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -108,7 +108,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 139e018a82b0..f45114fd8e1e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -358,10 +358,10 @@ struct deferred_entry {
struct page *page;
};
static LIST_HEAD(deferred_list);
-static void gnttab_handle_deferred(unsigned long);
+static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
-static void gnttab_handle_deferred(unsigned long unused)
+static void gnttab_handle_deferred(struct timer_list *unused)
{
unsigned int nr = 10;
struct deferred_entry *first = NULL;
diff --git a/firmware/Makefile b/firmware/Makefile
index 168094a3fae7..29641383e136 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -59,6 +59,3 @@ endif
targets := $(patsubst $(obj)/%,%, \
$(shell find $(obj) -name \*.gen.S 2>/dev/null))
-# Without this, built-in.o won't be created when it's empty, and the
-# final vmlinux link will fail.
-obj- := dummy
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 2a5de610dd8f..bdabb2765d1b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
if (v9inode->qid.type != st->qid.type)
return 0;
+
+ if (v9inode->qid.path != st->qid.path)
+ return 0;
return 1;
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 70f9887c59a9..7f6ae21a27b3 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
if (v9inode->qid.type != st->qid.type)
return 0;
+
+ if (v9inode->qid.path != st->qid.path)
+ return 0;
return 1;
}
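
The two 9p hunks above harden the inode-cache lookup: the test callback now also rejects an inode whose qid.path differs, so two files that merely collide on the hash value are no longer conflated. This is the kind of test function handed to iget5_locked(); a reduced sketch of the idea, with a hypothetical demo qid type:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_qid {                       /* hypothetical on-the-wire identifier */
        u8  type;
        u64 path;
};

struct demo_inode {
        struct demo_qid qid;
        struct inode    vfs_inode;
};

/* iget5_locked() test callback: return 1 for the same object, 0 for a collision */
static int demo_test_inode(struct inode *inode, void *data)
{
        struct demo_qid *st = data;
        struct demo_inode *di = container_of(inode, struct demo_inode, vfs_inode);

        if (di->qid.type != st->type)
                return 0;
        if (di->qid.path != st->path)
                return 0;
        return 1;
}
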
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 8b75463cb211..af03c2a901eb 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -94,13 +94,13 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
if (v9ses->cache)
sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
- sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+ sb->s_flags |= SB_ACTIVE | SB_DIRSYNC | SB_NOATIME;
if (!v9ses->cache)
- sb->s_flags |= MS_SYNCHRONOUS;
+ sb->s_flags |= SB_SYNCHRONOUS;
#ifdef CONFIG_9P_FS_POSIX_ACL
if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
return 0;
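
This and the adfs/affs hunks that follow swap the MS_* mount-flag names for the SB_* names wherever sb->s_flags is touched; the behaviour is unchanged, the SB_* spellings just mark in-kernel superblock state rather than the mount(2) userspace ABI. The fill_super idiom in isolation (hypothetical filesystem):

#include <linux/fs.h>

static int demo_fill_super(struct super_block *sb, void *data, int silent)
{
        /* in-kernel superblock state uses the SB_* spellings of these bits */
        sb->s_flags |= SB_ACTIVE | SB_NODIRATIME | SB_NOATIME;

        if (!data)                      /* e.g. caching disabled */
                sb->s_flags |= SB_SYNCHRONOUS;

        return 0;
}
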
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index c9fdfb112933..cfda2c7caedc 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -213,7 +213,7 @@ static int parse_options(struct super_block *sb, char *options)
static int adfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
return parse_options(sb, data);
}
@@ -372,7 +372,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root;
int ret = -EINVAL;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
asb = kzalloc(sizeof(*asb), GFP_KERNEL);
if (!asb)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 185d5ab7e986..0f0e6925e97d 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -453,7 +453,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
if (!sb_rdonly(sb))
pr_warn("Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
va_end(args);
}
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
index 2b1399611d9e..5ba9ef2742f6 100644
--- a/fs/affs/bitmap.c
+++ b/fs/affs/bitmap.c
@@ -250,12 +250,12 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
int i, res = 0;
struct affs_sb_info *sbi = AFFS_SB(sb);
- if (*flags & MS_RDONLY)
+ if (*flags & SB_RDONLY)
return 0;
if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -288,7 +288,7 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
if (affs_checksum_block(sb, bh)) {
pr_warn("Bitmap %u invalid - mounting %s read only.\n",
bm->bm_key, sb->s_id);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
goto out;
}
pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 884bedab7266..1117e36134cc 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -356,7 +356,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = AFFS_SUPER_MAGIC;
sb->s_op = &affs_sops;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
if (!sbi)
@@ -466,7 +466,7 @@ got_root:
if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
|| chksum == MUFS_DCOFS) && !sb_rdonly(sb)) {
pr_notice("Dircache FS - mounting %s read only\n", sb->s_id);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
switch (chksum) {
case MUFS_FS:
@@ -488,7 +488,7 @@ got_root:
/* fall thru */
case FS_OFS:
affs_set_opt(sbi->s_flags, SF_OFS);
- sb->s_flags |= MS_NOEXEC;
+ sb->s_flags |= SB_NOEXEC;
break;
case MUFS_DCOFS:
case MUFS_INTLOFS:
@@ -497,7 +497,7 @@ got_root:
case FS_INTLOFS:
affs_set_opt(sbi->s_flags, SF_INTL);
affs_set_opt(sbi->s_flags, SF_OFS);
- sb->s_flags |= MS_NOEXEC;
+ sb->s_flags |= SB_NOEXEC;
break;
default:
pr_err("Unknown filesystem on device %s: %08X\n",
@@ -513,7 +513,7 @@ got_root:
sig, sig[3] + '0', blocksize);
}
- sb->s_flags |= MS_NODEV | MS_NOSUID;
+ sb->s_flags |= SB_NODEV | SB_NOSUID;
sbi->s_data_blksize = sb->s_blocksize;
if (affs_test_opt(sbi->s_flags, SF_OFS))
@@ -570,7 +570,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
memcpy(volume, sbi->s_volume, 32);
if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
@@ -596,10 +596,10 @@ affs_remount(struct super_block *sb, int *flags, char *data)
memcpy(sbi->s_volume, volume, 32);
spin_unlock(&sbi->symlink_lock);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & MS_RDONLY)
+ if (*flags & SB_RDONLY)
affs_free_bitmap(sb);
else
res = affs_init_bitmap(sb, flags);
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 1858c91169e4..9bb921d120d0 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -207,13 +207,8 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
rcu_read_lock();
cell = afs_lookup_cell_rcu(net, name, namesz);
rcu_read_unlock();
- if (!IS_ERR(cell)) {
- if (excl) {
- afs_put_cell(net, cell);
- return ERR_PTR(-EEXIST);
- }
+ if (!IS_ERR(cell))
goto wait_for_cell;
- }
}
/* Assume we're probably going to create a cell and preallocate and
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ab618d32554c..ff8d5bf4354f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -765,6 +765,8 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
if (fc->ac.error < 0)
return;
+ d_drop(new_dentry);
+
inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
newfid, newstatus, newcb, fc->cbi);
if (IS_ERR(inode)) {
@@ -775,9 +777,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
return;
}
- d_instantiate(new_dentry, inode);
- if (d_unhashed(new_dentry))
- d_rehash(new_dentry);
+ d_add(new_dentry, inode);
}
/*
@@ -818,6 +818,8 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
@@ -972,7 +974,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct afs_fs_cursor fc;
struct afs_file_status newstatus;
struct afs_callback newcb;
- struct afs_vnode *dvnode = dvnode = AFS_FS_I(dir);
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_fid newfid;
struct key *key;
int ret;
@@ -1006,6 +1008,8 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
@@ -1053,7 +1057,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- return -ERESTARTSYS;
+ goto error_key;
}
while (afs_select_fileserver(&fc)) {
@@ -1071,6 +1075,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
@@ -1130,6 +1136,8 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
@@ -1180,7 +1188,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- return -ERESTARTSYS;
+ goto error_key;
}
}
while (afs_select_fileserver(&fc)) {
@@ -1199,14 +1207,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto error_key;
}
- key_put(key);
- _leave(" = 0");
- return 0;
-
error_key:
key_put(key);
error:
- d_drop(new_dentry);
_leave(" = %d", ret);
return ret;
}
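
The afs/dir.c changes above follow one pattern: d_drop() the dentry before the creation RPC goes out and only d_add() it once the new inode is actually in hand, so an interrupted or failed operation cannot leave a stale hashed dentry behind; the added "goto error_key" branches likewise ensure the error path is taken when afs_begin_vnode_operation() fails instead of falling through to the success return. The dentry half of that, reduced to a sketch (demo_create_rpc is a hypothetical stand-in for the RPC):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

extern struct inode *demo_create_rpc(struct inode *dir, struct dentry *dentry);

static int demo_create(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode;

        d_drop(dentry);                 /* unhash before the server round trip */

        inode = demo_create_rpc(dir, dentry);
        if (IS_ERR(inode))
                return PTR_ERR(inode);  /* dentry stays unhashed on failure */

        d_add(dentry, inode);           /* instantiate and rehash in one step */
        return 0;
}
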
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 7571a5dfd5a3..c40ba2fe3cbe 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -170,7 +170,7 @@ void afs_lock_work(struct work_struct *work)
{
struct afs_vnode *vnode =
container_of(work, struct afs_vnode, lock_work.work);
- struct file_lock *fl;
+ struct file_lock *fl, *next;
afs_lock_type_t type;
struct key *key;
int ret;
@@ -179,117 +179,136 @@ void afs_lock_work(struct work_struct *work)
spin_lock(&vnode->lock);
- if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
+again:
+ _debug("wstate %u for %p", vnode->lock_state, vnode);
+ switch (vnode->lock_state) {
+ case AFS_VNODE_LOCK_NEED_UNLOCK:
_debug("unlock");
+ vnode->lock_state = AFS_VNODE_LOCK_UNLOCKING;
spin_unlock(&vnode->lock);
/* attempt to release the server lock; if it fails, we just
- * wait 5 minutes and it'll time out anyway */
- ret = afs_release_lock(vnode, vnode->unlock_key);
+ * wait 5 minutes and it'll expire anyway */
+ ret = afs_release_lock(vnode, vnode->lock_key);
if (ret < 0)
printk(KERN_WARNING "AFS:"
" Failed to release lock on {%x:%x} error %d\n",
vnode->fid.vid, vnode->fid.vnode, ret);
spin_lock(&vnode->lock);
- key_put(vnode->unlock_key);
- vnode->unlock_key = NULL;
- clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
- }
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
+ vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+ if (list_empty(&vnode->pending_locks)) {
+ spin_unlock(&vnode->lock);
+ return;
+ }
+
+ /* The new front of the queue now owns the state variables. */
+ next = list_entry(vnode->pending_locks.next,
+ struct file_lock, fl_u.afs.link);
+ vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+ goto again;
- /* if we've got a lock, then it must be time to extend that lock as AFS
- * locks time out after 5 minutes */
- if (!list_empty(&vnode->granted_locks)) {
+ /* If we've already got a lock, then it must be time to extend that
+ * lock as AFS locks time out after 5 minutes.
+ */
+ case AFS_VNODE_LOCK_GRANTED:
_debug("extend");
- if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
- BUG();
- fl = list_entry(vnode->granted_locks.next,
- struct file_lock, fl_u.afs.link);
- key = key_get(afs_file_key(fl->fl_file));
+ ASSERT(!list_empty(&vnode->granted_locks));
+
+ key = key_get(vnode->lock_key);
+ vnode->lock_state = AFS_VNODE_LOCK_EXTENDING;
spin_unlock(&vnode->lock);
- ret = afs_extend_lock(vnode, key);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ ret = afs_extend_lock(vnode, key); /* RPC */
key_put(key);
- switch (ret) {
- case 0:
+
+ if (ret < 0)
+ pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n",
+ vnode->fid.vid, vnode->fid.vnode, ret);
+
+ spin_lock(&vnode->lock);
+
+ if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
+ goto again;
+ vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+
+ if (ret == 0)
afs_schedule_lock_extension(vnode);
- break;
- default:
- /* ummm... we failed to extend the lock - retry
- * extension shortly */
- printk(KERN_WARNING "AFS:"
- " Failed to extend lock on {%x:%x} error %d\n",
- vnode->fid.vid, vnode->fid.vnode, ret);
+ else
queue_delayed_work(afs_lock_manager, &vnode->lock_work,
HZ * 10);
- break;
- }
- _leave(" [extend]");
+ spin_unlock(&vnode->lock);
+ _leave(" [ext]");
return;
- }
- /* if we don't have a granted lock, then we must've been called back by
- * the server, and so if might be possible to get a lock we're
- * currently waiting for */
- if (!list_empty(&vnode->pending_locks)) {
+ /* If we don't have a granted lock, then we must've been called
+ * back by the server, and so it might be possible to get a
+ * lock we're currently waiting for.
+ */
+ case AFS_VNODE_LOCK_WAITING_FOR_CB:
_debug("get");
- if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
- BUG();
- fl = list_entry(vnode->pending_locks.next,
- struct file_lock, fl_u.afs.link);
- key = key_get(afs_file_key(fl->fl_file));
- type = (fl->fl_type == F_RDLCK) ?
- AFS_LOCK_READ : AFS_LOCK_WRITE;
+ key = key_get(vnode->lock_key);
+ type = vnode->lock_type;
+ vnode->lock_state = AFS_VNODE_LOCK_SETTING;
spin_unlock(&vnode->lock);
- ret = afs_set_lock(vnode, key, type);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ ret = afs_set_lock(vnode, key, type); /* RPC */
+ key_put(key);
+
+ spin_lock(&vnode->lock);
switch (ret) {
case -EWOULDBLOCK:
_debug("blocked");
break;
case 0:
_debug("acquired");
- if (type == AFS_LOCK_READ)
- set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
- else
- set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
- ret = AFS_LOCK_GRANTED;
+ vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+ /* Fall through */
default:
- spin_lock(&vnode->lock);
- /* the pending lock may have been withdrawn due to a
- * signal */
- if (list_entry(vnode->pending_locks.next,
- struct file_lock, fl_u.afs.link) == fl) {
- fl->fl_u.afs.state = ret;
- if (ret == AFS_LOCK_GRANTED)
- afs_grant_locks(vnode, fl);
- else
- list_del_init(&fl->fl_u.afs.link);
- wake_up(&fl->fl_wait);
- spin_unlock(&vnode->lock);
- } else {
+ /* Pass the lock or the error onto the first locker in
+ * the list - if they're looking for this type of lock.
+ * If they're not, we assume that whoever asked for it
+ * took a signal.
+ */
+ if (list_empty(&vnode->pending_locks)) {
_debug("withdrawn");
- clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
- clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
- spin_unlock(&vnode->lock);
- afs_release_lock(vnode, key);
- if (!list_empty(&vnode->pending_locks))
- afs_lock_may_be_available(vnode);
+ vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+ goto again;
}
- break;
+
+ fl = list_entry(vnode->pending_locks.next,
+ struct file_lock, fl_u.afs.link);
+ type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ if (vnode->lock_type != type) {
+ _debug("changed");
+ vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+ goto again;
+ }
+
+ fl->fl_u.afs.state = ret;
+ if (ret == 0)
+ afs_grant_locks(vnode, fl);
+ else
+ list_del_init(&fl->fl_u.afs.link);
+ wake_up(&fl->fl_wait);
+ spin_unlock(&vnode->lock);
+ _leave(" [granted]");
+ return;
}
- key_put(key);
- _leave(" [pend]");
+
+ default:
+ /* Looks like a lock request was withdrawn. */
+ spin_unlock(&vnode->lock);
+ _leave(" [no]");
return;
}
-
- /* looks like the lock request was withdrawn on a signal */
- spin_unlock(&vnode->lock);
- _leave(" [no locks]");
}
/*
@@ -298,15 +317,105 @@ void afs_lock_work(struct work_struct *work)
* AF_RXRPC
* - the caller must hold the vnode lock
*/
-static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
+static void afs_defer_unlock(struct afs_vnode *vnode)
{
- cancel_delayed_work(&vnode->lock_work);
- if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
- !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
- BUG();
- if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
- BUG();
- vnode->unlock_key = key_get(key);
+ _enter("");
+
+ if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
+ vnode->lock_state == AFS_VNODE_LOCK_EXTENDING) {
+ cancel_delayed_work(&vnode->lock_work);
+
+ vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+ afs_lock_may_be_available(vnode);
+ }
+}
+
+/*
+ * Check that our view of the file metadata is up to date and check to see
+ * whether we think that we have a locking permit.
+ */
+static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
+ afs_lock_type_t type, bool can_sleep)
+{
+ afs_access_t access;
+ int ret;
+
+ /* Make sure we've got a callback on this file and that our view of the
+ * data version is up to date.
+ */
+ ret = afs_validate(vnode, key);
+ if (ret < 0)
+ return ret;
+
+ /* Check the permission set to see if we're actually going to be
+ * allowed to get a lock on this file.
+ */
+ ret = afs_check_permit(vnode, key, &access);
+ if (ret < 0)
+ return ret;
+
+ /* At a rough estimation, you need LOCK, WRITE or INSERT perm to
+ * read-lock a file and WRITE or INSERT perm to write-lock a file.
+ *
+ * We can't rely on the server to do this for us since if we want to
+ * share a read lock that we already have, we won't go to the server.
+ */
+ if (type == AFS_LOCK_READ) {
+ if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
+ return -EACCES;
+ if (vnode->status.lock_count == -1 && !can_sleep)
+ return -EAGAIN; /* Write locked */
+ } else {
+ if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
+ return -EACCES;
+ if (vnode->status.lock_count != 0 && !can_sleep)
+ return -EAGAIN; /* Locked */
+ }
+
+ return 0;
+}
+
+/*
+ * Remove the front runner from the pending queue.
+ * - The caller must hold vnode->lock.
+ */
+static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
+{
+ struct file_lock *next;
+
+ _enter("");
+
+ /* ->lock_type, ->lock_key and ->lock_state only belong to this
+ * file_lock if we're at the front of the pending queue or if we have
+ * the lock granted or if the lock_state is NEED_UNLOCK or UNLOCKING.
+ */
+ if (vnode->granted_locks.next == &fl->fl_u.afs.link &&
+ vnode->granted_locks.prev == &fl->fl_u.afs.link) {
+ list_del_init(&fl->fl_u.afs.link);
+ afs_defer_unlock(vnode);
+ return;
+ }
+
+ if (!list_empty(&vnode->granted_locks) ||
+ vnode->pending_locks.next != &fl->fl_u.afs.link) {
+ list_del_init(&fl->fl_u.afs.link);
+ return;
+ }
+
+ list_del_init(&fl->fl_u.afs.link);
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
+ vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+ if (list_empty(&vnode->pending_locks))
+ return;
+
+ /* The new front of the queue now owns the state variables. */
+ next = list_entry(vnode->pending_locks.next,
+ struct file_lock, fl_u.afs.link);
+ vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
afs_lock_may_be_available(vnode);
}
@@ -315,7 +424,7 @@ static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
*/
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = locks_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
afs_lock_type_t type;
struct key *key = afs_file_key(file);
@@ -333,165 +442,136 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
- spin_lock(&inode->i_lock);
-
- /* make sure we've got a callback on this file and that our view of the
- * data version is up to date */
- ret = afs_validate(vnode, key);
+ ret = afs_do_setlk_check(vnode, key, type, fl->fl_flags & FL_SLEEP);
if (ret < 0)
- goto error;
-
- if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
- ret = -EAGAIN;
- goto error;
- }
+ return ret;
spin_lock(&vnode->lock);
- /* if we've already got a readlock on the server then we can instantly
+ /* If we've already got a readlock on the server then we instantly
* grant another readlock, irrespective of whether there are any
- * pending writelocks */
+ * pending writelocks.
+ */
if (type == AFS_LOCK_READ &&
- vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+ vnode->lock_state == AFS_VNODE_LOCK_GRANTED &&
+ vnode->lock_type == AFS_LOCK_READ) {
_debug("instant readlock");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
ASSERT(!list_empty(&vnode->granted_locks));
- goto sharing_existing_lock;
+ goto share_existing_lock;
}
- /* if there's no-one else with a lock on this vnode, then we need to
- * ask the server for a lock */
- if (list_empty(&vnode->pending_locks) &&
- list_empty(&vnode->granted_locks)) {
- _debug("not locked");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_READLOCKED) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
- set_bit(AFS_VNODE_LOCKING, &vnode->flags);
- spin_unlock(&vnode->lock);
+ list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
- ret = afs_set_lock(vnode, key, type);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
- switch (ret) {
- case 0:
- _debug("acquired");
- goto acquired_server_lock;
- case -EWOULDBLOCK:
- _debug("would block");
- spin_lock(&vnode->lock);
- ASSERT(list_empty(&vnode->granted_locks));
- ASSERTCMP(vnode->pending_locks.next, ==,
- &fl->fl_u.afs.link);
- goto wait;
- default:
- spin_lock(&vnode->lock);
- list_del_init(&fl->fl_u.afs.link);
- spin_unlock(&vnode->lock);
- goto error;
- }
- }
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ goto need_to_wait;
- /* otherwise, we need to wait for a local lock to become available */
- _debug("wait local");
- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-wait:
- if (!(fl->fl_flags & FL_SLEEP)) {
- _debug("noblock");
- ret = -EAGAIN;
- goto abort_attempt;
- }
+ /* We don't have a lock on this vnode and we aren't currently waiting
+ * for one either, so ask the server for a lock.
+ *
+ * Note that we need to be careful if we get interrupted by a signal
+ * after dispatching the request as we may still get the lock, even
+ * though we don't wait for the reply (it's not too bad a problem - the
+ * lock will expire in 10 mins anyway).
+ */
+ _debug("not locked");
+ vnode->lock_key = key_get(key);
+ vnode->lock_type = type;
+ vnode->lock_state = AFS_VNODE_LOCK_SETTING;
spin_unlock(&vnode->lock);
- /* now we need to sleep and wait for the lock manager thread to get the
- * lock from the server */
- _debug("sleep");
- ret = wait_event_interruptible(fl->fl_wait,
- fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
- if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
- ret = fl->fl_u.afs.state;
- if (ret < 0)
- goto error;
- spin_lock(&vnode->lock);
- goto given_lock;
- }
-
- /* we were interrupted, but someone may still be in the throes of
- * giving us the lock */
- _debug("intr");
- ASSERTCMP(ret, ==, -ERESTARTSYS);
+ ret = afs_set_lock(vnode, key, type); /* RPC */
spin_lock(&vnode->lock);
- if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
- ret = fl->fl_u.afs.state;
- if (ret < 0) {
- spin_unlock(&vnode->lock);
- goto error;
- }
- goto given_lock;
- }
+ switch (ret) {
+ default:
+ goto abort_attempt;
-abort_attempt:
- /* we aren't going to get the lock, either because we're unwilling to
- * wait, or because some signal happened */
- _debug("abort");
- if (list_empty(&vnode->granted_locks) &&
- vnode->pending_locks.next == &fl->fl_u.afs.link) {
- if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
- /* kick the next pending lock into having a go */
- list_del_init(&fl->fl_u.afs.link);
- afs_lock_may_be_available(vnode);
- }
- } else {
- list_del_init(&fl->fl_u.afs.link);
+ case -EWOULDBLOCK:
+ /* The server doesn't have a lock-waiting queue, so the client
+ * will have to retry. The server will break the outstanding
+ * callbacks on a file when a lock is released.
+ */
+ _debug("would block");
+ ASSERT(list_empty(&vnode->granted_locks));
+ ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
+ vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+ goto need_to_wait;
+
+ case 0:
+ _debug("acquired");
+ break;
}
- spin_unlock(&vnode->lock);
- goto error;
-acquired_server_lock:
/* we've acquired a server lock, but it needs to be renewed after 5
* mins */
- spin_lock(&vnode->lock);
+ vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
afs_schedule_lock_extension(vnode);
- if (type == AFS_LOCK_READ)
- set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
- else
- set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-sharing_existing_lock:
+
+share_existing_lock:
/* the lock has been granted as far as we're concerned... */
fl->fl_u.afs.state = AFS_LOCK_GRANTED;
list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+
given_lock:
/* ... but we do still need to get the VFS's blessing */
- ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
- ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
- (1 << AFS_VNODE_WRITELOCKED))) != 0);
+ spin_unlock(&vnode->lock);
+
ret = posix_lock_file(file, fl, NULL);
if (ret < 0)
goto vfs_rejected_lock;
- spin_unlock(&vnode->lock);
- /* again, make sure we've got a callback on this file and, again, make
+ /* Again, make sure we've got a callback on this file and, again, make
* sure that our view of the data version is up to date (we ignore
- * errors incurred here and deal with the consequences elsewhere) */
+ * errors incurred here and deal with the consequences elsewhere).
+ */
afs_validate(vnode, key);
+ _leave(" = 0");
+ return 0;
-error:
- spin_unlock(&inode->i_lock);
+need_to_wait:
+ /* We're going to have to wait. Either this client doesn't have a lock
+ * on the server yet and we need to wait for a callback to occur, or
+ * the client does have a lock on the server, but it belongs to some
+ * other process(es) and is incompatible with the lock we want.
+ */
+ ret = -EAGAIN;
+ if (fl->fl_flags & FL_SLEEP) {
+ spin_unlock(&vnode->lock);
+
+ _debug("sleep");
+ ret = wait_event_interruptible(fl->fl_wait,
+ fl->fl_u.afs.state != AFS_LOCK_PENDING);
+
+ spin_lock(&vnode->lock);
+ }
+
+ if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
+ goto given_lock;
+ if (fl->fl_u.afs.state < 0)
+ ret = fl->fl_u.afs.state;
+
+abort_attempt:
+ /* we aren't going to get the lock, either because we're unwilling to
+ * wait, or because some signal happened */
+ _debug("abort");
+ afs_dequeue_lock(vnode, fl);
+
+error_unlock:
+ spin_unlock(&vnode->lock);
_leave(" = %d", ret);
return ret;
vfs_rejected_lock:
- /* the VFS rejected the lock we just obtained, so we have to discard
- * what we just got */
+ /* The VFS rejected the lock we just obtained, so we have to discard
+ * what we just got. We defer this to the lock manager work item to
+ * deal with.
+ */
_debug("vfs refused %d", ret);
+ spin_lock(&vnode->lock);
list_del_init(&fl->fl_u.afs.link);
if (list_empty(&vnode->granted_locks))
- afs_defer_unlock(vnode, key);
- goto abort_attempt;
+ afs_defer_unlock(vnode);
+ goto error_unlock;
}
/*
@@ -499,34 +579,21 @@ vfs_rejected_lock:
*/
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
- struct key *key = afs_file_key(file);
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
int ret;
_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+ /* Flush all pending writes before doing anything with locks. */
+ vfs_fsync(file, 0);
+
/* only whole-file unlocks are supported */
if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
return -EINVAL;
- fl->fl_ops = &afs_lock_ops;
- INIT_LIST_HEAD(&fl->fl_u.afs.link);
- fl->fl_u.afs.state = AFS_LOCK_PENDING;
-
- spin_lock(&vnode->lock);
ret = posix_lock_file(file, fl, NULL);
- if (ret < 0) {
- spin_unlock(&vnode->lock);
- _leave(" = %d [vfs]", ret);
- return ret;
- }
-
- /* discard the server lock only if all granted locks are gone */
- if (list_empty(&vnode->granted_locks))
- afs_defer_unlock(vnode, key);
- spin_unlock(&vnode->lock);
- _leave(" = 0");
- return 0;
+ _leave(" = %d [%u]", ret, vnode->lock_state);
+ return ret;
}
/*
@@ -534,7 +601,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
*/
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
struct key *key = afs_file_key(file);
int ret, lock_count;
@@ -542,29 +609,25 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
fl->fl_type = F_UNLCK;
- inode_lock(&vnode->vfs_inode);
-
/* check local lock records first */
- ret = 0;
posix_test_lock(file, fl);
if (fl->fl_type == F_UNLCK) {
/* no local locks; consult the server */
ret = afs_fetch_status(vnode, key);
if (ret < 0)
goto error;
- lock_count = vnode->status.lock_count;
- if (lock_count) {
- if (lock_count > 0)
- fl->fl_type = F_RDLCK;
- else
- fl->fl_type = F_WRLCK;
- fl->fl_start = 0;
- fl->fl_end = OFFSET_MAX;
- }
+
+ lock_count = READ_ONCE(vnode->status.lock_count);
+ if (lock_count > 0)
+ fl->fl_type = F_RDLCK;
+ else
+ fl->fl_type = F_WRLCK;
+ fl->fl_start = 0;
+ fl->fl_end = OFFSET_MAX;
}
+ ret = 0;
error:
- inode_unlock(&vnode->vfs_inode);
_leave(" = %d [%hd]", ret, fl->fl_type);
return ret;
}
@@ -574,7 +637,7 @@ error:
*/
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -597,7 +660,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
*/
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
_enter("{%x:%u},%d,{t=%x,fl=%x}",
vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -627,9 +690,13 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
*/
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
_enter("");
+ spin_lock(&vnode->lock);
list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
+ spin_unlock(&vnode->lock);
}
/*
@@ -638,7 +705,12 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
*/
static void afs_fl_release_private(struct file_lock *fl)
{
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
_enter("");
- list_del_init(&fl->fl_u.afs.link);
+ spin_lock(&vnode->lock);
+ afs_dequeue_lock(vnode, fl);
+ _debug("state %u for %p", vnode->lock_state, vnode);
+ spin_unlock(&vnode->lock);
}
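
The need_to_wait path in afs_do_setlk() above follows a common shape: drop the
per-vnode spinlock, sleep until the lock-manager work item moves fl_u.afs.state
away from AFS_LOCK_PENDING, then retake the lock and re-check the state. The
userspace sketch below is illustrative only (the struct and function names are
hypothetical and this is not kernel code); it shows the same drop-wait-retake
pattern using a pthread condition variable.

#include <pthread.h>

enum fl_state { LOCK_PENDING, LOCK_GRANTED, LOCK_FAILED };

struct fl_wait {
        pthread_mutex_t vnode_lock;     /* stands in for vnode->lock */
        pthread_cond_t state_changed;   /* stands in for fl->fl_wait */
        enum fl_state state;            /* stands in for fl_u.afs.state */
};

/* Called with vnode_lock held; returns with it held again. */
static enum fl_state wait_for_grant(struct fl_wait *w)
{
        while (w->state == LOCK_PENDING)
                pthread_cond_wait(&w->state_changed, &w->vnode_lock);
        return w->state;
}
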
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index bd8dcee7e066..e03910cebdd4 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -430,6 +430,16 @@ struct afs_volume {
u8 name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */
};
+enum afs_lock_state {
+ AFS_VNODE_LOCK_NONE, /* The vnode has no lock on the server */
+ AFS_VNODE_LOCK_WAITING_FOR_CB, /* We're waiting for the server to break the callback */
+ AFS_VNODE_LOCK_SETTING, /* We're asking the server for a lock */
+ AFS_VNODE_LOCK_GRANTED, /* We have a lock on the server */
+ AFS_VNODE_LOCK_EXTENDING, /* We're extending a lock on the server */
+ AFS_VNODE_LOCK_NEED_UNLOCK, /* We need to unlock on the server */
+ AFS_VNODE_LOCK_UNLOCKING, /* We're telling the server to unlock */
+};
+
/*
* AFS inode private data
*/
@@ -454,18 +464,16 @@ struct afs_vnode {
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_LOCKING 6 /* set if waiting for lock on vnode */
-#define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */
-#define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */
-#define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */
-#define AFS_VNODE_AUTOCELL 10 /* set if Vnode is an auto mount point */
-#define AFS_VNODE_PSEUDODIR 11 /* set if Vnode is a pseudo directory */
+#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
struct list_head granted_locks; /* locks granted on this file */
struct delayed_work lock_work; /* work to be done in locking */
- struct key *unlock_key; /* key to be used in unlocking */
+ struct key *lock_key; /* Key to be used in lock ops */
+ enum afs_lock_state lock_state : 8;
+ afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
struct afs_cb_interest *cb_interest; /* Server on which this resides */
@@ -843,6 +851,7 @@ extern void afs_clear_permits(struct afs_vnode *);
extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int);
extern void afs_zap_permits(struct rcu_head *);
extern struct key *afs_request_key(struct afs_cell *);
+extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
extern int afs_permission(struct inode *, int);
extern void __exit afs_clean_up_permit_cache(void);
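
The enum above collapses the old READLOCKED/WRITELOCKED/UNLOCKING flag bits into
a single per-vnode state machine for the server-side lock. Purely as an
illustration (a simplified userspace sketch that omits the WAITING_FOR_CB state
and uses made-up names), one legal acquire/extend/release path through such a
machine looks like this:

#include <stdio.h>

enum lock_state {
        LOCK_NONE, LOCK_SETTING, LOCK_GRANTED,
        LOCK_EXTENDING, LOCK_NEED_UNLOCK, LOCK_UNLOCKING,
};

int main(void)
{
        static const enum lock_state path[] = {
                LOCK_NONE, LOCK_SETTING, LOCK_GRANTED,
                LOCK_EXTENDING, LOCK_GRANTED,
                LOCK_NEED_UNLOCK, LOCK_UNLOCKING, LOCK_NONE,
        };
        static const char *name[] = {
                "NONE", "SETTING", "GRANTED",
                "EXTENDING", "NEED_UNLOCK", "UNLOCKING",
        };
        size_t n = sizeof(path) / sizeof(path[0]);

        for (size_t i = 0; i < n; i++)
                printf("%s%s", name[path[i]], i + 1 < n ? " -> " : "\n");
        return 0;
}
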
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index e728ca1776c9..d04511fb3879 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -46,8 +46,7 @@ bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode
return false;
}
- if (test_bit(AFS_VNODE_READLOCKED, &vnode->flags) ||
- test_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
fc->flags |= AFS_FS_CURSOR_CUR_ONLY;
return true;
}
@@ -117,7 +116,7 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
case VSALVAGING: m = "being salvaged"; break;
default: m = "busy"; break;
}
-
+
pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m);
}
@@ -438,24 +437,67 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
_enter("");
- if (!cbi) {
- fc->ac.error = -ESTALE;
+ switch (fc->ac.error) {
+ case SHRT_MAX:
+ if (!cbi) {
+ fc->ac.error = -ESTALE;
+ fc->flags |= AFS_FS_CURSOR_STOP;
+ return false;
+ }
+
+ fc->cbi = afs_get_cb_interest(vnode->cb_interest);
+
+ read_lock(&cbi->server->fs_lock);
+ alist = rcu_dereference_protected(cbi->server->addresses,
+ lockdep_is_held(&cbi->server->fs_lock));
+ afs_get_addrlist(alist);
+ read_unlock(&cbi->server->fs_lock);
+ if (!alist) {
+ fc->ac.error = -ESTALE;
+ fc->flags |= AFS_FS_CURSOR_STOP;
+ return false;
+ }
+
+ fc->ac.alist = alist;
+ fc->ac.addr = NULL;
+ fc->ac.start = READ_ONCE(alist->index);
+ fc->ac.index = fc->ac.start;
+ fc->ac.error = 0;
+ fc->ac.begun = false;
+ goto iterate_address;
+
+ case 0:
+ default:
+ /* Success or local failure. Stop. */
fc->flags |= AFS_FS_CURSOR_STOP;
+ _leave(" = f [okay/local %d]", fc->ac.error);
return false;
- }
- read_lock(&cbi->server->fs_lock);
- alist = afs_get_addrlist(cbi->server->addresses);
- read_unlock(&cbi->server->fs_lock);
- if (!alist) {
- fc->ac.error = -ESTALE;
+ case -ECONNABORTED:
fc->flags |= AFS_FS_CURSOR_STOP;
+ _leave(" = f [abort]");
return false;
+
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ case -ETIMEDOUT:
+ case -ETIME:
+ _debug("no conn");
+ goto iterate_address;
}
- fc->ac.alist = alist;
- fc->ac.error = 0;
- return true;
+iterate_address:
+ /* Iterate over the current server's address list to try and find an
+ * address on which it will respond to us.
+ */
+ if (afs_iterate_addresses(&fc->ac)) {
+ _leave(" = t");
+ return true;
+ }
+
+ afs_end_cursor(&fc->ac);
+ return false;
}
/*
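
The switch on fc->ac.error above drives a simple retry policy: transport-level
failures move the cursor on to the server's next address, while success, a
server abort, or a local error stops the rotation. A rough userspace
illustration of that classification (names and the exact error set are
simplified and not the kernel's):

#include <errno.h>

enum next_step { STEP_STOP, STEP_NEXT_ADDRESS };

static enum next_step classify_result(int error)
{
        switch (error) {
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -ECONNREFUSED:
        case -ETIMEDOUT:
                return STEP_NEXT_ADDRESS;       /* try another address */
        default:
                return STEP_STOP;               /* success, abort or local failure */
        }
}
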
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 46a881a4d08f..2b00097101b3 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -284,8 +284,8 @@ someone_else_changed_it:
* permitted to be accessed with this authorisation, and if so, what access it
* is granted
*/
-static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
- afs_access_t *_access)
+int afs_check_permit(struct afs_vnode *vnode, struct key *key,
+ afs_access_t *_access)
{
struct afs_permits *permits;
bool valid = false;
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 26bad7032bba..0ab3f8457839 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
{
int i;
- if (refcount_dec_and_test(&slist->usage)) {
+ if (slist && refcount_dec_and_test(&slist->usage)) {
for (i = 0; i < slist->nr_servers; i++) {
afs_put_cb_interest(net, slist->servers[i].cb_interest);
afs_put_server(net, slist->servers[i].server);
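
The change above makes afs_put_serverlist() tolerate a NULL pointer, which
simplifies error paths that may or may not have allocated a server list. A
minimal userspace analogue of that NULL-tolerant put (hypothetical types; not
the kernel helper):

#include <stdlib.h>
#include <stdatomic.h>

struct server_list {
        atomic_int usage;
        /* servers ... */
};

static void put_server_list(struct server_list *slist)
{
        /* Dropping the last reference frees the list; NULL is silently ignored. */
        if (slist && atomic_fetch_sub(&slist->usage, 1) == 1)
                free(slist);
}
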
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 875b5eb02242..d3f97da61bdf 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -496,10 +496,10 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
if (ret < 0)
goto error_sb;
as = NULL;
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
} else {
_debug("reuse");
- ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+ ASSERTCMP(sb->s_flags, &, SB_ACTIVE);
afs_destroy_sbi(as);
as = NULL;
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 18e46e31523c..cb5f8a3df577 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -119,6 +119,11 @@ try_again:
}
if (f != t) {
+ if (PageWriteback(page)) {
+ trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
+ page->index, priv);
+ goto flush_conflicting_write;
+ }
if (to < f || from > t)
goto flush_conflicting_write;
if (from < f)
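
The hunk above refuses to merge a new write into a page-private dirty region
that is already under writeback, and keeps the existing rule that two regions
can only be merged when they touch or overlap. The overlap test itself is the
usual interval check, sketched here in isolation (illustrative userspace helper,
inclusive byte offsets assumed):

#include <stdbool.h>

/* Region [f, t] is already dirty; [from, to] is the incoming write. They can
 * only be merged into one region if they are not disjoint. */
static bool ranges_mergeable(unsigned int f, unsigned int t,
                             unsigned int from, unsigned int to)
{
        return !(to < f || from > t);
}
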
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index d79ced925861..82e8f6edfb48 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
pr_debug("waiting for mount name=%pd\n", path->dentry);
status = autofs4_wait(sbi, path, NFY_MOUNT);
pr_debug("mount wait done status=%d\n", status);
- ino->last_used = jiffies;
}
+ ino->last_used = jiffies;
return status;
}
@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
*/
if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
+ struct autofs_info *ino;
struct dentry *new;
new = d_lookup(parent, &dentry->d_name);
if (!new)
return NULL;
- if (new == dentry)
- dput(new);
- else {
- struct autofs_info *ino;
-
- ino = autofs4_dentry_ino(new);
- ino->last_used = jiffies;
- dput(path->dentry);
- path->dentry = new;
- }
+ ino = autofs4_dentry_ino(new);
+ ino->last_used = jiffies;
+ dput(path->dentry);
+ path->dentry = new;
}
return path->dentry;
}
diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog
index 75a461cfaca6..16f2dfe8c2f7 100644
--- a/fs/befs/ChangeLog
+++ b/fs/befs/ChangeLog
@@ -365,7 +365,7 @@ Version 0.4 (2001-10-28)
(fs/befs/super.c)
* Tell the kernel to only mount befs read-only.
- By setting the MS_RDONLY flag in befs_read_super().
+ By setting the SB_RDONLY flag in befs_read_super().
Not that it was possible to write before. But now the kernel won't even try.
(fs/befs/super.c)
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index a92355cc453b..ee236231cafa 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -841,7 +841,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb_rdonly(sb)) {
befs_warning(sb,
"No write support. Marking filesystem read-only");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/*
@@ -948,7 +948,7 @@ static int
befs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if (!(*flags & MS_RDONLY))
+ if (!(*flags & SB_RDONLY))
return -EINVAL;
return 0;
}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b35ce16b3df3..5982c8a71f02 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -295,7 +295,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long len, u64 disk_start,
unsigned long compressed_len,
struct page **compressed_pages,
- unsigned long nr_pages)
+ unsigned long nr_pages,
+ unsigned int write_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
@@ -327,7 +328,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bdev = fs_info->fs_devices->latest_bdev;
bio = btrfs_bio_alloc(bdev, first_byte);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
refcount_set(&cb->pending_bios, 1);
@@ -374,7 +375,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_put(bio);
bio = btrfs_bio_alloc(bdev, first_byte);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -1528,5 +1529,5 @@ unsigned int btrfs_compress_str2level(const char *str)
if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
return str[5] - '0';
- return 0;
+ return BTRFS_ZLIB_DEFAULT_LEVEL;
}
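
With the hunk above, a bare "zlib" (or anything without a valid ":N" suffix)
now falls back to BTRFS_ZLIB_DEFAULT_LEVEL instead of level 0. A standalone
sketch of that parsing shape (userspace, hypothetical helper; the real option
handling lives in btrfs_parse_options()):

#include <string.h>

#define ZLIB_DEFAULT_LEVEL 3    /* mirrors BTRFS_ZLIB_DEFAULT_LEVEL */

static unsigned int str2level(const char *str)
{
        if (strncmp(str, "zlib", 4) != 0)
                return 0;
        /* Accept "zlib:N" for N in 1..9, otherwise use the default level. */
        if (str[4] == ':' && str[5] >= '1' && str[5] <= '9' && str[6] == '\0')
                return str[5] - '0';
        return ZLIB_DEFAULT_LEVEL;
}
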
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index da20755ebf21..0868cc554f14 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -34,6 +34,8 @@
/* Maximum size of data before compression */
#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
+#define BTRFS_ZLIB_DEFAULT_LEVEL 3
+
struct compressed_bio {
/* number of bios pending for this compressed extent */
refcount_t pending_bios;
@@ -91,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long len, u64 disk_start,
unsigned long compressed_len,
struct page **compressed_pages,
- unsigned long nr_pages);
+ unsigned long nr_pages,
+ unsigned int write_flags);
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f7df5536ab61..13c260b525a1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2957,7 +2957,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
*/
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
- return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
+ return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
}
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -3180,6 +3180,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+ unsigned int extra_bits,
struct extent_state **cached_state, int dedupe);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index efce9a2fa9be..10a2a579cc7f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -610,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
- if (found_level == 0 && btrfs_check_leaf(root, eb)) {
+ if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
}
@@ -3848,7 +3848,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
buf->len,
fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(root, buf)) {
+ /*
+	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
+	 * but the item data not yet updated, so here we should only check the
+	 * item pointers, not the item data.
+ */
+ if (btrfs_header_level(buf) == 0 &&
+ btrfs_check_leaf_relaxed(root, buf)) {
btrfs_print_leaf(buf);
ASSERT(0);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7208ecef7088..4497f937e8fb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3502,13 +3502,6 @@ again:
goto again;
}
- /* We've already setup this transaction, go ahead and exit */
- if (block_group->cache_generation == trans->transid &&
- i_size_read(inode)) {
- dcs = BTRFS_DC_SETUP;
- goto out_put;
- }
-
/*
* We want to set the generation to 0, that way if anything goes wrong
* from here on out we know not to trust this cache when we load up next
@@ -3532,6 +3525,13 @@ again:
}
WARN_ON(ret);
+ /* We've already setup this transaction, go ahead and exit */
+ if (block_group->cache_generation == trans->transid &&
+ i_size_read(inode)) {
+ dcs = BTRFS_DC_SETUP;
+ goto out_put;
+ }
+
if (i_size_read(inode) > 0) {
ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 16045ea86fc1..012d63870b99 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
struct btrfs_bio *bbio = NULL;
int ret;
- ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
+ ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
BUG_ON(!mirror_num);
bio = btrfs_io_bio_alloc(1);
@@ -3253,7 +3253,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
delalloc_start,
delalloc_end,
&page_started,
- nr_written);
+ nr_written, wbc);
/* File system has been set read-only */
if (ret) {
SetPageError(page);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4a8861379d3e..93dcae0c3183 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -116,7 +116,8 @@ struct extent_io_ops {
*/
int (*fill_delalloc)(void *private_data, struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written);
+ unsigned long *nr_written,
+ struct writeback_control *wbc);
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -365,10 +366,11 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state);
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
+ u64 end, unsigned int extra_bits,
+ struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
NULL, cached_state, GFP_NOFS);
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f80254d82f40..eb1bac7c8553 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -477,6 +477,47 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
}
}
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+ const u64 start,
+ const u64 len,
+ struct extent_state **cached_state)
+{
+ u64 search_start = start;
+ const u64 end = start + len - 1;
+
+ while (search_start < end) {
+ const u64 search_len = end - search_start + 1;
+ struct extent_map *em;
+ u64 em_len;
+ int ret = 0;
+
+ em = btrfs_get_extent(inode, NULL, 0, search_start,
+ search_len, 0);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start != EXTENT_MAP_HOLE)
+ goto next;
+
+ em_len = em->len;
+ if (em->start < search_start)
+ em_len -= search_start - em->start;
+ if (em_len > search_len)
+ em_len = search_len;
+
+ ret = set_extent_bit(&inode->io_tree, search_start,
+ search_start + em_len - 1,
+ EXTENT_DELALLOC_NEW,
+ NULL, cached_state, GFP_NOFS);
+next:
+ search_start = extent_map_end(em);
+ free_extent_map(em);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/*
* after copy_from_user, pages need to be dirtied and we need to make
* sure holes are created between the current EOF and the start of
@@ -497,14 +538,34 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
u64 end_of_last_block;
u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(inode);
+ unsigned int extra_bits = 0;
start_pos = pos & ~((u64) fs_info->sectorsize - 1);
num_bytes = round_up(write_bytes + pos - start_pos,
fs_info->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
+
+ if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (start_pos >= isize &&
+ !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+ /*
+ * There can't be any extents following eof in this case
+ * so just set the delalloc new bit for the range
+ * directly.
+ */
+ extra_bits |= EXTENT_DELALLOC_NEW;
+ } else {
+ err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+ start_pos,
+ num_bytes, cached);
+ if (err)
+ return err;
+ }
+ }
+
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- cached, 0);
+ extra_bits, cached, 0);
if (err)
return err;
@@ -1404,47 +1465,6 @@ fail:
}
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
- const u64 start,
- const u64 len,
- struct extent_state **cached_state)
-{
- u64 search_start = start;
- const u64 end = start + len - 1;
-
- while (search_start < end) {
- const u64 search_len = end - search_start + 1;
- struct extent_map *em;
- u64 em_len;
- int ret = 0;
-
- em = btrfs_get_extent(inode, NULL, 0, search_start,
- search_len, 0);
- if (IS_ERR(em))
- return PTR_ERR(em);
-
- if (em->block_start != EXTENT_MAP_HOLE)
- goto next;
-
- em_len = em->len;
- if (em->start < search_start)
- em_len -= search_start - em->start;
- if (em_len > search_len)
- em_len = search_len;
-
- ret = set_extent_bit(&inode->io_tree, search_start,
- search_start + em_len - 1,
- EXTENT_DELALLOC_NEW,
- NULL, cached_state, GFP_NOFS);
-next:
- search_start = extent_map_end(em);
- free_extent_map(em);
- if (ret)
- return ret;
- }
- return 0;
-}
-
/*
* This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
@@ -1473,10 +1493,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
+ round_up(pos + write_bytes - start_pos,
fs_info->sectorsize) - 1;
- if (start_pos < inode->vfs_inode.i_size ||
- (inode->flags & BTRFS_INODE_PREALLOC)) {
+ if (start_pos < inode->vfs_inode.i_size) {
struct btrfs_ordered_extent *ordered;
- unsigned int clear_bits;
lock_extent_bits(&inode->io_tree, start_pos, last_pos,
cached_state);
@@ -1498,19 +1516,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
if (ordered)
btrfs_put_ordered_extent(ordered);
- ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
- last_pos - start_pos + 1,
- cached_state);
- clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
- if (ret)
- clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
- clear_extent_bit(&inode->io_tree, start_pos,
- last_pos, clear_bits,
- (clear_bits & EXTENT_LOCKED) ? 1 : 0,
- 0, cached_state, GFP_NOFS);
- if (ret)
- return ret;
+ clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ 0, 0, cached_state, GFP_NOFS);
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
@@ -2048,6 +2057,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
len = (u64)end - (u64)start + 1;
trace_btrfs_sync_file(file, datasync);
+ btrfs_init_log_ctx(&ctx, inode);
+
/*
* We write the dirty pages in the range and wait until they complete
* out of the ->i_mutex. If so, we can flush the dirty pages by
@@ -2194,8 +2205,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
}
trans->sync = true;
- btrfs_init_log_ctx(&ctx, inode);
-
ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
if (ret < 0) {
/* Fallthrough and commit/free transaction. */
@@ -2253,6 +2262,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = btrfs_end_transaction(trans);
}
out:
+ ASSERT(list_empty(&ctx.list));
err = file_check_and_advance_wb_err(file);
if (!ret)
ret = err;
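
btrfs_find_new_delalloc_bytes(), moved above its new caller btrfs_dirty_pages()
in the hunks above, walks the write range and tags only the holes with
EXTENT_DELALLOC_NEW. The length clamping in that loop is easy to get wrong; a
standalone sketch of just that step (hypothetical types, and it assumes the
extent covers search_start, as btrfs_get_extent() guarantees there):

struct extent { unsigned long long start, len; };

/* Return how much of the extent overlaps [search_start, search_start + search_len). */
static unsigned long long overlap_len(const struct extent *em,
                                      unsigned long long search_start,
                                      unsigned long long search_len)
{
        unsigned long long len = em->len;

        if (em->start < search_start)
                len -= search_start - em->start;
        if (len > search_len)
                len = search_len;
        return len;
}
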
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index cdc9f4015ec3..4426d1c73e50 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
/* Lock all pages first so we can lock the extent safely. */
ret = io_ctl_prepare_pages(io_ctl, inode, 0);
if (ret)
- goto out;
+ goto out_unlock;
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
&cached_state);
@@ -1358,6 +1358,7 @@ out_nospc_locked:
out_nospc:
cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
+out_unlock:
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b93fe05a39c7..993061f83067 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -378,6 +378,7 @@ struct async_cow {
struct page *locked_page;
u64 start;
u64 end;
+ unsigned int write_flags;
struct list_head extents;
struct btrfs_work work;
};
@@ -857,7 +858,8 @@ retry:
async_extent->ram_size,
ins.objectid,
ins.offset, async_extent->pages,
- async_extent->nr_pages)) {
+ async_extent->nr_pages,
+ async_cow->write_flags)) {
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
@@ -1191,7 +1193,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written)
+ unsigned long *nr_written,
+ unsigned int write_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct async_cow *async_cow;
@@ -1208,6 +1211,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow->root = root;
async_cow->locked_page = locked_page;
async_cow->start = start;
+ async_cow->write_flags = write_flags;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
!btrfs_test_opt(fs_info, FORCE_COMPRESS))
@@ -1577,11 +1581,13 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
*/
static int run_delalloc_range(void *private_data, struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written)
+ unsigned long *nr_written,
+ struct writeback_control *wbc)
{
struct inode *inode = private_data;
int ret;
int force_cow = need_force_cow(inode, start, end);
+ unsigned int write_flags = wbc_to_write_flags(wbc);
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1596,7 +1602,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
ret = cow_file_range_async(inode, locked_page, start, end,
- page_started, nr_written);
+ page_started, nr_written,
+ write_flags);
}
if (ret)
btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
@@ -2025,11 +2032,12 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+ unsigned int extra_bits,
struct extent_state **cached_state, int dedupe)
{
WARN_ON((end & (PAGE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
- cached_state);
+ extra_bits, cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
@@ -2090,7 +2098,7 @@ again:
goto out;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+ btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
0);
ClearPageChecked(page);
set_page_dirty(page);
@@ -4790,7 +4798,7 @@ again:
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
- ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
+ ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
@@ -5438,6 +5446,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
+ if (location->type != BTRFS_INODE_ITEM_KEY &&
+ location->type != BTRFS_ROOT_ITEM_KEY) {
+ btrfs_warn(root->fs_info,
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+ __func__, name, btrfs_ino(BTRFS_I(dir)),
+ location->objectid, location->type, location->offset);
+ goto out_err;
+ }
out:
btrfs_free_path(path);
return ret;
@@ -5754,8 +5770,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return inode;
}
- BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
-
index = srcu_read_lock(&fs_info->subvol_srcu);
ret = fixup_tree_root_location(fs_info, dir, dentry,
&location, &sub_root);
@@ -9150,7 +9164,7 @@ again:
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
- ret = btrfs_set_extent_delalloc(inode, page_start, end,
+ ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fd172a93d11a..d748ad1c3620 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1172,7 +1172,7 @@ again:
if (!i_done || ret)
goto out;
- if (!(inode->i_sb->s_flags & MS_ACTIVE))
+ if (!(inode->i_sb->s_flags & SB_ACTIVE))
goto out;
/*
@@ -1333,7 +1333,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
* make sure we stop running if someone unmounts
* the FS
*/
- if (!(inode->i_sb->s_flags & MS_ACTIVE))
+ if (!(inode->i_sb->s_flags & SB_ACTIVE))
break;
if (btrfs_defrag_cancelled(fs_info)) {
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 4cf2eb67eba6..f0c3f00e97cb 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3268,7 +3268,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
nr++;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
+ btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL,
+ 0);
set_page_dirty(page);
unlock_extent(&BTRFS_I(inode)->io_tree,
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c10e4c70f02d..20d3300bd268 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3521,7 +3521,40 @@ out:
}
/*
- * Check if ino ino1 is an ancestor of inode ino2 in the given root.
+ * Check if inode ino2, or any of its ancestors, is inode ino1.
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+static int check_ino_in_path(struct btrfs_root *root,
+ const u64 ino1,
+ const u64 ino1_gen,
+ const u64 ino2,
+ const u64 ino2_gen,
+ struct fs_path *fs_path)
+{
+ u64 ino = ino2;
+
+ if (ino1 == ino2)
+ return ino1_gen == ino2_gen;
+
+ while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+ u64 parent;
+ u64 parent_gen;
+ int ret;
+
+ fs_path_reset(fs_path);
+ ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+ if (ret < 0)
+ return ret;
+ if (parent == ino1)
+ return parent_gen == ino1_gen;
+ ino = parent;
+ }
+ return 0;
+}
+
+/*
+ * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
+ * possible path (in case ino2 is not a directory and has multiple hard links).
* Return 1 if true, 0 if false and < 0 on error.
*/
static int is_ancestor(struct btrfs_root *root,
@@ -3530,36 +3563,91 @@ static int is_ancestor(struct btrfs_root *root,
const u64 ino2,
struct fs_path *fs_path)
{
- u64 ino = ino2;
- bool free_path = false;
+ bool free_fs_path = false;
int ret = 0;
+ struct btrfs_path *path = NULL;
+ struct btrfs_key key;
if (!fs_path) {
fs_path = fs_path_alloc();
if (!fs_path)
return -ENOMEM;
- free_path = true;
+ free_fs_path = true;
}
- while (ino > BTRFS_FIRST_FREE_OBJECTID) {
- u64 parent;
- u64 parent_gen;
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
- fs_path_reset(fs_path);
- ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
- if (ret < 0) {
- if (ret == -ENOENT && ino == ino2)
- ret = 0;
- goto out;
+ key.objectid = ino2;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ while (true) {
+ struct extent_buffer *leaf = path->nodes[0];
+ int slot = path->slots[0];
+ u32 cur_offset = 0;
+ u32 item_size;
+
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ break;
+ continue;
}
- if (parent == ino1) {
- ret = parent_gen == ino1_gen ? 1 : 0;
- goto out;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != ino2)
+ break;
+ if (key.type != BTRFS_INODE_REF_KEY &&
+ key.type != BTRFS_INODE_EXTREF_KEY)
+ break;
+
+ item_size = btrfs_item_size_nr(leaf, slot);
+ while (cur_offset < item_size) {
+ u64 parent;
+ u64 parent_gen;
+
+ if (key.type == BTRFS_INODE_EXTREF_KEY) {
+ unsigned long ptr;
+ struct btrfs_inode_extref *extref;
+
+ ptr = btrfs_item_ptr_offset(leaf, slot);
+ extref = (struct btrfs_inode_extref *)
+ (ptr + cur_offset);
+ parent = btrfs_inode_extref_parent(leaf,
+ extref);
+ cur_offset += sizeof(*extref);
+ cur_offset += btrfs_inode_extref_name_len(leaf,
+ extref);
+ } else {
+ parent = key.offset;
+ cur_offset = item_size;
+ }
+
+ ret = get_inode_info(root, parent, NULL, &parent_gen,
+ NULL, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ ret = check_ino_in_path(root, ino1, ino1_gen,
+ parent, parent_gen, fs_path);
+ if (ret)
+ goto out;
}
- ino = parent;
+ path->slots[0]++;
}
+ ret = 0;
out:
- if (free_path)
+ btrfs_free_path(path);
+ if (free_fs_path)
fs_path_free(fs_path);
return ret;
}
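
check_ino_in_path() above encapsulates the old is_ancestor() walk: start at ino2
and follow first-parent references upwards until either ino1 is reached or the
walk runs out of parents. A stripped-down userspace model of that walk
(generation checks omitted, parent lookup abstracted behind a callback; names
are made up):

#include <stdbool.h>

typedef bool (*get_parent_fn)(unsigned long long ino, unsigned long long *parent);

/* Returns 1 if ino1 is on the parent path of ino2, 0 if not, -1 on lookup error. */
static int ino_in_path(unsigned long long ino1, unsigned long long ino2,
                       unsigned long long first_free_objectid,
                       get_parent_fn get_parent)
{
        unsigned long long ino = ino2;

        if (ino1 == ino2)
                return 1;
        while (ino > first_free_objectid) {
                unsigned long long parent;

                if (!get_parent(ino, &parent))
                        return -1;
                if (parent == ino1)
                        return 1;
                ino = parent;
        }
        return 0;
}
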
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 65af029559b5..3a4dce153645 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -107,7 +107,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
return;
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
btrfs_info(fs_info, "forced readonly");
/*
* Note that a running device replace operation is not
@@ -137,7 +137,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
/*
* Special case: if the error is EROFS, and we're already
- * under MS_RDONLY, then it is safe here.
+ * under SB_RDONLY, then it is safe here.
*/
if (errno == -EROFS && sb_rdonly(sb))
return;
@@ -168,7 +168,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
/* Don't go through full error handling during mount */
- if (sb->s_flags & MS_BORN)
+ if (sb->s_flags & SB_BORN)
btrfs_handle_error(fs_info);
}
@@ -507,9 +507,18 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
token == Opt_compress_force ||
strncmp(args[0].from, "zlib", 4) == 0) {
compress_type = "zlib";
+
info->compress_type = BTRFS_COMPRESS_ZLIB;
- info->compress_level =
- btrfs_compress_str2level(args[0].from);
+ info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+ /*
+ * args[0] contains uninitialized data since
+ * for these tokens we don't expect any
+ * parameter.
+ */
+ if (token != Opt_compress &&
+ token != Opt_compress_force)
+ info->compress_level =
+ btrfs_compress_str2level(args[0].from);
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -625,7 +634,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
break;
case Opt_acl:
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- info->sb->s_flags |= MS_POSIXACL;
+ info->sb->s_flags |= SB_POSIXACL;
break;
#else
btrfs_err(info, "support for ACL not compiled in!");
@@ -633,7 +642,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
goto out;
#endif
case Opt_noacl:
- info->sb->s_flags &= ~MS_POSIXACL;
+ info->sb->s_flags &= ~SB_POSIXACL;
break;
case Opt_notreelog:
btrfs_set_and_info(info, NOTREELOG,
@@ -851,7 +860,7 @@ check:
/*
* Extra check for current option against current flag
*/
- if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+ if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
btrfs_err(info,
"nologreplay must be used with ro mount option");
ret = -EINVAL;
@@ -1147,7 +1156,7 @@ static int btrfs_fill_super(struct super_block *sb,
sb->s_xattr = btrfs_xattr_handlers;
sb->s_time_gran = 1;
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
sb->s_flags |= SB_I_VERSION;
sb->s_iflags |= SB_I_CGROUPWB;
@@ -1180,7 +1189,7 @@ static int btrfs_fill_super(struct super_block *sb,
}
cleancache_init_fs(sb);
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
return 0;
fail_close:
@@ -1277,7 +1286,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard");
- if (!(info->sb->s_flags & MS_POSIXACL))
+ if (!(info->sb->s_flags & SB_POSIXACL))
seq_puts(seq, ",noacl");
if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache");
@@ -1409,11 +1418,11 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
- if (flags & MS_RDONLY) {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY,
+ if (flags & SB_RDONLY) {
+ mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
device_name, newargs);
} else {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY,
+ mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
device_name, newargs);
if (IS_ERR(mnt)) {
root = ERR_CAST(mnt);
@@ -1565,7 +1574,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
u64 subvol_objectid = 0;
int error = 0;
- if (!(flags & MS_RDONLY))
+ if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
error = btrfs_parse_early_options(data, mode, fs_type,
@@ -1619,13 +1628,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (error)
goto error_fs_info;
- if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+ if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
error = -EACCES;
goto error_close_devices;
}
bdev = fs_devices->latest_bdev;
- s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC,
+ s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
fs_info);
if (IS_ERR(s)) {
error = PTR_ERR(s);
@@ -1635,7 +1644,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (s->s_root) {
btrfs_close_devices(fs_devices);
free_fs_info(fs_info);
- if ((flags ^ s->s_flags) & MS_RDONLY)
+ if ((flags ^ s->s_flags) & SB_RDONLY)
error = -EBUSY;
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
@@ -1702,11 +1711,11 @@ static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
{
if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
(!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
- (flags & MS_RDONLY))) {
+ (flags & SB_RDONLY))) {
/* wait for any defraggers to finish */
wait_event(fs_info->transaction_wait,
(atomic_read(&fs_info->defrag_running) == 0));
- if (flags & MS_RDONLY)
+ if (flags & SB_RDONLY)
sync_filesystem(fs_info->sb);
}
}
@@ -1766,10 +1775,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_resize_thread_pool(fs_info,
fs_info->thread_pool_size, old_thread_pool_size);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
/*
* this also happens on 'umount -rf' or on shutdown, when
* the filesystem is busy.
@@ -1781,10 +1790,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
/* avoid complaints from lockdep et al. */
up(&fs_info->uuid_tree_rescan_sem);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
- * Setting MS_RDONLY will put the cleaner thread to
+ * Setting SB_RDONLY will put the cleaner thread to
* sleep at the next loop if it's already active.
* If it's already asleep, we'll leave unused block
* groups on disk until we're mounted read-write again
@@ -1856,7 +1865,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
}
@@ -1866,9 +1875,9 @@ out:
return 0;
restore:
- /* We've hit an error - don't reset MS_RDONLY */
+ /* We've hit an error - don't reset SB_RDONLY */
if (sb_rdonly(sb))
- old_flags |= MS_RDONLY;
+ old_flags |= SB_RDONLY;
sb->s_flags = old_flags;
fs_info->mount_opt = old_opts;
fs_info->compress_type = old_compress_type;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index d06b1c931d05..2e7f64a3b22b 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -114,7 +114,7 @@ static int test_find_delalloc(u32 sectorsize)
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
+ set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
start = 0;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -145,7 +145,7 @@ static int test_find_delalloc(u32 sectorsize)
test_msg("Couldn't find the locked page\n");
goto out_bits;
}
- set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
+ set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -200,7 +200,7 @@ static int test_find_delalloc(u32 sectorsize)
*
* We are re-using our test_start from above since it works out well.
*/
- set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
+ set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index f797642c013d..30affb60da51 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
btrfs_test_inode_set_ops(inode);
/* [BTRFS_MAX_EXTENT_SIZE] */
- ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
+ ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0,
NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -984,7 +984,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
- NULL, 0);
+ 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1018,7 +1018,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1)
+ sectorsize - 1,
- NULL, 0);
+ 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1036,7 +1036,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
(BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
- NULL, 0);
+ 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1053,7 +1053,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
*/
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1089,7 +1089,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
*/
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 114fc5f0ecc5..ce4ed6ec8f39 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -242,7 +242,8 @@ static int check_leaf_item(struct btrfs_root *root,
return ret;
}
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+ bool check_item_data)
{
struct btrfs_fs_info *fs_info = root->fs_info;
/* No valid key type is 0, so all key should be larger than this key */
@@ -361,10 +362,15 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
return -EUCLEAN;
}
- /* Check if the item size and content meet other criteria */
- ret = check_leaf_item(root, leaf, &key, slot);
- if (ret < 0)
- return ret;
+ if (check_item_data) {
+ /*
+ * Check if the item size and content meet other
+ * criteria
+ */
+ ret = check_leaf_item(root, leaf, &key, slot);
+ if (ret < 0)
+ return ret;
+ }
prev_key.objectid = key.objectid;
prev_key.type = key.type;
@@ -374,6 +380,17 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
return 0;
}
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, false);
+}
+
int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
{
unsigned long nr = btrfs_header_nritems(node);
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
index 96c486e95d70..3d53e8d6fda0 100644
--- a/fs/btrfs/tree-checker.h
+++ b/fs/btrfs/tree-checker.h
@@ -20,7 +20,19 @@
#include "ctree.h"
#include "extent_io.h"
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf);
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf);
int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
#endif
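
The full/relaxed split above is the common "one worker, two thin wrappers"
shape: a single checker takes a flag saying whether item data should be
examined, and callers pick the strictness they can afford (read completion
checks everything, btrfs_mark_buffer_dirty() only checks item pointers).
Sketched abstractly below (hypothetical types, not the btrfs functions):

struct leaf;

static int check_leaf(const struct leaf *leaf, int check_item_data)
{
        int ret = 0;

        /* ... verify the item pointers ... */
        if (check_item_data) {
                /* ... additionally verify the item data ... */
        }
        return ret;
}

static int check_leaf_full(const struct leaf *leaf)
{
        return check_leaf(leaf, 1);
}

static int check_leaf_relaxed(const struct leaf *leaf)
{
        return check_leaf(leaf, 0);
}
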
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index aa7c71cff575..7bf9b31561db 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4102,7 +4102,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
if (ordered_io_err) {
ctx->io_err = -EIO;
- return 0;
+ return ctx->io_err;
}
btrfs_init_map_token(&token);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f1ecb938ba4d..49810b70afd3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -189,6 +189,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
struct btrfs_device, dev_list);
list_del(&device->dev_list);
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
}
kfree(fs_devices);
@@ -578,6 +579,7 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
fs_devs->num_devices--;
list_del(&dev->dev_list);
rcu_string_free(dev->name);
+ bio_put(dev->flush_bio);
kfree(dev);
}
break;
@@ -630,6 +632,7 @@ static noinline int device_list_add(const char *path,
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
return -ENOMEM;
}
@@ -742,6 +745,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
name = rcu_string_strdup(orig_dev->name->str,
GFP_KERNEL);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
goto error;
}
@@ -807,6 +811,7 @@ again:
list_del_init(&device->dev_list);
fs_devices->num_devices--;
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
}
@@ -1750,20 +1755,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
key.offset = device->devid;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto out;
-
- if (ret > 0) {
- ret = -ENOENT;
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
goto out;
}
ret = btrfs_del_item(trans, root, path);
- if (ret)
- goto out;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ }
+
out:
btrfs_free_path(path);
- btrfs_commit_transaction(trans);
+ if (!ret)
+ ret = btrfs_commit_transaction(trans);
return ret;
}
@@ -1993,7 +2002,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
fs_devices = srcdev->fs_devices;
list_del_rcu(&srcdev->dev_list);
- list_del_rcu(&srcdev->dev_alloc_list);
+ list_del(&srcdev->dev_alloc_list);
fs_devices->num_devices--;
if (srcdev->missing)
fs_devices->missing_devices--;
@@ -2349,6 +2358,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
ret = -ENOMEM;
goto error;
@@ -2358,6 +2368,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
ret = PTR_ERR(trans);
goto error;
@@ -2384,7 +2395,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
if (seeding_dev) {
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
ret = btrfs_prepare_sprout(fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -2497,10 +2508,11 @@ error_sysfs:
btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
error_trans:
if (seeding_dev)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
if (trans)
btrfs_end_transaction(trans);
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
error:
blkdev_put(bdev, FMODE_EXCL);
@@ -2567,6 +2579,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
ret = -ENOMEM;
goto error;
@@ -6284,6 +6297,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
ret = find_next_devid(fs_info, &tmp);
if (ret) {
+ bio_put(dev->flush_bio);
kfree(dev);
return ERR_PTR(ret);
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ff5d32cf9578..a14b2c974c9e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1160,7 +1160,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
struct cap_msg_args arg;
- int held, revoking, dropping;
+ int held, revoking;
int wake = 0;
int delayed = 0;
int ret;
@@ -1168,7 +1168,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
held = cap->issued | cap->implemented;
revoking = cap->implemented & ~cap->issued;
retain &= ~revoking;
- dropping = cap->issued & ~retain;
dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
inode, cap, cap->session,
@@ -1712,7 +1711,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
/* if we are unmounting, flush any unused caps immediately. */
if (mdsc->stopping)
- is_delayed = 1;
+ is_delayed = true;
spin_lock(&ci->i_ceph_lock);
@@ -3189,8 +3188,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
int dirty = le32_to_cpu(m->dirty);
int cleaned = 0;
bool drop = false;
- bool wake_ci = 0;
- bool wake_mdsc = 0;
+ bool wake_ci = false;
+ bool wake_mdsc = false;
list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
if (cf->tid == flush_tid)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index f2550a076edc..ab81652198c4 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -493,6 +493,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_wb_ref = 0;
ci->i_wrbuffer_ref = 0;
ci->i_wrbuffer_ref_head = 0;
+ atomic_set(&ci->i_filelock_ref, 0);
ci->i_shared_gen = 0;
ci->i_rdcache_gen = 0;
ci->i_rdcache_revoking = 0;
@@ -786,7 +787,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
/* update inode */
ci->i_version = le64_to_cpu(info->version);
- inode->i_version++;
inode->i_rdev = le32_to_cpu(info->rdev);
inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
@@ -1185,6 +1185,7 @@ retry_lookup:
ceph_snap(d_inode(dn)) != tvino.snap)) {
dout(" dn %p points to wrong inode %p\n",
dn, d_inode(dn));
+ ceph_dir_clear_ordered(dir);
d_delete(dn);
dput(dn);
goto retry_lookup;
@@ -1322,6 +1323,7 @@ retry_lookup:
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
dn, d_inode(dn), ceph_vinop(d_inode(dn)),
ceph_vinop(in));
+ ceph_dir_clear_ordered(dir);
d_invalidate(dn);
have_lease = false;
}
@@ -1573,6 +1575,7 @@ retry_lookup:
ceph_snap(d_inode(dn)) != tvino.snap)) {
dout(" dn %p points to wrong inode %p\n",
dn, d_inode(dn));
+ __ceph_dir_clear_ordered(ci);
d_delete(dn);
dput(dn);
goto retry_lookup;
@@ -1597,7 +1600,9 @@ retry_lookup:
&req->r_caps_reservation);
if (ret < 0) {
pr_err("fill_inode badness on %p\n", in);
- if (d_really_is_negative(dn))
+ if (d_really_is_positive(dn))
+ __ceph_dir_clear_ordered(ci);
+ else
iput(in);
d_drop(dn);
err = ret;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index e7cce412f2cf..9e66f69ee8a5 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -30,19 +30,52 @@ void __init ceph_flock_init(void)
get_random_bytes(&lock_secret, sizeof(lock_secret));
}
+static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
+{
+ struct inode *inode = file_inode(src->fl_file);
+ atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+}
+
+static void ceph_fl_release_lock(struct file_lock *fl)
+{
+ struct inode *inode = file_inode(fl->fl_file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ if (atomic_dec_and_test(&ci->i_filelock_ref)) {
+ /* clear error when all locks are released */
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
+ spin_unlock(&ci->i_ceph_lock);
+ }
+}
+
+static const struct file_lock_operations ceph_fl_lock_ops = {
+ .fl_copy_lock = ceph_fl_copy_lock,
+ .fl_release_private = ceph_fl_release_lock,
+};
+
/**
* Implement fcntl and flock locking functions.
*/
-static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
+static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
int cmd, u8 wait, struct file_lock *fl)
{
- struct inode *inode = file_inode(file);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
int err;
u64 length = 0;
u64 owner;
+ if (operation == CEPH_MDS_OP_SETFILELOCK) {
+ /*
+ * increasing i_filelock_ref closes race window between
+ * handling request reply and adding file_lock struct to
+ * inode. Otherwise, auth caps may get trimmed in the
+ * window. Caller function will decrease the counter.
+ */
+ fl->fl_ops = &ceph_fl_lock_ops;
+ atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+ }
+
if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
wait = 0;
@@ -180,10 +213,12 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
*/
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
- u8 lock_cmd;
- int err;
- u8 wait = 0;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int err = 0;
u16 op = CEPH_MDS_OP_SETFILELOCK;
+ u8 wait = 0;
+ u8 lock_cmd;
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
@@ -199,6 +234,26 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
else if (IS_SETLKW(cmd))
wait = 1;
+ spin_lock(&ci->i_ceph_lock);
+ if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
+ err = -EIO;
+ } else if (op == CEPH_MDS_OP_SETFILELOCK) {
+ /*
+ * increasing i_filelock_ref closes race window between
+ * handling request reply and adding file_lock struct to
+ * inode. Otherwise, i_auth_cap may get trimmed in the
+ * window. Caller function will decrease the counter.
+ */
+ fl->fl_ops = &ceph_fl_lock_ops;
+ atomic_inc(&ci->i_filelock_ref);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ if (err < 0) {
+ if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
+ posix_lock_file(file, fl, NULL);
+ return err;
+ }
+
if (F_RDLCK == fl->fl_type)
lock_cmd = CEPH_LOCK_SHARED;
else if (F_WRLCK == fl->fl_type)
@@ -206,16 +261,16 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
else
lock_cmd = CEPH_LOCK_UNLOCK;
- err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
+ err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
if (!err) {
- if (op != CEPH_MDS_OP_GETFILELOCK) {
+ if (op == CEPH_MDS_OP_SETFILELOCK) {
dout("mds locked, locking locally");
err = posix_lock_file(file, fl, NULL);
- if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
+ if (err) {
/* undo! This should only happen if
* the kernel detects local
* deadlock. */
- ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+ ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on posix_lock_file, undid lock",
err);
@@ -227,9 +282,11 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
- u8 lock_cmd;
- int err;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int err = 0;
u8 wait = 0;
+ u8 lock_cmd;
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
@@ -239,6 +296,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
dout("ceph_flock, fl_file: %p", fl->fl_file);
+ spin_lock(&ci->i_ceph_lock);
+ if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
+ err = -EIO;
+ } else {
+ /* see comment in ceph_lock */
+ fl->fl_ops = &ceph_fl_lock_ops;
+ atomic_inc(&ci->i_filelock_ref);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ if (err < 0) {
+ if (F_UNLCK == fl->fl_type)
+ locks_lock_file_wait(file, fl);
+ return err;
+ }
+
if (IS_SETLKW(cmd))
wait = 1;
@@ -250,13 +322,13 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
lock_cmd = CEPH_LOCK_UNLOCK;
err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
- file, lock_cmd, wait, fl);
+ inode, lock_cmd, wait, fl);
if (!err) {
err = locks_lock_file_wait(file, fl);
if (err) {
ceph_lock_message(CEPH_LOCK_FLOCK,
CEPH_MDS_OP_SETFILELOCK,
- file, CEPH_LOCK_UNLOCK, 0, fl);
+ inode, CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on locks_lock_file_wait, undid lock", err);
}
}
@@ -288,6 +360,37 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
*flock_count, *fcntl_count);
}
+/*
+ * Given a pointer to a lock, convert it to a ceph filelock
+ */
+static int lock_to_ceph_filelock(struct file_lock *lock,
+ struct ceph_filelock *cephlock)
+{
+ int err = 0;
+ cephlock->start = cpu_to_le64(lock->fl_start);
+ cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
+ cephlock->client = cpu_to_le64(0);
+ cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
+ cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
+
+ switch (lock->fl_type) {
+ case F_RDLCK:
+ cephlock->type = CEPH_LOCK_SHARED;
+ break;
+ case F_WRLCK:
+ cephlock->type = CEPH_LOCK_EXCL;
+ break;
+ case F_UNLCK:
+ cephlock->type = CEPH_LOCK_UNLOCK;
+ break;
+ default:
+ dout("Have unknown lock type %d", lock->fl_type);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
/**
* Encode the flock and fcntl locks for the given inode into the ceph_filelock
* array. Must be called with inode->i_lock already held.
@@ -356,50 +459,22 @@ int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
if (err)
goto out_fail;
- err = ceph_pagelist_append(pagelist, flocks,
- num_fcntl_locks * sizeof(*flocks));
- if (err)
- goto out_fail;
+ if (num_fcntl_locks > 0) {
+ err = ceph_pagelist_append(pagelist, flocks,
+ num_fcntl_locks * sizeof(*flocks));
+ if (err)
+ goto out_fail;
+ }
nlocks = cpu_to_le32(num_flock_locks);
err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
if (err)
goto out_fail;
- err = ceph_pagelist_append(pagelist,
- &flocks[num_fcntl_locks],
- num_flock_locks * sizeof(*flocks));
-out_fail:
- return err;
-}
-
-/*
- * Given a pointer to a lock, convert it to a ceph filelock
- */
-int lock_to_ceph_filelock(struct file_lock *lock,
- struct ceph_filelock *cephlock)
-{
- int err = 0;
- cephlock->start = cpu_to_le64(lock->fl_start);
- cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
- cephlock->client = cpu_to_le64(0);
- cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
- cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
-
- switch (lock->fl_type) {
- case F_RDLCK:
- cephlock->type = CEPH_LOCK_SHARED;
- break;
- case F_WRLCK:
- cephlock->type = CEPH_LOCK_EXCL;
- break;
- case F_UNLCK:
- cephlock->type = CEPH_LOCK_UNLOCK;
- break;
- default:
- dout("Have unknown lock type %d", lock->fl_type);
- err = -EINVAL;
+ if (num_flock_locks > 0) {
+ err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
+ num_flock_locks * sizeof(*flocks));
}
-
+out_fail:
return err;
}
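
The locks.c hunks above pin per-inode file-lock state with i_filelock_ref before the MDS request is sent, and make later lock calls fail with -EIO once CEPH_I_ERROR_FILELOCK has been set (unlock requests still release local state first). As a rough illustration of that lifecycle, here is a minimal userspace sketch, assuming a toy inode with a mutex, an atomic counter and an error flag; none of these names are the kernel's:

/*
 * Userspace sketch (not kernel code) of the pattern above: a per-inode
 * reference count that pins file-lock state while a lock request is in
 * flight, plus an error flag that makes further lock requests fail with
 * -EIO until the last holder drops its reference.
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
	pthread_mutex_t lock;      /* stands in for ci->i_ceph_lock */
	atomic_int filelock_ref;   /* stands in for i_filelock_ref  */
	bool error_filelock;       /* stands in for the error flag  */
};

/* Taken before the lock request goes out: pin the state, or fail. */
static int filelock_get(struct toy_inode *in)
{
	int err = 0;

	pthread_mutex_lock(&in->lock);
	if (in->error_filelock)
		err = -EIO;                       /* locks already dropped */
	else
		atomic_fetch_add(&in->filelock_ref, 1);
	pthread_mutex_unlock(&in->lock);
	return err;
}

/* Dropped when the file_lock goes away: last holder clears the error. */
static void filelock_put(struct toy_inode *in)
{
	if (atomic_fetch_sub(&in->filelock_ref, 1) == 1) {
		pthread_mutex_lock(&in->lock);
		in->error_filelock = false;
		pthread_mutex_unlock(&in->lock);
	}
}

int main(void)
{
	struct toy_inode in = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.filelock_ref = 0,
		.error_filelock = false,
	};

	if (filelock_get(&in) == 0) {
		printf("ref taken: %d\n", atomic_load(&in.filelock_ref));
		filelock_put(&in);
	}
	printf("ref now: %d\n", atomic_load(&in.filelock_ref));
	return 0;
}

The error flag is only cleared once the count returns to zero, mirroring ceph_fl_release_lock() in the diff.
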
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 0687ab3c3267..ab69dcb70e8a 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1039,22 +1039,23 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
* session caps
*/
-/* caller holds s_cap_lock, we drop it */
-static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
- __releases(session->s_cap_lock)
+static void detach_cap_releases(struct ceph_mds_session *session,
+ struct list_head *target)
{
- LIST_HEAD(tmp_list);
- list_splice_init(&session->s_cap_releases, &tmp_list);
+ lockdep_assert_held(&session->s_cap_lock);
+
+ list_splice_init(&session->s_cap_releases, target);
session->s_num_cap_releases = 0;
- spin_unlock(&session->s_cap_lock);
+ dout("dispose_cap_releases mds%d\n", session->s_mds);
+}
- dout("cleanup_cap_releases mds%d\n", session->s_mds);
- while (!list_empty(&tmp_list)) {
+static void dispose_cap_releases(struct ceph_mds_client *mdsc,
+ struct list_head *dispose)
+{
+ while (!list_empty(dispose)) {
struct ceph_cap *cap;
/* zero out the in-progress message */
- cap = list_first_entry(&tmp_list,
- struct ceph_cap, session_caps);
+ cap = list_first_entry(dispose, struct ceph_cap, session_caps);
list_del(&cap->session_caps);
ceph_put_cap(mdsc, cap);
}
@@ -1215,6 +1216,13 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
}
spin_unlock(&mdsc->cap_dirty_lock);
+ if (atomic_read(&ci->i_filelock_ref) > 0) {
+ /* make further file lock syscall return -EIO */
+ ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
+ pr_warn_ratelimited(" dropping file locks for %p %lld\n",
+ inode, ceph_ino(inode));
+ }
+
if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
@@ -1244,6 +1252,8 @@ static void remove_session_caps(struct ceph_mds_session *session)
{
struct ceph_fs_client *fsc = session->s_mdsc->fsc;
struct super_block *sb = fsc->sb;
+ LIST_HEAD(dispose);
+
dout("remove_session_caps on %p\n", session);
iterate_session_caps(session, remove_session_caps_cb, fsc);
@@ -1278,10 +1288,12 @@ static void remove_session_caps(struct ceph_mds_session *session)
}
// drop cap expires and unlock s_cap_lock
- cleanup_cap_releases(session->s_mdsc, session);
+ detach_cap_releases(session, &dispose);
BUG_ON(session->s_nr_caps > 0);
BUG_ON(!list_empty(&session->s_cap_flushing));
+ spin_unlock(&session->s_cap_lock);
+ dispose_cap_releases(session->s_mdsc, &dispose);
}
/*
@@ -1462,6 +1474,11 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
goto out;
if ((used | wanted) & CEPH_CAP_ANY_WR)
goto out;
+ /* Note: it's possible that i_filelock_ref becomes non-zero
+ * after dropping auth caps. It doesn't hurt because reply
+ * of lock mds request will re-add auth caps. */
+ if (atomic_read(&ci->i_filelock_ref) > 0)
+ goto out;
}
/* The inode has cached pages, but it's no longer used.
* we can safely drop it */
@@ -2827,7 +2844,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
} rec;
- struct ceph_inode_info *ci;
+ struct ceph_inode_info *ci = cap->ci;
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
char *path;
@@ -2836,8 +2853,6 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
u64 snap_follows;
struct dentry *dentry;
- ci = cap->ci;
-
dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
inode, ceph_vinop(inode), cap, cap->cap_id,
ceph_cap_string(cap->issued));
@@ -2870,7 +2885,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v2.pathbase = cpu_to_le64(pathbase);
- rec.v2.flock_len = 0;
+ rec.v2.flock_len = (__force __le32)
+ ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
@@ -2894,26 +2910,37 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
if (recon_state->msg_version >= 2) {
int num_fcntl_locks, num_flock_locks;
- struct ceph_filelock *flocks;
+ struct ceph_filelock *flocks = NULL;
size_t struct_len, total_len = 0;
u8 struct_v = 0;
encode_again:
- ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
- flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
- sizeof(struct ceph_filelock), GFP_NOFS);
- if (!flocks) {
- err = -ENOMEM;
- goto out_free;
+ if (rec.v2.flock_len) {
+ ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+ } else {
+ num_fcntl_locks = 0;
+ num_flock_locks = 0;
}
- err = ceph_encode_locks_to_buffer(inode, flocks,
- num_fcntl_locks,
- num_flock_locks);
- if (err) {
+ if (num_fcntl_locks + num_flock_locks > 0) {
+ flocks = kmalloc((num_fcntl_locks + num_flock_locks) *
+ sizeof(struct ceph_filelock), GFP_NOFS);
+ if (!flocks) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ err = ceph_encode_locks_to_buffer(inode, flocks,
+ num_fcntl_locks,
+ num_flock_locks);
+ if (err) {
+ kfree(flocks);
+ flocks = NULL;
+ if (err == -ENOSPC)
+ goto encode_again;
+ goto out_free;
+ }
+ } else {
kfree(flocks);
- if (err == -ENOSPC)
- goto encode_again;
- goto out_free;
+ flocks = NULL;
}
if (recon_state->msg_version >= 3) {
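
In the hunk above, encode_caps_cb() recounts and retries when the encode step reports -ENOSPC (more locks appeared between counting and encoding), and it skips the allocation entirely when there are no locks or when CEPH_I_ERROR_FILELOCK already dropped them. A toy version of that count/allocate/encode/retry loop, with a fake data source standing in for ceph_count_locks(), might look like this:

/*
 * Sketch of the count/allocate/encode retry: an -ENOSPC from the encode
 * step restarts the loop, and a zero count skips the allocation.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int current_count = 3;     /* pretend lock count; may change */

static int encode_into(int *buf, int room)
{
	if (current_count > room)
		return -ENOSPC;        /* more items appeared; caller retries */
	for (int i = 0; i < current_count; i++)
		buf[i] = i;
	return current_count;
}

int main(void)
{
	int *buf = NULL;
	int n;

encode_again:
	n = current_count;
	free(buf);
	buf = NULL;
	if (n > 0) {
		buf = malloc(n * sizeof(*buf));
		if (!buf)
			return 1;
		int got = encode_into(buf, n);
		if (got == -ENOSPC)
			goto encode_again;
		printf("encoded %d entries\n", got);
	} else {
		printf("nothing to encode\n");
	}
	free(buf);
	return 0;
}
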
@@ -2993,6 +3020,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
int s_nr_caps;
struct ceph_pagelist *pagelist;
struct ceph_reconnect_state recon_state;
+ LIST_HEAD(dispose);
pr_info("mds%d reconnect start\n", mds);
@@ -3026,7 +3054,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
*/
session->s_cap_reconnect = 1;
/* drop old cap expires; we're about to reestablish that state */
- cleanup_cap_releases(mdsc, session);
+ detach_cap_releases(session, &dispose);
+ spin_unlock(&session->s_cap_lock);
+ dispose_cap_releases(mdsc, &dispose);
/* trim unused caps to reduce MDS's cache rejoin time */
if (mdsc->fsc->sb->s_root)
@@ -3857,14 +3887,14 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
goto err_out;
}
return;
+
bad:
pr_err("error decoding fsmap\n");
err_out:
mutex_lock(&mdsc->mutex);
- mdsc->mdsmap_err = -ENOENT;
+ mdsc->mdsmap_err = err;
__wake_requests(mdsc, &mdsc->waiting_for_map);
mutex_unlock(&mdsc->mutex);
- return;
}
/*
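
The mds_client.c changes split cleanup_cap_releases() so that only the list splice happens under s_cap_lock, while the actual freeing runs after the caller has dropped the lock itself. A standalone sketch of that detach-under-lock / dispose-outside-lock shape (illustrative types, not the kernel's list API):

/*
 * detach only splices the list while the lock is held; dispose frees
 * entries after the caller has dropped the lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cap {
	struct cap *next;
	int id;
};

struct session {
	pthread_mutex_t cap_lock;
	struct cap *cap_releases;    /* protected by cap_lock */
};

/* caller holds cap_lock (the kernel asserts this with lockdep) */
static void detach_caps(struct session *s, struct cap **target)
{
	*target = s->cap_releases;
	s->cap_releases = NULL;
}

/* caller has dropped cap_lock; slow work is safe here */
static void dispose_caps(struct cap *list)
{
	while (list) {
		struct cap *cap = list;
		list = cap->next;
		printf("freeing cap %d\n", cap->id);
		free(cap);
	}
}

int main(void)
{
	struct session s = { .cap_lock = PTHREAD_MUTEX_INITIALIZER };
	struct cap *detached = NULL;

	for (int i = 0; i < 3; i++) {
		struct cap *c = malloc(sizeof(*c));
		if (!c)
			return 1;
		c->id = i;
		c->next = s.cap_releases;
		s.cap_releases = c;
	}

	pthread_mutex_lock(&s.cap_lock);
	detach_caps(&s, &detached);
	pthread_mutex_unlock(&s.cap_lock);

	dispose_caps(detached);
	return 0;
}
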
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index e4082afedcb1..a62d2a9841dc 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -84,8 +84,9 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_ffree = -1;
buf->f_namelen = NAME_MAX;
- /* leave fsid little-endian, regardless of host endianness */
- fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
+ /* Must convert the fsid, for consistent values across arches */
+ fsid = le64_to_cpu(*(__le64 *)(&monmap->fsid)) ^
+ le64_to_cpu(*((__le64 *)&monmap->fsid + 1));
buf->f_fsid.val[0] = fsid & 0xffffffff;
buf->f_fsid.val[1] = fsid >> 32;
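
The statfs hunk folds the 16-byte fsid with explicit little-endian conversion, so big- and little-endian hosts report the same f_fsid. A small standalone example of the same folding, with a hypothetical byte-wise load helper rather than the kernel's le64_to_cpu():

/* Fold a 16-byte id into 64 bits, reading both halves as little-endian. */
#include <stdint.h>
#include <stdio.h>

static uint64_t load_le64(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	unsigned char fsid[16] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
		0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
	};
	uint64_t folded = load_le64(fsid) ^ load_le64(fsid + 8);

	printf("f_fsid.val[0] = 0x%08x\n", (unsigned)(folded & 0xffffffff));
	printf("f_fsid.val[1] = 0x%08x\n", (unsigned)(folded >> 32));
	return 0;
}
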
@@ -330,11 +331,11 @@ static int parse_fsopt_token(char *c, void *private)
break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
case Opt_acl:
- fsopt->sb_flags |= MS_POSIXACL;
+ fsopt->sb_flags |= SB_POSIXACL;
break;
#endif
case Opt_noacl:
- fsopt->sb_flags &= ~MS_POSIXACL;
+ fsopt->sb_flags &= ~SB_POSIXACL;
break;
default:
BUG_ON(token);
@@ -519,7 +520,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",nopoolperm");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
- if (fsopt->sb_flags & MS_POSIXACL)
+ if (fsopt->sb_flags & SB_POSIXACL)
seq_puts(m, ",acl");
else
seq_puts(m, ",noacl");
@@ -987,7 +988,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
dout("ceph_mount\n");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
- flags |= MS_POSIXACL;
+ flags |= SB_POSIXACL;
#endif
err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
if (err < 0) {
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3e27a28aa44a..2beeec07fa76 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -352,6 +352,7 @@ struct ceph_inode_info {
int i_pin_ref;
int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
int i_wrbuffer_ref, i_wrbuffer_ref_head;
+ atomic_t i_filelock_ref;
u32 i_shared_gen; /* increment each time we get FILE_SHARED */
u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */
u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
@@ -487,6 +488,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */
#define CEPH_I_FLUSH_SNAPS (1 << 10) /* need flush snapss */
#define CEPH_I_ERROR_WRITE (1 << 11) /* have seen write errors */
+#define CEPH_I_ERROR_FILELOCK (1 << 12) /* have seen file lock errors */
+
/*
* We set the ERROR_WRITE bit when we start seeing write errors on an inode
@@ -1011,7 +1014,6 @@ extern int ceph_encode_locks_to_buffer(struct inode *inode,
extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
struct ceph_pagelist *pagelist,
int num_fcntl_locks, int num_flock_locks);
-extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
/* debugfs.c */
extern int ceph_fs_debugfs_init(struct ceph_fs_client *client);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index cbd216b57239..350fa55a1bf7 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -42,7 +42,7 @@
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
-#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
+#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
#define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8c8b75d33f31..31b7565b1617 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -125,7 +125,7 @@ cifs_read_super(struct super_block *sb)
tcon = cifs_sb_master_tcon(cifs_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -497,7 +497,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",cifsacl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
seq_puts(s, ",dynperm");
- if (root->d_sb->s_flags & MS_POSIXACL)
+ if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(s, ",acl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
seq_puts(s, ",mfsymlinks");
@@ -573,7 +573,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
return 0;
}
@@ -708,7 +708,7 @@ cifs_do_mount(struct file_system_type *fs_type,
rc = cifs_mount(cifs_sb, volume_info);
if (rc) {
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
rc);
root = ERR_PTR(rc);
@@ -720,7 +720,7 @@ cifs_do_mount(struct file_system_type *fs_type,
mnt_data.flags = flags;
/* BB should we make this contingent on mount parm? */
- flags |= MS_NODIRATIME | MS_NOATIME;
+ flags |= SB_NODIRATIME | SB_NOATIME;
sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
if (IS_ERR(sb)) {
@@ -739,7 +739,7 @@ cifs_do_mount(struct file_system_type *fs_type,
goto out_super;
}
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
}
root = cifs_get_root(volume_info, sb);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index e185b2853eab..b16583594d1a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -559,8 +559,8 @@ struct smb_vol {
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
-#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
- MS_NODEV | MS_SYNCHRONOUS)
+#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
+ SB_NODEV | SB_SYNCHRONOUS)
struct cifs_mnt_data {
struct cifs_sb_info *cifs_sb;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7c732cb44164..ecb99079363a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -985,7 +985,7 @@ retry_iget5_locked:
}
cifs_fattr_to_inode(inode, fattr);
- if (sb->s_flags & MS_NOATIME)
+ if (sb->s_flags & SB_NOATIME)
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
inode->i_ino = hash;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 52f975d848a0..316af84674f1 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -117,7 +117,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
#ifdef CONFIG_CIFS_POSIX
if (!value)
goto out;
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
value, (const int)size,
ACL_TYPE_ACCESS, cifs_sb->local_nls,
@@ -129,7 +129,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
#ifdef CONFIG_CIFS_POSIX
if (!value)
goto out;
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
value, (const int)size,
ACL_TYPE_DEFAULT, cifs_sb->local_nls,
@@ -266,7 +266,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
case XATTR_ACL_ACCESS:
#ifdef CONFIG_CIFS_POSIX
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
value, size, ACL_TYPE_ACCESS,
cifs_sb->local_nls,
@@ -276,7 +276,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
case XATTR_ACL_DEFAULT:
#ifdef CONFIG_CIFS_POSIX
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
value, size, ACL_TYPE_DEFAULT,
cifs_sb->local_nls,
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 6f0a6a4d5faa..97424cf206c0 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -96,7 +96,7 @@ void coda_destroy_inodecache(void)
static int coda_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
return 0;
}
@@ -188,7 +188,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
mutex_unlock(&vc->vc_mutex);
sb->s_fs_info = vc;
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
sb->s_blocksize = 4096; /* XXXXX what do we put here?? */
sb->s_blocksize_bits = 12;
sb->s_magic = CODA_SUPER_MAGIC;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 9a2ab419ba62..017b0ab19bc4 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -505,7 +505,7 @@ static void cramfs_kill_sb(struct super_block *sb)
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -592,7 +592,7 @@ static int cramfs_finalize_super(struct super_block *sb,
struct inode *root;
/* Set it all up.. */
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_op = &cramfs_ops;
root = get_cramfs_inode(sb, cramfs_root, 0);
if (IS_ERR(root))
diff --git a/fs/dax.c b/fs/dax.c
index 95981591977a..78b72c48374e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -627,7 +627,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
if (pfn != pmd_pfn(*pmdp))
goto unlock_pmd;
- if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+ if (!pmd_dirty(*pmdp)
+ && !pmd_access_permitted(*pmdp, WRITE))
goto unlock_pmd;
flush_cache_page(vma, address, pfn);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index f2677c90d96e..025d66a705db 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -560,8 +560,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
* Set the POSIX ACL flag based on whether they're enabled in the lower
* mount.
*/
- s->s_flags = flags & ~MS_POSIXACL;
- s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+ s->s_flags = flags & ~SB_POSIXACL;
+ s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
/**
* Force a read-only eCryptfs mount when:
@@ -569,7 +569,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
* 2) The ecryptfs_encrypted_view mount option is specified
*/
if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
@@ -602,7 +602,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
ecryptfs_set_dentry_private(s->s_root, root_info);
root_info->lower_path = path;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
return dget(s->s_root);
out_free:
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 65b59009555b..6ffb7ba1547a 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -116,7 +116,7 @@ static void destroy_inodecache(void)
static int efs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -311,7 +311,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
#ifdef DEBUG
pr_info("forcing read-only mode\n");
#endif
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
}
s->s_op = &efs_superblock_operations;
s->s_export_op = &efs_export_ops;
diff --git a/fs/exec.c b/fs/exec.c
index 1d6243d9f2b6..6be2aa0ab26f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm)
* avoid bad behavior from the prior rlimits. This has to
* happen before arch_pick_mmap_layout(), which examines
* RLIMIT_STACK, but after the point of no return to avoid
- * needing to clean up the change on failure.
+ * races from other threads changing the limits. This also
+ * must be protected from races with prlimit() calls.
*/
+ task_lock(current->group_leader);
if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
+ if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
+ current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
+ task_unlock(current->group_leader);
}
arch_pick_mmap_layout(current->mm);
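
The exec.c hunk clamps both the soft and the hard RLIMIT_STACK value, and does so under task_lock() so a concurrent prlimit() cannot race the clamp or reinstate a larger limit. A userspace sketch of the same idea, with a mutex standing in for the task lock (names and the limit value are illustrative):

/* Clamp both limits while holding the lock a racing updater would take. */
#include <pthread.h>
#include <stdio.h>

#define STK_LIM (8UL * 1024 * 1024)   /* stand-in for _STK_LIM */

struct limits {
	pthread_mutex_t lock;   /* stands in for task_lock(group_leader) */
	unsigned long cur;      /* soft limit */
	unsigned long max;      /* hard limit */
};

static void clamp_stack_limit(struct limits *l)
{
	pthread_mutex_lock(&l->lock);
	if (l->cur > STK_LIM)
		l->cur = STK_LIM;
	if (l->max > STK_LIM)
		l->max = STK_LIM;
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	struct limits l = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cur = 64UL * 1024 * 1024,
		.max = ~0UL,    /* RLIM_INFINITY-like */
	};

	clamp_stack_limit(&l);
	printf("cur=%lu max=%lu\n", l.cur, l.max);
	return 0;
}
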
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index e1b3724bebf2..33db13365c5e 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -548,7 +548,7 @@ do_more:
}
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
group_adjust_blocks(sb, block_group, desc, bh2, group_freed);
@@ -1424,7 +1424,7 @@ allocated:
percpu_counter_sub(&sbi->s_freeblocks_counter, num);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
*errp = 0;
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index a1fc3dabca41..6484199b35d1 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -145,7 +145,7 @@ void ext2_free_inode (struct inode * inode)
else
ext2_release_inode(sb, block_group, is_directory);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
@@ -517,7 +517,7 @@ repeat_in_this_group:
goto fail;
got:
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index e2b6be03e69b..7646818ab266 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -75,7 +75,7 @@ void ext2_error(struct super_block *sb, const char *function,
if (test_opt(sb, ERRORS_RO)) {
ext2_msg(sb, KERN_CRIT,
"error: remounting filesystem read-only");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
}
@@ -656,7 +656,7 @@ static int ext2_setup_super (struct super_block * sb,
ext2_msg(sb, KERN_ERR,
"error: revision level too high, "
"forcing read-only mode");
- res = MS_RDONLY;
+ res = SB_RDONLY;
}
if (read_only)
return res;
@@ -924,9 +924,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_resuid = opts.s_resuid;
sbi->s_resgid = opts.s_resgid;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
- MS_POSIXACL : 0);
+ SB_POSIXACL : 0);
sb->s_iflags |= SB_I_CGROUPWB;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
@@ -1178,7 +1178,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
if (ext2_setup_super (sb, es, sb_rdonly(sb)))
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ext2_write_super(sb);
return 0;
@@ -1341,9 +1341,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
"dax flag with busy inodes while remounting");
new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
}
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out_set;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS))
goto out_set;
@@ -1379,7 +1379,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
*/
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext2_setup_super (sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
spin_unlock(&sbi->s_lock);
ext2_write_super(sb);
@@ -1392,8 +1392,8 @@ out_set:
sbi->s_mount_opt = new_opts.s_mount_opt;
sbi->s_resuid = new_opts.s_resuid;
sbi->s_resgid = new_opts.s_resgid;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
spin_unlock(&sbi->s_lock);
return 0;
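
From here on, most hunks are the mechanical rename of the in-kernel superblock flag bits from MS_* to SB_*; the MS_* names remain the mount(2) userspace API. Many of the remount paths also use the clear-then-conditionally-set idiom when a mount option toggles a flag. A tiny standalone sketch of that idiom, with made-up bit values rather than the kernel's:

/* Drop the flag bit, then OR it back in only if the option is enabled. */
#include <stdbool.h>
#include <stdio.h>

#define EX_RDONLY   (1UL << 0)
#define EX_POSIXACL (1UL << 16)

static unsigned long set_flag(unsigned long flags, unsigned long bit, bool on)
{
	return (flags & ~bit) | (on ? bit : 0);
}

int main(void)
{
	unsigned long s_flags = EX_RDONLY;

	s_flags = set_flag(s_flags, EX_POSIXACL, true);   /* mount -o acl   */
	printf("acl on : %#lx\n", s_flags);

	s_flags = set_flag(s_flags, EX_POSIXACL, false);  /* mount -o noacl */
	printf("acl off: %#lx\n", s_flags);
	return 0;
}
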
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0992d76f7ab1..7df2c5644e59 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2742,7 +2742,7 @@ static int ext4_writepages(struct address_space *mapping,
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
- * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
+ * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
* the latter could be true if the filesystem is mounted
* read-only, and in that case, ext4_writepages should
* *never* be called, so if that ever happens, we would want
@@ -5183,7 +5183,7 @@ static int ext4_do_update_inode(handle_t *handle,
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
- if (inode->i_sb->s_flags & MS_LAZYTIME)
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
bh->b_data);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0556cd036b69..7c46693a14d7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -422,7 +422,7 @@ static void ext4_handle_error(struct super_block *sb)
* before ->s_flags update
*/
smp_wmb();
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
if (test_opt(sb, ERRORS_PANIC)) {
if (EXT4_SB(sb)->s_journal &&
@@ -635,7 +635,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
* before ->s_flags update
*/
smp_wmb();
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
if (EXT4_SB(sb)->s_journal)
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line);
@@ -1682,10 +1682,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
sb->s_flags |= SB_I_VERSION;
return 1;
case Opt_lazytime:
- sb->s_flags |= MS_LAZYTIME;
+ sb->s_flags |= SB_LAZYTIME;
return 1;
case Opt_nolazytime:
- sb->s_flags &= ~MS_LAZYTIME;
+ sb->s_flags &= ~SB_LAZYTIME;
return 1;
}
@@ -2116,7 +2116,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
ext4_msg(sb, KERN_ERR, "revision level too high, "
"forcing read-only mode");
- res = MS_RDONLY;
+ res = SB_RDONLY;
}
if (read_only)
goto done;
@@ -2429,7 +2429,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
/* don't clear list on RO mount w/ errors */
- if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
+ if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
"clearing orphan list.\n");
es->s_last_orphan = 0;
@@ -2438,19 +2438,19 @@ static void ext4_orphan_cleanup(struct super_block *sb,
return;
}
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
/*
* Turn on quotas which were not enabled for read-only mounts if
* filesystem has quota feature, so that they are updated correctly.
*/
- if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+ if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
int ret = ext4_enable_quotas(sb);
if (!ret)
@@ -2539,7 +2539,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
}
}
#endif
- sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}
/*
@@ -2741,7 +2741,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
if (ext4_has_feature_readonly(sb)) {
ext4_msg(sb, KERN_INFO, "filesystem is read-only");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
return 1;
}
@@ -3623,8 +3623,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_iflags |= SB_I_CGROUPWB;
}
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(ext4_has_compat_features(sb) ||
@@ -4199,7 +4199,7 @@ no_journal:
}
if (ext4_setup_super(sb, es, sb_rdonly(sb)))
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
@@ -4693,7 +4693,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
- if (!(sb->s_flags & MS_RDONLY))
+ if (!(sb->s_flags & SB_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
if (sb->s_bdev->bd_part)
es->s_kbytes_written =
@@ -5047,8 +5047,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
es = sbi->s_es;
@@ -5057,16 +5057,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
}
- if (*flags & MS_LAZYTIME)
- sb->s_flags |= MS_LAZYTIME;
+ if (*flags & SB_LAZYTIME)
+ sb->s_flags |= SB_LAZYTIME;
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
err = -EROFS;
goto restore_opts;
}
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
err = sync_filesystem(sb);
if (err < 0)
goto restore_opts;
@@ -5078,7 +5078,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
* First of all, the unconditional stuff we have to do
* to disable replay of the journal when we next remount
*/
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
* OK, test if we are remounting a valid rw partition
@@ -5140,7 +5140,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
ext4_clear_journal_err(sb, es);
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext4_setup_super(sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (ext4_has_feature_mmp(sb))
if (ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block))) {
@@ -5164,7 +5164,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
ext4_setup_system_zone(sb);
- if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+ if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
ext4_commit_super(sb, 1);
#ifdef CONFIG_QUOTA
@@ -5182,7 +5182,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
#endif
- *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
+ *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
return 0;
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index dd2e73e10857..4aa69bc1c70a 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -617,17 +617,17 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
- sbi->sb->s_flags &= ~MS_RDONLY;
+ sbi->sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= MS_ACTIVE;
+ sbi->sb->s_flags |= SB_ACTIVE;
/* Turn on quotas so that they are updated correctly */
- quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+ quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -658,7 +658,7 @@ out:
if (quota_enabled)
f2fs_quota_off_umount(sbi->sb);
#endif
- sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return err;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index f4e094e816c6..6abf26c31d01 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2378,7 +2378,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
static inline int f2fs_readonly(struct super_block *sb)
{
- return sb->s_flags & MS_RDONLY;
+ return sb->s_flags & SB_RDONLY;
}
static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 5d5bba462f26..d844dcb80570 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1005,7 +1005,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
cpc.reason = __get_cp_reason(sbi);
gc_more:
- if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
+ if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
ret = -EINVAL;
goto stop;
}
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 92c57ace1939..b3a14b0429f2 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -598,16 +598,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
int quota_enabled;
#endif
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
- sbi->sb->s_flags &= ~MS_RDONLY;
+ sbi->sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= MS_ACTIVE;
+ sbi->sb->s_flags |= SB_ACTIVE;
/* Turn on quotas so that they are updated correctly */
- quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+ quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -671,7 +671,7 @@ out:
if (quota_enabled)
f2fs_quota_off_umount(sbi->sb);
#endif
- sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return ret ? ret: err;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a6c5dd450002..708155d9c2e4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -534,10 +534,10 @@ static int parse_options(struct super_block *sb, char *options)
#endif
break;
case Opt_lazytime:
- sb->s_flags |= MS_LAZYTIME;
+ sb->s_flags |= SB_LAZYTIME;
break;
case Opt_nolazytime:
- sb->s_flags &= ~MS_LAZYTIME;
+ sb->s_flags &= ~SB_LAZYTIME;
break;
#ifdef CONFIG_QUOTA
case Opt_quota:
@@ -1168,7 +1168,7 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
set_opt(sbi, NOHEAP);
- sbi->sb->s_flags |= MS_LAZYTIME;
+ sbi->sb->s_flags |= SB_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
@@ -1236,7 +1236,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
#endif
/* recover superblocks we couldn't write due to previous RO mount */
- if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
err = f2fs_commit_super(sbi, false);
f2fs_msg(sb, KERN_INFO,
"Try to recover all the superblocks, ret: %d", err);
@@ -1255,17 +1255,17 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* Previous and new state of filesystem is RO,
* so skip checking GC and FLUSH_MERGE conditions.
*/
- if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
+ if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
goto skip;
#ifdef CONFIG_QUOTA
- if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+ if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
} else {
/* dquot_resume needs RW */
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
dquot_resume(sb, -1);
} else if (f2fs_sb_has_quota_ino(sb)) {
@@ -1288,7 +1288,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* or if background_gc = off is passed in mount
* option. Also sync the filesystem.
*/
- if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+ if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) {
stop_gc_thread(sbi);
need_restart_gc = true;
@@ -1300,7 +1300,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
@@ -1314,7 +1314,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
*/
- if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+ if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
clear_opt(sbi, FLUSH_MERGE);
destroy_flush_cmd_control(sbi, false);
} else {
@@ -1329,8 +1329,8 @@ skip:
kfree(s_qf_names[i]);
#endif
/* Update the POSIXACL Flag */
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
return 0;
restore_gc:
@@ -2472,8 +2472,8 @@ try_onemore:
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
sb->s_time_gran = 1;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 48b2336692f9..bac10de678cc 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -392,7 +392,7 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
set_buffer_uptodate(c_bh);
mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
err = sync_dirty_buffer(c_bh);
brelse(c_bh);
if (err)
@@ -597,7 +597,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
}
if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
- if (sb->s_flags & MS_SYNCHRONOUS) {
+ if (sb->s_flags & SB_SYNCHRONOUS) {
err = fat_sync_bhs(bhs, nr_bhs);
if (err)
goto error;
@@ -612,7 +612,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
fat_collect_bhs(bhs, &nr_bhs, &fatent);
} while (cluster != FAT_ENT_EOF);
- if (sb->s_flags & MS_SYNCHRONOUS) {
+ if (sb->s_flags & SB_SYNCHRONOUS) {
err = fat_sync_bhs(bhs, nr_bhs);
if (err)
goto error;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 30c52394a7ad..20a0a89eaca5 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -779,14 +779,14 @@ static void __exit fat_destroy_inodecache(void)
static int fat_remount(struct super_block *sb, int *flags, char *data)
{
- int new_rdonly;
+ bool new_rdonly;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+ *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
sync_filesystem(sb);
/* make sure we update state on remount. */
- new_rdonly = *flags & MS_RDONLY;
+ new_rdonly = *flags & SB_RDONLY;
if (new_rdonly != sb_rdonly(sb)) {
if (new_rdonly)
fat_set_state(sb, 0, 0);
@@ -1352,7 +1352,7 @@ out:
if (opts->unicode_xlate)
opts->utf8 = 0;
if (opts->nfs == FAT_NFS_NOSTALE_RO) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_export_op = &fat_export_ops_nostale;
}
@@ -1608,7 +1608,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
return -ENOMEM;
sb->s_fs_info = sbi;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
sb->s_magic = MSDOS_SUPER_MAGIC;
sb->s_op = &fat_sops;
sb->s_export_op = &fat_export_ops;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index acc3aa30ee54..f9bdc1e01c98 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -33,7 +33,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
if (opts->errors == FAT_ERRORS_PANIC)
panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
}
}
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 7d6a105d601b..d24d2758a363 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -646,7 +646,7 @@ static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
sb->s_d_op = &msdos_dentry_operations;
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
}
static int msdos_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 455ce5b77e9b..f989efa051a0 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -116,7 +116,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
static int vxfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -220,7 +220,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
int ret = -EINVAL;
u32 j;
- sbp->s_flags |= MS_RDONLY;
+ sbp->s_flags |= SB_RDONLY;
infp = kzalloc(sizeof(*infp), GFP_KERNEL);
if (!infp) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 08f5debd07d1..cea4836385b7 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -490,7 +490,7 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
- if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+ if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
inode->i_state & (I_WB_SWITCH | I_FREEING) ||
inode_to_wb(inode) == isw->new_wb) {
spin_unlock(&inode->i_lock);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 2f504d615d92..624f18bbfd2b 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -130,7 +130,7 @@ static void fuse_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- if (inode->i_sb->s_flags & MS_ACTIVE) {
+ if (inode->i_sb->s_flags & SB_ACTIVE) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
@@ -141,7 +141,7 @@ static void fuse_evict_inode(struct inode *inode)
static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if (*flags & MS_MANDLOCK)
+ if (*flags & SB_MANDLOCK)
return -EINVAL;
return 0;
@@ -1056,10 +1056,10 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
int is_bdev = sb->s_bdev != NULL;
err = -EINVAL;
- if (sb->s_flags & MS_MANDLOCK)
+ if (sb->s_flags & SB_MANDLOCK)
goto err;
- sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
+ sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
if (!parse_fuse_opt(data, &d, is_bdev))
goto err;
@@ -1109,9 +1109,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
goto err_dev_free;
/* Handle umasking inside the fuse code */
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
fc->dont_mask = 1;
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
fc->default_permissions = d.default_permissions;
fc->allow_other = d.allow_other;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a3711f543405..ad55eb86a250 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1065,15 +1065,15 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
sdp->sd_args = *args;
if (sdp->sd_args.ar_spectator) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
set_bit(SDF_RORECOVERY, &sdp->sd_flags);
}
if (sdp->sd_args.ar_posix_acl)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- sb->s_flags |= MS_NOSEC;
+ sb->s_flags |= SB_NOSEC;
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_d_op = &gfs2_dops;
@@ -1257,7 +1257,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
struct gfs2_args args;
struct gfs2_sbd *sdp;
- if (!(flags & MS_RDONLY))
+ if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1313,15 +1313,15 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
if (s->s_root) {
error = -EBUSY;
- if ((flags ^ s->s_flags) & MS_RDONLY)
+ if ((flags ^ s->s_flags) & SB_RDONLY)
goto error_super;
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
sb_set_blocksize(s, block_size(bdev));
- error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
+ error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
if (error)
goto error_super;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
bdev->bd_super = s;
}
@@ -1365,7 +1365,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
pr_warn("gfs2 mount does not exist\n");
return ERR_CAST(s);
}
- if ((flags ^ s->s_flags) & MS_RDONLY) {
+ if ((flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
return ERR_PTR(-EBUSY);
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9cb5c9a97d69..d81d46e19726 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1256,10 +1256,10 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
return -EINVAL;
if (sdp->sd_args.ar_spectator)
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
- if ((sb->s_flags ^ *flags) & MS_RDONLY) {
- if (*flags & MS_RDONLY)
+ if ((sb->s_flags ^ *flags) & SB_RDONLY) {
+ if (*flags & SB_RDONLY)
error = gfs2_make_fs_ro(sdp);
else
error = gfs2_make_fs_rw(sdp);
@@ -1269,9 +1269,9 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
sdp->sd_args = args;
if (sdp->sd_args.ar_posix_acl)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
else
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
else
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index a85ca8b2c9ba..ca8b72d0a831 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -117,7 +117,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
kfree(tr);
up_read(&sdp->sd_log_flush_lock);
- if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
+ if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
if (alloced)
sb_end_intwrite(sdp->sd_vfs);
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 894994d2c885..460281b1299e 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -204,11 +204,11 @@ int hfs_mdb_get(struct super_block *sb)
attrib = mdb->drAtrb;
if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
pr_warn("filesystem is marked locked, mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
if (!sb_rdonly(sb)) {
/* Mark the volume uncleanly unmounted in case we crash */
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 7e0d65e9586c..173876782f73 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -114,18 +114,18 @@ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static int hfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ *flags |= SB_NODIRATIME;
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (!(*flags & MS_RDONLY)) {
+ if (!(*flags & SB_RDONLY)) {
if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
} else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
}
}
return 0;
@@ -407,7 +407,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &hfs_super_operations;
sb->s_xattr = hfs_xattr_handlers;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
mutex_init(&sbi->bitmap_lock);
res = hfs_mdb_get(sb);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index e5bb2de2262a..1d458b716957 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -329,9 +329,9 @@ static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (!(*flags & MS_RDONLY)) {
+ if (!(*flags & SB_RDONLY)) {
struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
int force = 0;
@@ -340,20 +340,20 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
} else if (force) {
/* nothing */
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
pr_warn("filesystem is marked journaled, leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
}
}
return 0;
@@ -455,16 +455,16 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
/* nothing */
} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("Filesystem is marked locked, mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
!sb_rdonly(sb)) {
pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
err = -EINVAL;
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index e0e60b148400..7c49f1ef0c85 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -288,7 +288,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
goto bail;
}
if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
- if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
+ if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
goto bail;
}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 1516fb4e28f4..c45a3b9b9ac7 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -78,7 +78,7 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
else {
pr_cont("; remounting read-only\n");
mark_dirty(s, 0);
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
}
} else if (sb_rdonly(s))
pr_cont("; going on - but anything won't be destroyed because it's read-only\n");
@@ -457,7 +457,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
sync_filesystem(s);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
hpfs_lock(s);
uid = sbi->sb_uid; gid = sbi->sb_gid;
@@ -488,7 +488,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
- if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+ if (!(*flags & SB_RDONLY)) mark_dirty(s, 1);
hpfs_unlock(s);
return 0;
@@ -614,7 +614,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
goto bail4;
}
- s->s_flags |= MS_NOATIME;
+ s->s_flags |= SB_NOATIME;
/* Fill superblock stuff */
s->s_magic = HPFS_SUPER_MAGIC;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1e76730aac0d..8a85f3f53446 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -639,11 +639,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
/*
- * page_put due to reference from alloc_huge_page()
* unlock_page because locked by add_to_page_cache()
+ * page_put due to reference from alloc_huge_page()
*/
- put_page(page);
unlock_page(page);
+ put_page(page);
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
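
The hugetlbfs hunk swaps the two calls so the page is unlocked while the reference taken by alloc_huge_page() is still held; dropping a page's last reference before unlocking it is the classic hazard this ordering avoids, since the unlock would then touch freed memory. A toy refcounted object showing why the unlock must come first (not the kernel's struct page):

/* Release the lock while a reference is still held, then drop the ref. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page_like {
	atomic_int refcount;
	bool locked;
};

static void unlock_obj(struct page_like *p)
{
	assert(atomic_load(&p->refcount) > 0);   /* must still be alive */
	p->locked = false;
}

static void put_obj(struct page_like *p)
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1) {
		printf("last reference gone, freeing\n");
		free(p);
	}
}

int main(void)
{
	struct page_like *p = calloc(1, sizeof(*p));
	if (!p)
		return 1;
	atomic_store(&p->refcount, 1);
	p->locked = true;

	/* correct order: unlock first, then drop the reference */
	unlock_obj(p);
	put_obj(p);
	return 0;
}
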
diff --git a/fs/inode.c b/fs/inode.c
index fd401028a309..03102d6ef044 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -416,7 +416,7 @@ void inode_add_lru(struct inode *inode)
{
if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
I_FREEING | I_WILL_FREE)) &&
- !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+ !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
inode_lru_list_add(inode);
}
@@ -595,7 +595,7 @@ static void dispose_list(struct list_head *head)
* @sb: superblock to operate on
*
* Make sure that no inodes with zero refcount are retained. This is
- * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * called by superblock shutdown after having SB_ACTIVE flag removed,
* so any inode reaching zero refcount during or after that call will
* be immediately evicted.
*/
@@ -1492,7 +1492,7 @@ static void iput_final(struct inode *inode)
else
drop = generic_drop_inode(inode);
- if (!drop && (sb->s_flags & MS_ACTIVE)) {
+ if (!drop && (sb->s_flags & SB_ACTIVE)) {
inode_add_lru(inode);
spin_unlock(&inode->i_lock);
return;
@@ -1644,7 +1644,7 @@ int generic_update_time(struct inode *inode, struct timespec *time, int flags)
if (flags & S_MTIME)
inode->i_mtime = *time;
- if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
+ if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
iflags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, iflags);
return 0;
@@ -1691,7 +1691,7 @@ bool __atime_needs_update(const struct path *path, struct inode *inode,
if (IS_NOATIME(inode))
return false;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return false;
if (mnt->mnt_flags & MNT_NOATIME)
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 447a24d77b89..bc258a4402f6 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -114,7 +114,7 @@ static void destroy_inodecache(void)
static int isofs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if (!(*flags & MS_RDONLY))
+ if (!(*flags & SB_RDONLY))
return -EROFS;
return 0;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index e96c6b05e43e..d8c274d39ddb 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -409,10 +409,10 @@ int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
mutex_unlock(&c->alloc_sem);
}
- if (!(*flags & MS_RDONLY))
+ if (!(*flags & SB_RDONLY))
jffs2_start_garbage_collect_thread(c);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
return 0;
}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 824e61ede465..c2fbec19c616 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -59,7 +59,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
}
-#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY)
#define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 153f1c6eb169..f60dee7faf03 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -301,10 +301,10 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &jffs2_super_operations;
sb->s_export_op = &jffs2_export_ops;
- sb->s_flags = sb->s_flags | MS_NOATIME;
+ sb->s_flags = sb->s_flags | SB_NOATIME;
sb->s_xattr = jffs2_xattr_handlers;
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
ret = jffs2_do_fill_super(sb, data, silent);
return ret;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2f7b3af5b8b7..90373aebfdca 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -87,7 +87,7 @@ static void jfs_handle_error(struct super_block *sb)
else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
jfs_err("ERROR: (device %s): remounting filesystem as read-only",
sb->s_id);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/* nothing is done for continue beyond marking the superblock dirty */
@@ -477,7 +477,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
return rc;
}
- if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+ if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
/*
* Invalidate any previously read metadata. fsck may have
* changed the on-disk data since we mounted r/o
@@ -488,12 +488,12 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
ret = jfs_mount_rw(sb, 1);
/* mark the fs r/w for quota activity */
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
dquot_resume(sb, -1);
return ret;
}
- if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+ if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
rc = dquot_suspend(sb, -1);
if (rc < 0)
return rc;
@@ -545,7 +545,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->flag = flag;
#ifdef CONFIG_JFS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
if (newLVSize) {
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 95a7c88baed9..26dd9a50f383 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -335,7 +335,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
deactivate_locked_super(sb);
return ERR_PTR(error);
}
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
mutex_lock(&kernfs_mutex);
list_add(&info->node, &root->supers);
diff --git a/fs/libfs.c b/fs/libfs.c
index 3aabe553fc45..7ff3cb904acd 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -246,7 +246,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
struct inode *root;
struct qstr d_name = QSTR_INIT(name, strlen(name));
- s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+ s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
&init_user_ns, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
@@ -277,7 +277,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
d_instantiate(dentry, root);
s->s_root = dentry;
s->s_d_op = dops;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
return dget(s->s_root);
Enomem:
@@ -578,7 +578,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
spin_lock(&pin_fs_lock);
if (unlikely(!*mount)) {
spin_unlock(&pin_fs_lock);
- mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
+ mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
spin_lock(&pin_fs_lock);
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 0d4e590e0549..826a89184f90 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -578,8 +578,10 @@ static void nlm_complain_hosts(struct net *net)
if (ln->nrhosts == 0)
return;
- printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
- dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
+ pr_warn("lockd: couldn't shutdown host module for net %x!\n",
+ net->ns.inum);
+ dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
+ net->ns.inum);
} else {
if (nrhosts == 0)
return;
@@ -590,9 +592,9 @@ static void nlm_complain_hosts(struct net *net)
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
- dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
+ dprintk(" %s (cnt %d use %d exp %ld net %x)\n",
host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires, host->net);
+ host->h_inuse, host->h_expires, host->net->ns.inum);
}
}
@@ -605,7 +607,8 @@ nlm_shutdown_hosts_net(struct net *net)
mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
- dprintk("lockd: nuking all hosts in net %p...\n", net);
+ dprintk("lockd: nuking all hosts in net %x...\n",
+ net ? net->ns.inum : 0);
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
@@ -618,9 +621,8 @@ nlm_shutdown_hosts_net(struct net *net)
/* Then, perform a garbage collection pass */
nlm_gc_hosts(net);
- mutex_unlock(&nlm_host_mutex);
-
nlm_complain_hosts(net);
+ mutex_unlock(&nlm_host_mutex);
}
/*
@@ -646,7 +648,8 @@ nlm_gc_hosts(struct net *net)
struct hlist_node *next;
struct nlm_host *host;
- dprintk("lockd: host garbage collection for net %p\n", net);
+ dprintk("lockd: host garbage collection for net %x\n",
+ net ? net->ns.inum : 0);
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
@@ -662,9 +665,10 @@ nlm_gc_hosts(struct net *net)
if (atomic_read(&host->h_count) || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
- "(cnt %d use %d exp %ld net %p)\n",
+ "(cnt %d use %d exp %ld net %x)\n",
host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires, host->net);
+ host->h_inuse, host->h_expires,
+ host->net->ns.inum);
continue;
}
nlm_destroy_host_locked(host);
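Throughout the lockd hunks (and the nfsd ones later in this series), log messages stop printing struct net pointers with %p and print the namespace inode number instead: the inum is a stable identifier that can be matched against /proc/<pid>/ns/net, whereas a %p value is hashed by the printk core and identifies nothing useful. A minimal sketch of the new style (example_log_net() is an illustrative helper, and dprintk() here is the sunrpc debug macro already used by lockd):

    /* Identify a network namespace by its inode number rather than its address. */
    static void example_log_net(const char *what, struct net *net)
    {
            dprintk("lockd: %s for net %x\n", what, net ? net->ns.inum : 0);
    }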
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 9fbbd11f9ecb..96cfb2967ac7 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -110,7 +110,8 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
clnt = nsm_create(host->net, host->nodename);
if (IS_ERR(clnt)) {
dprintk("lockd: failed to create NSM upcall transport, "
- "status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
+ "status=%ld, net=%x\n", PTR_ERR(clnt),
+ host->net->ns.inum);
return PTR_ERR(clnt);
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index a8e3777c94dc..9c36d614bf89 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -57,6 +57,9 @@ static struct task_struct *nlmsvc_task;
static struct svc_rqst *nlmsvc_rqst;
unsigned long nlmsvc_timeout;
+atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
+
unsigned int lockd_net_id;
/*
@@ -259,7 +262,7 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
if (error < 0)
goto err_bind;
set_grace_period(net);
- dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+ dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
return 0;
err_bind:
@@ -274,12 +277,15 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
- dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
+ dprintk("%s: per-net data destroyed; net=%x\n",
+ __func__, net->ns.inum);
}
} else {
- printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
- nlmsvc_task, net);
+ pr_err("%s: no users! task=%p, net=%x\n",
+ __func__, nlmsvc_task, net->ns.inum);
BUG();
}
}
@@ -290,7 +296,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nlm_ntf_refcnt))
goto out;
if (nlmsvc_rqst) {
@@ -301,6 +308,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin);
}
+ atomic_dec(&nlm_ntf_refcnt);
+ wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
@@ -317,7 +326,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nlm_ntf_refcnt))
goto out;
if (nlmsvc_rqst) {
@@ -329,6 +339,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin6);
}
+ atomic_dec(&nlm_ntf_refcnt);
+ wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
@@ -345,10 +357,12 @@ static void lockd_unregister_notifiers(void)
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
+ wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
}
static void lockd_svc_exit_thread(void)
{
+ atomic_dec(&nlm_ntf_refcnt);
lockd_unregister_notifiers();
svc_exit_thread(nlmsvc_rqst);
}
@@ -373,6 +387,7 @@ static int lockd_start_svc(struct svc_serv *serv)
goto out_rqst;
}
+ atomic_inc(&nlm_ntf_refcnt);
svc_sock_update_bufs(serv);
serv->sv_maxconn = nlm_max_connections;
@@ -676,6 +691,17 @@ static int lockd_init_net(struct net *net)
static void lockd_exit_net(struct net *net)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ WARN_ONCE(!list_empty(&ln->lockd_manager.list),
+ "net %x %s: lockd_manager.list is not empty\n",
+ net->ns.inum, __func__);
+ WARN_ONCE(!list_empty(&ln->nsm_handles),
+ "net %x %s: nsm_handles list is not empty\n",
+ net->ns.inum, __func__);
+ WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
+ "net %x %s: grace_period_end was not cancelled\n",
+ net->ns.inum, __func__);
}
static struct pernet_operations lockd_net_ops = {
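The new nlm_ntf_refcnt/nlm_ntf_wq pair closes a shutdown race: an address-notifier callback can still be running against nlmsvc_rqst while lockd tears the service down. A callback only proceeds if it can take a reference with atomic_inc_not_zero(); teardown drops the reference held since lockd_start_svc() and then waits for the count to reach zero before the svc_rqst goes away. A minimal sketch of that lifecycle, with example_* names standing in for the lockd symbols:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t example_refcnt = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(example_wq);

    static void example_notifier(void)
    {
            if (!atomic_inc_not_zero(&example_refcnt))
                    return;                 /* teardown in progress, back off */
            /* ... safely use the shared service state here ... */
            atomic_dec(&example_refcnt);
            wake_up(&example_wq);
    }

    static void example_teardown(void)
    {
            atomic_dec(&example_refcnt);    /* drop the reference taken at startup */
            wait_event(example_wq, atomic_read(&example_refcnt) == 0);
            /* no notifier can still be touching the service past this point */
    }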
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index a563ddbc19e6..4ec3d6e03e76 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -370,7 +370,7 @@ nlmsvc_mark_resources(struct net *net)
{
struct nlm_host hint;
- dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
+ dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0);
hint.net = net;
nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
}
diff --git a/fs/locks.c b/fs/locks.c
index 1bd71c4d663a..21b4dfa289ee 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -141,7 +141,7 @@
static inline bool is_remote_lock(struct file *filp)
{
- return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
+ return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
}
static bool lease_breaking(struct file_lock *fl)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index d818fd236787..b8b8b9ced9f8 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -269,6 +269,9 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
struct mb_cache *cache = container_of(shrink, struct mb_cache,
c_shrink);
+ /* Unlikely, but not impossible */
+ if (unlikely(cache->c_entry_count < 0))
+ return 0;
return cache->c_entry_count;
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index b6829d679643..72e308c3e66b 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -125,9 +125,9 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
sync_filesystem(sb);
ms = sbi->s_ms;
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
if (ms->s_state & MINIX_VALID_FS ||
!(sbi->s_mount_state & MINIX_VALID_FS))
return 0;
diff --git a/fs/namei.c b/fs/namei.c
index f0c7a7b9b6ca..9cc91fb7f156 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
* of the daemon to instantiate them before they can be used.
*/
if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
- LOOKUP_OPEN | LOOKUP_CREATE |
- LOOKUP_AUTOMOUNT))) {
- /* Positive dentry that isn't meant to trigger an
- * automount, EISDIR will allow it to be used,
- * otherwise there's no mount here "now" so return
- * ENOENT.
- */
- if (path->dentry->d_inode)
- return -EISDIR;
- else
- return -ENOENT;
- }
+ LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+ path->dentry->d_inode)
+ return -EISDIR;
if (path->dentry->d_sb->s_user_ns != &init_user_ns)
return -EACCES;
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 129f1937fa2c..41de88cdc053 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -103,7 +103,7 @@ static void destroy_inodecache(void)
static int ncp_remount(struct super_block *sb, int *flags, char* data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
return 0;
}
@@ -547,7 +547,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
else
default_bufsize = 1024;
- sb->s_flags |= MS_NODIRATIME; /* probably even noatime */
+ sb->s_flags |= SB_NODIRATIME; /* probably even noatime */
sb->s_maxbytes = 0xFFFFFFFFU;
sb->s_blocksize = 1024; /* Eh... Is this correct? */
sb->s_blocksize_bits = 10;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e51ae52ed14f..2f3f86726f5b 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1256,7 +1256,7 @@ static int nfs_dentry_delete(const struct dentry *dentry)
/* Unhash it, so that ->d_iput() would be called */
return 1;
}
- if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+ if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
/* Unhash it, so that ancestors of killed async unlink
* files will be cleaned up during umount */
return 1;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 38b93d54c02e..b992d2382ffa 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -752,7 +752,7 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
* Note that we only have to check the vfsmount flags here:
* - NFS always sets S_NOATIME by so checking it would give a
* bogus result
- * - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+ * - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
* no point in checking those.
*/
if ((path->mnt->mnt_flags & MNT_NOATIME) ||
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5ab17fd4700a..8357ff69962f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -10,7 +10,7 @@
#include <linux/nfs_page.h>
#include <linux/wait_bit.h>
-#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
extern const struct export_operations nfs_export_ops;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 43cadb28db6e..29bacdc56f6a 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -813,9 +813,9 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
*/
seq_printf(m, "\n\topts:\t");
seq_puts(m, sb_rdonly(root->d_sb) ? "ro" : "rw");
- seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
- seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
- seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+ seq_puts(m, root->d_sb->s_flags & SB_SYNCHRONOUS ? ",sync" : "");
+ seq_puts(m, root->d_sb->s_flags & SB_NOATIME ? ",noatime" : "");
+ seq_puts(m, root->d_sb->s_flags & SB_NODIRATIME ? ",nodiratime" : "");
nfs_show_mount_options(m, nfss, 1);
seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
@@ -2296,11 +2296,11 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
/*
* noac is a special case. It implies -o sync, but that's not
* necessarily reflected in the mtab options. do_remount_sb
- * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
+ * will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
* remount options, so we have to explicitly reset it.
*/
if (data->flags & NFS_MOUNT_NOAC)
- *flags |= MS_SYNCHRONOUS;
+ *flags |= SB_SYNCHRONOUS;
/* compare new mount options with old ones */
error = nfs_compare_remount_data(nfss, data);
@@ -2349,7 +2349,7 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
sb->s_time_gran = 1;
sb->s_export_op = &nfs_export_ops;
}
@@ -2379,7 +2379,7 @@ static void nfs_clone_super(struct super_block *sb,
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
}
nfs_initialise_sb(sb);
@@ -2600,11 +2600,11 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ sb_mntdata.mntflags |= SB_SYNCHRONOUS;
if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
- if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
+ sb_mntdata.mntflags |= SB_SYNCHRONOUS;
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
@@ -2641,7 +2641,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
if (error)
goto error_splat_root;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
out:
return mntroot;
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index 897b299db55e..5be08f02a76b 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
struct list_head *grace_list = net_generic(net, grace_net_id);
spin_lock(&grace_lock);
- list_add(&lm->list, grace_list);
+ if (list_empty(&lm->list))
+ list_add(&lm->list, grace_list);
+ else
+ WARN(1, "double list_add attempt detected in net %x %s\n",
+ net->ns.inum, (net == &init_net) ? "(init_net)" : "");
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
- BUG_ON(!list_empty(grace_list));
+ WARN_ONCE(!list_empty(grace_list),
+ "net %x %s: grace_list is not empty\n",
+ net->ns.inum, __func__);
}
static struct pernet_operations grace_net_ops = {
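locks_start_grace() now tolerates being entered twice for the same lock_manager: the manager's own list_head doubles as an "already linked" flag. That only works because the head is initialized up front (note the INIT_LIST_HEAD(&nn->nfsd4_manager.list) added to nfs4_state_create_net later in this series) and removed with list_del_init(), which leaves it empty again. A minimal sketch of the idiom (example_* names are illustrative):

    #include <linux/list.h>

    static LIST_HEAD(example_grace_list);

    static void example_start(struct list_head *entry)
    {
            if (list_empty(entry))          /* requires INIT_LIST_HEAD() at setup */
                    list_add(entry, &example_grace_list);
    }

    static void example_end(struct list_head *entry)
    {
            list_del_init(entry);           /* keeps list_empty() true afterwards */
    }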
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 46b48dbbdd32..8ceb25a10ea0 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -232,7 +232,7 @@ static struct cache_head *expkey_alloc(void)
return NULL;
}
-static struct cache_detail svc_expkey_cache_template = {
+static const struct cache_detail svc_expkey_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPKEY_HASHMAX,
.name = "nfsd.fh",
@@ -748,7 +748,7 @@ static struct cache_head *svc_export_alloc(void)
return NULL;
}
-static struct cache_detail svc_export_cache_template = {
+static const struct cache_detail svc_export_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPORT_HASHMAX,
.name = "nfsd.export",
@@ -1230,7 +1230,7 @@ nfsd_export_init(struct net *net)
int rv;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- dprintk("nfsd: initializing export module (net: %p).\n", net);
+ dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
if (IS_ERR(nn->svc_export_cache))
@@ -1278,7 +1278,7 @@ nfsd_export_shutdown(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- dprintk("nfsd: shutting down export module (net: %p).\n", net);
+ dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
cache_unregister_net(nn->svc_expkey_cache, net);
cache_unregister_net(nn->svc_export_cache, net);
@@ -1286,5 +1286,5 @@ nfsd_export_shutdown(struct net *net)
cache_destroy_net(nn->svc_export_cache, net);
svcauth_unix_purge(net);
- dprintk("nfsd: export shutdown complete (net: %p).\n", net);
+ dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
}
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 1c91391f4805..36358d435cb0 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -119,6 +119,9 @@ struct nfsd_net {
u32 clverifier_counter;
struct svc_serv *nfsd_serv;
+
+ wait_queue_head_t ntf_wq;
+ atomic_t ntf_refcnt;
};
/* Simple check to find out if a given net was properly initialized */
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 6b9b6cca469f..a5bb76593ce7 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -178,7 +178,7 @@ static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
static struct ent *idtoname_update(struct cache_detail *, struct ent *,
struct ent *);
-static struct cache_detail idtoname_cache_template = {
+static const struct cache_detail idtoname_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.idtoname",
@@ -341,7 +341,7 @@ static struct ent *nametoid_update(struct cache_detail *, struct ent *,
struct ent *);
static int nametoid_parse(struct cache_detail *, char *, int);
-static struct cache_detail nametoid_cache_template = {
+static const struct cache_detail nametoid_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.nametoid",
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b82817767b9d..b29b5a185a2c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
static const stateid_t currentstateid = {
.si_generation = 1,
};
+static const stateid_t close_stateid = {
+ .si_generation = 0xffffffffU,
+};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
+#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
@@ -83,6 +87,11 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
*/
static DEFINE_SPINLOCK(state_lock);
+enum nfsd4_st_mutex_lock_subclass {
+ OPEN_STATEID_MUTEX = 0,
+ LOCK_STATEID_MUTEX = 1,
+};
+
/*
* A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
* the refcount on the open stateid to drop.
@@ -3562,7 +3571,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
- if (local->st_stateowner == &oo->oo_owner) {
+ if (local->st_stateowner != &oo->oo_owner)
+ continue;
+ if (local->st_stid.sc_type == NFS4_OPEN_STID) {
ret = local;
refcount_inc(&ret->st_stid.sc_count);
break;
@@ -3571,6 +3582,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
return ret;
}
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+ __be32 ret = nfs_ok;
+
+ switch (s->sc_type) {
+ default:
+ break;
+ case NFS4_CLOSED_STID:
+ case NFS4_CLOSED_DELEG_STID:
+ ret = nfserr_bad_stateid;
+ break;
+ case NFS4_REVOKED_DELEG_STID:
+ ret = nfserr_deleg_revoked;
+ }
+ return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+ __be32 ret;
+
+ mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
+ ret = nfsd4_verify_open_stid(&stp->st_stid);
+ if (ret != nfs_ok)
+ mutex_unlock(&stp->st_mutex);
+ return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+ struct nfs4_ol_stateid *stp;
+ for (;;) {
+ spin_lock(&fp->fi_lock);
+ stp = nfsd4_find_existing_open(fp, open);
+ spin_unlock(&fp->fi_lock);
+ if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+ break;
+ nfs4_put_stid(&stp->st_stid);
+ }
+ return stp;
+}
+
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
@@ -3613,8 +3670,9 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
stp = open->op_stp;
/* We are moving these outside of the spinlocks to avoid the warnings */
mutex_init(&stp->st_mutex);
- mutex_lock(&stp->st_mutex);
+ mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
@@ -3639,7 +3697,11 @@ out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
if (retstp) {
- mutex_lock(&retstp->st_mutex);
+ /* Handle races with CLOSE */
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
+ }
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
stp = retstp;
@@ -4449,6 +4511,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
+ bool new_stp = false;
/*
* Lookup file; if found, lookup stateid and check open request,
@@ -4460,9 +4523,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
- spin_lock(&fp->fi_lock);
- stp = nfsd4_find_existing_open(fp, open);
- spin_unlock(&fp->fi_lock);
+ stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
status = nfserr_bad_stateid;
@@ -4470,35 +4531,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
goto out;
}
+ if (!stp) {
+ stp = init_open_stateid(fp, open);
+ if (!open->op_stp)
+ new_stp = true;
+ }
+
/*
* OPEN the file, or upgrade an existing OPEN.
* If truncate fails, the OPEN fails.
+ *
+ * stp is already locked.
*/
- if (stp) {
+ if (!new_stp) {
/* Stateid was found, this is an OPEN upgrade */
- mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
- /* stp is returned locked. */
- stp = init_open_stateid(fp, open);
- /* See if we lost the race to some other thread */
- if (stp->st_access_bmap != 0) {
- status = nfs4_upgrade_open(rqstp, fp, current_fh,
- stp, open);
- if (status) {
- mutex_unlock(&stp->st_mutex);
- goto out;
- }
- goto upgrade_out;
- }
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
- mutex_unlock(&stp->st_mutex);
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
@@ -4507,7 +4564,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
-upgrade_out:
+
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
@@ -4734,7 +4791,7 @@ nfs4_laundromat(struct nfsd_net *nn)
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
- nbl = list_first_entry(&nn->blocked_locks_lru,
+ nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
@@ -4855,6 +4912,18 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
return nfserr_old_stateid;
}
+static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
+{
+ __be32 ret;
+
+ spin_lock(&s->sc_lock);
+ ret = nfsd4_verify_open_stid(s);
+ if (ret == nfs_ok)
+ ret = check_stateid_generation(in, &s->sc_stateid, has_session);
+ spin_unlock(&s->sc_lock);
+ return ret;
+}
+
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
if (ols->st_stateowner->so_is_open_owner &&
@@ -4868,7 +4937,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
struct nfs4_stid *s;
__be32 status = nfserr_bad_stateid;
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ CLOSE_STATEID(stateid))
return status;
/* Client debugging aid. */
if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
@@ -4883,7 +4953,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
- status = check_stateid_generation(stateid, &s->sc_stateid, 1);
+ status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
if (status)
goto out_unlock;
switch (s->sc_type) {
@@ -4926,7 +4996,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
else if (typemask & NFS4_DELEG_STID)
typemask |= NFS4_REVOKED_DELEG_STID;
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
@@ -5044,7 +5115,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
&s, nn);
if (status)
return status;
- status = check_stateid_generation(stateid, &s->sc_stateid,
+ status = nfsd4_stid_check_stateid_generation(stateid, s,
nfsd4_has_session(cstate));
if (status)
goto out;
@@ -5098,7 +5169,9 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
struct nfs4_ol_stateid *stp = openlockstateid(s);
__be32 ret;
- mutex_lock(&stp->st_mutex);
+ ret = nfsd4_lock_ol_stateid(stp);
+ if (ret)
+ goto out_put_stid;
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
@@ -5109,11 +5182,13 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
lockowner(stp->st_stateowner)))
goto out;
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(stp);
ret = nfs_ok;
out:
mutex_unlock(&stp->st_mutex);
+out_put_stid:
nfs4_put_stid(s);
return ret;
}
@@ -5133,6 +5208,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
+ spin_lock(&s->sc_lock);
switch (s->sc_type) {
case NFS4_DELEG_STID:
ret = nfserr_locks_held;
@@ -5144,11 +5220,13 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
+ spin_unlock(&s->sc_lock);
refcount_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
+ spin_unlock(&s->sc_lock);
dp = delegstateid(s);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&cl->cl_lock);
@@ -5157,6 +5235,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
/* Default falls through and returns nfserr_bad_stateid */
}
+ spin_unlock(&s->sc_lock);
out_unlock:
spin_unlock(&cl->cl_lock);
out:
@@ -5179,15 +5258,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
- if (stp->st_stid.sc_type == NFS4_CLOSED_STID
- || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
- /*
- * "Closed" stateid's exist *only* to return
- * nfserr_replay_me from the previous step, and
- * revoked delegations are kept only for free_stateid.
- */
- return nfserr_bad_stateid;
- mutex_lock(&stp->st_mutex);
+ status = nfsd4_lock_ol_stateid(stp);
+ if (status != nfs_ok)
+ return status;
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
@@ -5367,7 +5440,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
bool unhashed;
LIST_HEAD(reaplist);
- s->st_stid.sc_type = NFS4_CLOSED_STID;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
@@ -5407,10 +5479,17 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
+
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
+
+ /* See RFC5661 section 18.2.4 */
+ if (stp->st_stid.sc_client->cl_minorversion)
+ memcpy(&close->cl_stateid, &close_stateid,
+ sizeof(close->cl_stateid));
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
@@ -5436,7 +5515,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
dp = delegstateid(s);
- status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
+ status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
if (status)
goto put_stateid;
@@ -5642,14 +5721,41 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
return ret;
}
-static void
+static struct nfs4_ol_stateid *
+find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+{
+ struct nfs4_ol_stateid *lst;
+ struct nfs4_client *clp = lo->lo_owner.so_client;
+
+ lockdep_assert_held(&clp->cl_lock);
+
+ list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+ if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+ continue;
+ if (lst->st_stid.sc_file == fp) {
+ refcount_inc(&lst->st_stid.sc_count);
+ return lst;
+ }
+ }
+ return NULL;
+}
+
+static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
struct nfs4_file *fp, struct inode *inode,
struct nfs4_ol_stateid *open_stp)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
+ struct nfs4_ol_stateid *retstp;
- lockdep_assert_held(&clp->cl_lock);
+ mutex_init(&stp->st_mutex);
+ mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
+ spin_lock(&clp->cl_lock);
+ spin_lock(&fp->fi_lock);
+ retstp = find_lock_stateid(lo, fp);
+ if (retstp)
+ goto out_unlock;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
@@ -5659,29 +5765,22 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
- mutex_init(&stp->st_mutex);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
- spin_lock(&fp->fi_lock);
list_add(&stp->st_perfile, &fp->fi_stateids);
+out_unlock:
spin_unlock(&fp->fi_lock);
-}
-
-static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
-{
- struct nfs4_ol_stateid *lst;
- struct nfs4_client *clp = lo->lo_owner.so_client;
-
- lockdep_assert_held(&clp->cl_lock);
-
- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
- if (lst->st_stid.sc_file == fp) {
- refcount_inc(&lst->st_stid.sc_count);
- return lst;
+ spin_unlock(&clp->cl_lock);
+ if (retstp) {
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
}
+ /* To keep mutex tracking happy */
+ mutex_unlock(&stp->st_mutex);
+ stp = retstp;
}
- return NULL;
+ return stp;
}
static struct nfs4_ol_stateid *
@@ -5694,26 +5793,25 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *clp = oo->oo_owner.so_client;
+ *new = false;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, fi);
- if (lst == NULL) {
- spin_unlock(&clp->cl_lock);
- ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
- if (ns == NULL)
- return NULL;
-
- spin_lock(&clp->cl_lock);
- lst = find_lock_stateid(lo, fi);
- if (likely(!lst)) {
- lst = openlockstateid(ns);
- init_lock_stateid(lst, lo, fi, inode, ost);
- ns = NULL;
- *new = true;
- }
- }
spin_unlock(&clp->cl_lock);
- if (ns)
+ if (lst != NULL) {
+ if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+ goto out;
+ nfs4_put_stid(&lst->st_stid);
+ }
+ ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+ if (ns == NULL)
+ return NULL;
+
+ lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
+ if (lst == openlockstateid(ns))
+ *new = true;
+ else
nfs4_put_stid(ns);
+out:
return lst;
}
@@ -5750,7 +5848,6 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_lockowner *lo;
struct nfs4_ol_stateid *lst;
unsigned int strhashval;
- bool hashed;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
@@ -5766,25 +5863,12 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
goto out;
}
-retry:
lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
- mutex_lock(&lst->st_mutex);
-
- /* See if it's still hashed to avoid race with FREE_STATEID */
- spin_lock(&cl->cl_lock);
- hashed = !list_empty(&lst->st_perfile);
- spin_unlock(&cl->cl_lock);
-
- if (!hashed) {
- mutex_unlock(&lst->st_mutex);
- nfs4_put_stid(&lst->st_stid);
- goto retry;
- }
status = nfs_ok;
*plst = lst;
out:
@@ -5990,14 +6074,16 @@ out:
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
- mutex_unlock(&lock_stp->st_mutex);
-
/*
* If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it.
*/
- if (status && new)
+ if (status && new) {
+ lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(lock_stp);
+ }
+
+ mutex_unlock(&lock_stp->st_mutex);
nfs4_put_stid(&lock_stp->st_stid);
}
@@ -7017,6 +7103,10 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
+ nn->boot_time = get_seconds();
+ nn->grace_ended = false;
+ nn->nfsd4_manager.block_opens = true;
+ INIT_LIST_HEAD(&nn->nfsd4_manager.list);
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
@@ -7074,9 +7164,6 @@ nfs4_state_start_net(struct net *net)
ret = nfs4_state_create_net(net);
if (ret)
return ret;
- nn->boot_time = get_seconds();
- nn->grace_ended = false;
- nn->nfsd4_manager.block_opens = true;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
@@ -7153,7 +7240,7 @@ nfs4_state_shutdown_net(struct net *net)
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
- nbl = list_first_entry(&nn->blocked_locks_lru,
+ nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 6493df6b1bd5..d107b4426f7e 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1241,6 +1241,9 @@ static __net_init int nfsd_init_net(struct net *net)
nn->nfsd4_grace = 90;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
+
+ atomic_set(&nn->ntf_refcnt, 0);
+ init_waitqueue_head(&nn->ntf_wq);
return 0;
out_idmap_error:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 33117d4ffce0..89cb484f1cfb 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -335,7 +335,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in sin;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nn->ntf_refcnt))
goto out;
if (nn->nfsd_serv) {
@@ -344,6 +345,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
}
+ atomic_dec(&nn->ntf_refcnt);
+ wake_up(&nn->ntf_wq);
out:
return NOTIFY_DONE;
@@ -363,7 +366,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in6 sin6;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nn->ntf_refcnt))
goto out;
if (nn->nfsd_serv) {
@@ -374,7 +378,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
}
-
+ atomic_dec(&nn->ntf_refcnt);
+ wake_up(&nn->ntf_wq);
out:
return NOTIFY_DONE;
}
@@ -391,6 +396,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ atomic_dec(&nn->ntf_refcnt);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -398,6 +404,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
+ wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
/*
* write_ports can create the server without actually starting
@@ -517,6 +524,7 @@ int nfsd_create_serv(struct net *net)
register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
+ atomic_inc(&nn->ntf_refcnt);
ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
return 0;
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index f572538dcc4f..9f3ffba41533 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1979,7 +1979,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
struct nilfs_inode_info *ii, *n;
- int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+ int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
int defer_iput = false;
spin_lock(&nilfs->ns_inode_lock);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 3ce20cd44a20..3073b646e1ba 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -141,7 +141,7 @@ void __nilfs_error(struct super_block *sb, const char *function,
if (nilfs_test_opt(nilfs, ERRORS_RO)) {
printk(KERN_CRIT "Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
}
@@ -869,7 +869,7 @@ int nilfs_store_magic_and_option(struct super_block *sb,
/* FS independent flags */
#ifdef NILFS_ATIME_DISABLE
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
#endif
nilfs_set_default_options(sb, sbp);
@@ -1133,7 +1133,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
goto restore_opts;
}
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
err = -EINVAL;
@@ -1143,12 +1143,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
/* Shutting down log writer */
nilfs_detach_log_writer(sb);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
* Remounting a valid RW partition RDONLY, so set
@@ -1178,7 +1178,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
root = NILFS_I(d_inode(sb->s_root))->i_root;
err = nilfs_attach_log_writer(sb, root);
@@ -1212,7 +1212,7 @@ static int nilfs_parse_snapshot_option(const char *option,
const char *msg = NULL;
int err;
- if (!(sd->flags & MS_RDONLY)) {
+ if (!(sd->flags & SB_RDONLY)) {
msg = "read-only option is not specified";
goto parse_error;
}
@@ -1286,7 +1286,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
struct dentry *root_dentry;
int err, s_new = false;
- if (!(flags & MS_RDONLY))
+ if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1327,14 +1327,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
sb_set_blocksize(s, block_size(sd.bdev));
- err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
+ err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
if (err)
goto failed_super;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
} else if (!sd.cno) {
if (nilfs_tree_is_busy(s->s_root)) {
- if ((flags ^ s->s_flags) & MS_RDONLY) {
+ if ((flags ^ s->s_flags) & SB_RDONLY) {
nilfs_msg(s, KERN_ERR,
"the device already has a %s mount.",
sb_rdonly(s) ? "read-only" : "read/write");
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index afebb5067cec..1a85317e83f0 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -220,7 +220,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
if (!valid_fs) {
nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
nilfs_msg(sb, KERN_INFO,
"recovery required for readonly filesystem");
nilfs_msg(sb, KERN_INFO,
@@ -286,7 +286,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
if (valid_fs)
goto skip_recovery;
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
__u64 features;
if (nilfs_test_opt(nilfs, NORECOVERY)) {
@@ -309,7 +309,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
err = -EROFS;
goto failed_unload;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
nilfs_msg(sb, KERN_ERR,
"recovery cancelled because norecovery option was specified for a read/write mount");
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 81d8959b6aef..219b269c737e 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -67,7 +67,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
/*
* If i_count is zero, the inode cannot have any watches and
- * doing an __iget/iput with MS_ACTIVE clear would actually
+ * doing an __iget/iput with SB_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
diff --git a/fs/nsfs.c b/fs/nsfs.c
index ef243e14b6eb..7c6f76d29f56 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -255,5 +255,5 @@ void __init nsfs_init(void)
nsfs_mnt = kern_mount(&nsfs);
if (IS_ERR(nsfs_mnt))
panic("can't set nsfs up\n");
- nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+ nsfs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 3f70f041dbe9..bb7159f697f2 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -473,7 +473,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
#ifndef NTFS_RW
/* For read-only compiled driver, enforce read-only flag. */
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
#else /* NTFS_RW */
/*
* For the read-write compiled driver, if we are remounting read-write,
@@ -487,7 +487,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
* When remounting read-only, mark the volume clean if no volume errors
* have occurred.
*/
- if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+ if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
static const char *es = ". Cannot remount read-write.";
/* Remounting read-write. */
@@ -548,7 +548,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
NVolSetErrors(vol);
return -EROFS;
}
- } else if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+ } else if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
/* Remounting read-only. */
if (!NVolErrors(vol)) {
if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
@@ -1799,7 +1799,7 @@ static bool load_system_files(ntfs_volume *vol)
es3);
goto iput_mirr_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s",
!vol->mftmirr_ino ? es1 : es2, es3);
} else
@@ -1937,7 +1937,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_vol_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -1974,7 +1974,7 @@ get_ctx_vol_failed:
}
goto iput_logfile_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -2019,7 +2019,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_root_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -2042,7 +2042,7 @@ get_ctx_vol_failed:
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
* Do not set NVolErrors() because ntfs_remount() might manage
* to set the dirty flag in which case all would be well.
@@ -2055,7 +2055,7 @@ get_ctx_vol_failed:
* If (still) a read-write mount, set the NT4 compatibility flag on
* newer NTFS version volumes.
*/
- if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
+ if (!(sb->s_flags & SB_RDONLY) && (vol->major_ver > 1) &&
ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
static const char *es1 = "Failed to set NT4 compatibility flag";
static const char *es2 = ". Run chkdsk.";
@@ -2069,7 +2069,7 @@ get_ctx_vol_failed:
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif
@@ -2087,7 +2087,7 @@ get_ctx_vol_failed:
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
@@ -2128,7 +2128,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_quota_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -2150,7 +2150,7 @@ get_ctx_vol_failed:
goto iput_quota_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
/*
@@ -2171,7 +2171,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_usnjrnl_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -2194,7 +2194,7 @@ get_ctx_vol_failed:
goto iput_usnjrnl_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
@@ -2728,7 +2728,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
lockdep_off();
ntfs_debug("Entering.");
#ifndef NTFS_RW
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
#endif /* ! NTFS_RW */
/* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 8d779227370a..bebe59feca58 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,7 +140,7 @@ static void o2net_rx_until_empty(struct work_struct *work);
static void o2net_shutdown_sc(struct work_struct *work);
static void o2net_listen_data_ready(struct sock *sk);
static void o2net_sc_send_keep_req(struct work_struct *work);
-static void o2net_idle_timer(unsigned long data);
+static void o2net_idle_timer(struct timer_list *t);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
@@ -450,8 +450,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
- setup_timer(&sc->sc_idle_timeout, o2net_idle_timer,
- (unsigned long)sc);
+ timer_setup(&sc->sc_idle_timeout, o2net_idle_timer, 0);
sclog(sc, "alloced\n");
@@ -1517,9 +1516,9 @@ static void o2net_sc_send_keep_req(struct work_struct *work)
/* socket shutdown does a del_timer_sync against this as it tears down.
* we can't start this timer until we've got to the point in sc buildup
* where shutdown is going to be involved */
-static void o2net_idle_timer(unsigned long data)
+static void o2net_idle_timer(struct timer_list *t)
{
- struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
+ struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
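The o2net change is part of the tree-wide timer API conversion: the expiry callback now receives the struct timer_list pointer itself, and from_timer() (a container_of() wrapper) recovers the enclosing object, so the unsigned long data cookie passed to setup_timer() disappears. A minimal sketch of the pattern on a hypothetical structure:

    #include <linux/timer.h>

    struct example_conn {
            struct timer_list idle_timer;
            /* ... connection state ... */
    };

    static void example_idle_expired(struct timer_list *t)
    {
            struct example_conn *conn = from_timer(conn, t, idle_timer);

            /* handle the idle timeout for 'conn' */
    }

    static void example_conn_init(struct example_conn *conn)
    {
            timer_setup(&conn->idle_timer, example_idle_expired, 0);
    }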
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index dc455d45a66a..a1d051055472 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -227,7 +227,7 @@ int ocfs2_should_update_atime(struct inode *inode,
return 0;
if ((inode->i_flags & S_NOATIME) ||
- ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+ ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
return 0;
/*
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 040bbb6a6e4b..80efa5699fb0 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -675,9 +675,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
}
/* We're going to/from readonly mode. */
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
/* Disable quota accounting before remounting RO */
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
ret = ocfs2_susp_quotas(osb, 0);
if (ret < 0)
goto out;
@@ -691,8 +691,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
goto unlock_osb;
}
- if (*flags & MS_RDONLY) {
- sb->s_flags |= MS_RDONLY;
+ if (*flags & SB_RDONLY) {
+ sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
@@ -709,14 +709,14 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
ret = -EINVAL;
goto unlock_osb;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
- if (!ret && !(*flags & MS_RDONLY)) {
+ if (!ret && !(*flags & SB_RDONLY)) {
if (sb_any_quota_suspended(sb))
ret = ocfs2_susp_quotas(osb, 1);
else
@@ -724,7 +724,7 @@ unlock_osb:
if (ret < 0) {
/* Return back changes... */
spin_lock(&osb->osb_lock);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
spin_unlock(&osb->osb_lock);
goto out;
@@ -744,9 +744,9 @@ unlock_osb:
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
- MS_POSIXACL : 0);
+ SB_POSIXACL : 0);
}
out:
return ret;
@@ -1057,10 +1057,10 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = OCFS2_SUPER_MAGIC;
- sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
- ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~(SB_POSIXACL | SB_NOSEC)) |
+ ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
- /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+ /* Hard readonly mode only if: bdev_read_only, SB_RDONLY,
* heartbeat=none */
if (bdev_read_only(sb->s_bdev)) {
if (!sb_rdonly(sb)) {
@@ -2057,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb->s_xattr = ocfs2_xattr_handlers;
sb->s_time_gran = 1;
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
/* this is needed to support O_LARGEFILE */
cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
@@ -2568,7 +2568,7 @@ static int ocfs2_handle_error(struct super_block *sb)
return rv;
pr_crit("OCFS2: File system is now read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ocfs2_set_ro_flag(osb, 0);
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 5fdf269ba82e..c5898c59d411 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -901,7 +901,7 @@ static int ocfs2_xattr_list_entry(struct super_block *sb,
case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
- if (!(sb->s_flags & MS_POSIXACL))
+ if (!(sb->s_flags & SB_POSIXACL))
return 0;
break;
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 13215f26e321..2200662a9bf1 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -369,7 +369,7 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
static int openprom_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
return 0;
}
@@ -386,7 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
struct op_inode_info *oi;
int ret;
- s->s_flags |= MS_NOATIME;
+ s->s_flags |= SB_NOATIME;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = OPENPROM_SUPER_MAGIC;
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index c2d8233b1e82..480ea059a680 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -155,13 +155,11 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int orangefs_init_acl(struct inode *inode, struct inode *dir)
{
- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct posix_acl *default_acl, *acl;
umode_t mode = inode->i_mode;
+ struct iattr iattr;
int error = 0;
- ClearModeFlag(orangefs_inode);
-
error = posix_acl_create(dir, &mode, &default_acl, &acl);
if (error)
return error;
@@ -180,9 +178,11 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
/* If mode of the inode was changed, then do a forcible ->setattr */
if (mode != inode->i_mode) {
- SetModeFlag(orangefs_inode);
+ memset(&iattr, 0, sizeof iattr);
inode->i_mode = mode;
- orangefs_flush_inode(inode);
+ iattr.ia_mode = mode;
+ iattr.ia_valid |= ATTR_MODE;
+ orangefs_inode_setattr(inode, &iattr);
}
return error;
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index a8cc588d6224..e2c2699d8016 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -386,7 +386,6 @@ static int orangefs_dir_release(struct inode *inode, struct file *file)
{
struct orangefs_dir *od = file->private_data;
struct orangefs_dir_part *part = od->part;
- orangefs_flush_inode(inode);
while (part) {
struct orangefs_dir_part *next = part->next;
vfree(part);
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index e4a8e6a7eb17..1668fd645c45 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -383,9 +383,15 @@ out:
if (type == ORANGEFS_IO_READ) {
file_accessed(file);
} else {
- SetMtimeFlag(orangefs_inode);
- inode->i_mtime = current_time(inode);
- mark_inode_dirty_sync(inode);
+ file_update_time(file);
+ /*
+ * Must invalidate to ensure write loop doesn't
+ * prevent kernel from reading updated
+ * attribute. Size probably changed because of
+ * the write, and other clients could update
+ * any other attribute.
+ */
+ orangefs_inode->getattr_time = jiffies - 1;
}
}
@@ -615,8 +621,6 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
"orangefs_file_release: called on %pD\n",
file);
- orangefs_flush_inode(inode);
-
/*
* remove all associated inode pages from the page cache and
* readahead cache (if any); this forces an expensive refresh of
@@ -666,8 +670,6 @@ static int orangefs_fsync(struct file *file,
ret);
op_release(new_op);
-
- orangefs_flush_inode(file_inode(file));
return ret;
}
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 28825a5b6d09..fe1d705ad91f 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -290,6 +290,22 @@ int orangefs_permission(struct inode *inode, int mask)
return generic_permission(inode, mask);
}
+int orangefs_update_time(struct inode *inode, struct timespec *time, int flags)
+{
+ struct iattr iattr;
+ gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
+ get_khandle_from_ino(inode));
+ generic_update_time(inode, time, flags);
+ memset(&iattr, 0, sizeof iattr);
+ if (flags & S_ATIME)
+ iattr.ia_valid |= ATTR_ATIME;
+ if (flags & S_CTIME)
+ iattr.ia_valid |= ATTR_CTIME;
+ if (flags & S_MTIME)
+ iattr.ia_valid |= ATTR_MTIME;
+ return orangefs_inode_setattr(inode, &iattr);
+}
+
/* ORANGEFS2 implementation of VFS inode operations for files */
const struct inode_operations orangefs_file_inode_operations = {
.get_acl = orangefs_get_acl,
@@ -298,6 +314,7 @@ const struct inode_operations orangefs_file_inode_operations = {
.getattr = orangefs_getattr,
.listxattr = orangefs_listxattr,
.permission = orangefs_permission,
+ .update_time = orangefs_update_time,
};
static int orangefs_init_iops(struct inode *inode)
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 7e9e5d0ea3bc..c98bba2dbc94 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -22,7 +22,9 @@ static int orangefs_create(struct inode *dir,
{
struct orangefs_inode_s *parent = ORANGEFS_I(dir);
struct orangefs_kernel_op_s *new_op;
+ struct orangefs_object_kref ref;
struct inode *inode;
+ struct iattr iattr;
int ret;
gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd\n",
@@ -55,8 +57,10 @@ static int orangefs_create(struct inode *dir,
if (ret < 0)
goto out;
- inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0,
- &new_op->downcall.resp.create.refn);
+ ref = new_op->downcall.resp.create.refn;
+ op_release(new_op);
+
+ inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, &ref);
if (IS_ERR(inode)) {
gossip_err("%s: Failed to allocate inode for file :%pd:\n",
__func__,
@@ -82,12 +86,13 @@ static int orangefs_create(struct inode *dir,
__func__,
dentry);
- SetMtimeFlag(parent);
dir->i_mtime = dir->i_ctime = current_time(dir);
+ memset(&iattr, 0, sizeof iattr);
+ iattr.ia_valid |= ATTR_MTIME;
+ orangefs_inode_setattr(dir, &iattr);
mark_inode_dirty_sync(dir);
ret = 0;
out:
- op_release(new_op);
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: %pd: returning %d\n",
__func__,
@@ -221,6 +226,7 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
struct orangefs_inode_s *parent = ORANGEFS_I(dir);
struct orangefs_kernel_op_s *new_op;
+ struct iattr iattr;
int ret;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -253,8 +259,10 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
if (!ret) {
drop_nlink(inode);
- SetMtimeFlag(parent);
dir->i_mtime = dir->i_ctime = current_time(dir);
+ memset(&iattr, 0, sizeof iattr);
+ iattr.ia_valid |= ATTR_MTIME;
+ orangefs_inode_setattr(dir, &iattr);
mark_inode_dirty_sync(dir);
}
return ret;
@@ -266,7 +274,9 @@ static int orangefs_symlink(struct inode *dir,
{
struct orangefs_inode_s *parent = ORANGEFS_I(dir);
struct orangefs_kernel_op_s *new_op;
+ struct orangefs_object_kref ref;
struct inode *inode;
+ struct iattr iattr;
int mode = 755;
int ret;
@@ -307,8 +317,10 @@ static int orangefs_symlink(struct inode *dir,
goto out;
}
- inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0,
- &new_op->downcall.resp.sym.refn);
+ ref = new_op->downcall.resp.sym.refn;
+ op_release(new_op);
+
+ inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, &ref);
if (IS_ERR(inode)) {
gossip_err
("*** Failed to allocate orangefs symlink inode\n");
@@ -331,12 +343,13 @@ static int orangefs_symlink(struct inode *dir,
get_khandle_from_ino(inode),
dentry);
- SetMtimeFlag(parent);
dir->i_mtime = dir->i_ctime = current_time(dir);
+ memset(&iattr, 0, sizeof iattr);
+ iattr.ia_valid |= ATTR_MTIME;
+ orangefs_inode_setattr(dir, &iattr);
mark_inode_dirty_sync(dir);
ret = 0;
out:
- op_release(new_op);
return ret;
}
@@ -344,7 +357,9 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
{
struct orangefs_inode_s *parent = ORANGEFS_I(dir);
struct orangefs_kernel_op_s *new_op;
+ struct orangefs_object_kref ref;
struct inode *inode;
+ struct iattr iattr;
int ret;
new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR);
@@ -373,8 +388,10 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
goto out;
}
- inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0,
- &new_op->downcall.resp.mkdir.refn);
+ ref = new_op->downcall.resp.mkdir.refn;
+ op_release(new_op);
+
+ inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, &ref);
if (IS_ERR(inode)) {
gossip_err("*** Failed to allocate orangefs dir inode\n");
ret = PTR_ERR(inode);
@@ -400,11 +417,12 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
* NOTE: we have no good way to keep nlink consistent for directories
* across clients; keep constant at 1.
*/
- SetMtimeFlag(parent);
dir->i_mtime = dir->i_ctime = current_time(dir);
+ memset(&iattr, 0, sizeof iattr);
+ iattr.ia_valid |= ATTR_MTIME;
+ orangefs_inode_setattr(dir, &iattr);
mark_inode_dirty_sync(dir);
out:
- op_release(new_op);
return ret;
}
@@ -470,4 +488,5 @@ const struct inode_operations orangefs_dir_inode_operations = {
.getattr = orangefs_getattr,
.listxattr = orangefs_listxattr,
.permission = orangefs_permission,
+ .update_time = orangefs_update_time,
};
diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h
index b6001bb28f5a..c7db56a31b92 100644
--- a/fs/orangefs/orangefs-debug.h
+++ b/fs/orangefs/orangefs-debug.h
@@ -15,8 +15,10 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/kernel.h>
#else
#include <stdint.h>
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#define GOSSIP_NO_DEBUG (__u64)0
@@ -88,6 +90,6 @@ static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
};
static const int num_kmod_keyword_mask_map = (int)
- (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s));
+ (ARRAY_SIZE(s_kmod_keyword_mask_map));
#endif /* __ORANGEFS_DEBUG_H */
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index f44d5eb74fcc..97adf7d100b5 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -209,37 +209,10 @@ struct orangefs_inode_s {
struct inode vfs_inode;
sector_t last_failed_block_index_read;
- /*
- * State of in-memory attributes not yet flushed to disk associated
- * with this object
- */
- unsigned long pinode_flags;
-
unsigned long getattr_time;
u32 getattr_mask;
};
-#define P_ATIME_FLAG 0
-#define P_MTIME_FLAG 1
-#define P_CTIME_FLAG 2
-#define P_MODE_FLAG 3
-
-#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
-#define SetAtimeFlag(pinode) set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
-#define AtimeFlag(pinode) test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
-
-#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
-#define SetMtimeFlag(pinode) set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
-#define MtimeFlag(pinode) test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
-
-#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
-#define SetCtimeFlag(pinode) set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
-#define CtimeFlag(pinode) test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
-
-#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
-#define SetModeFlag(pinode) set_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
-#define ModeFlag(pinode) test_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
-
/* per superblock private orangefs info */
struct orangefs_sb_info_s {
struct orangefs_khandle root_khandle;
@@ -436,6 +409,8 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
int orangefs_permission(struct inode *inode, int mask);
+int orangefs_update_time(struct inode *, struct timespec *, int);
+
/*
* defined in xattr.c
*/
@@ -478,8 +453,6 @@ bool __is_daemon_in_service(void);
*/
__s32 fsid_of_op(struct orangefs_kernel_op_s *op);
-int orangefs_flush_inode(struct inode *inode);
-
ssize_t orangefs_inode_getxattr(struct inode *inode,
const char *name,
void *buffer,
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index f82336496311..97fe93129f38 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -4,6 +4,7 @@
*
* See COPYING in top-level directory.
*/
+#include <linux/kernel.h>
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
@@ -437,89 +438,8 @@ int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
op_release(new_op);
- /*
- * successful setattr should clear the atime, mtime and
- * ctime flags.
- */
- if (ret == 0) {
- ClearAtimeFlag(orangefs_inode);
- ClearMtimeFlag(orangefs_inode);
- ClearCtimeFlag(orangefs_inode);
- ClearModeFlag(orangefs_inode);
+ if (ret == 0)
orangefs_inode->getattr_time = jiffies - 1;
- }
-
- return ret;
-}
-
-int orangefs_flush_inode(struct inode *inode)
-{
- /*
- * If it is a dirty inode, this function gets called.
- * Gather all the information that needs to be setattr'ed
- * Right now, this will only be used for mode, atime, mtime
- * and/or ctime.
- */
- struct iattr wbattr;
- int ret;
- int mtime_flag;
- int ctime_flag;
- int atime_flag;
- int mode_flag;
- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
-
- memset(&wbattr, 0, sizeof(wbattr));
-
- /*
- * check inode flags up front, and clear them if they are set. This
- * will prevent multiple processes from all trying to flush the same
- * inode if they call close() simultaneously
- */
- mtime_flag = MtimeFlag(orangefs_inode);
- ClearMtimeFlag(orangefs_inode);
- ctime_flag = CtimeFlag(orangefs_inode);
- ClearCtimeFlag(orangefs_inode);
- atime_flag = AtimeFlag(orangefs_inode);
- ClearAtimeFlag(orangefs_inode);
- mode_flag = ModeFlag(orangefs_inode);
- ClearModeFlag(orangefs_inode);
-
- /* -- Lazy atime,mtime and ctime update --
- * Note: all times are dictated by server in the new scheme
- * and not by the clients
- *
- * Also mode updates are being handled now..
- */
-
- if (mtime_flag)
- wbattr.ia_valid |= ATTR_MTIME;
- if (ctime_flag)
- wbattr.ia_valid |= ATTR_CTIME;
- if (atime_flag)
- wbattr.ia_valid |= ATTR_ATIME;
-
- if (mode_flag) {
- wbattr.ia_mode = inode->i_mode;
- wbattr.ia_valid |= ATTR_MODE;
- }
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "*********** orangefs_flush_inode: %pU "
- "(ia_valid %d)\n",
- get_khandle_from_ino(inode),
- wbattr.ia_valid);
- if (wbattr.ia_valid == 0) {
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "orangefs_flush_inode skipping setattr()\n");
- return 0;
- }
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "orangefs_flush_inode (%pU) writing mode %o\n",
- get_khandle_from_ino(inode),
- inode->i_mode);
-
- ret = orangefs_inode_setattr(inode, &wbattr);
return ret;
}
@@ -606,7 +526,7 @@ int orangefs_normalize_to_errno(__s32 error_code)
/* Convert ORANGEFS encoded errno values into regular errno values. */
} else if ((-error_code) & ORANGEFS_ERROR_BIT) {
i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS);
- if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping))
+ if (i < ARRAY_SIZE(PINT_errno_mapping))
error_code = -PINT_errno_mapping[i];
else
error_code = -EINVAL;
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 47ebd9bfd1a1..36f1390b5ed7 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -40,7 +40,7 @@ static int orangefs_show_options(struct seq_file *m, struct dentry *root)
{
struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb);
- if (root->d_sb->s_flags & MS_POSIXACL)
+ if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(m, ",acl");
if (orangefs_sb->flags & ORANGEFS_OPT_INTR)
seq_puts(m, ",intr");
@@ -60,7 +60,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
* Force any potential flags that might be set from the mount
* to zero, ie, initialize to unset.
*/
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
@@ -73,7 +73,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
token = match_token(p, tokens, args);
switch (token) {
case Opt_acl:
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
break;
case Opt_intr:
orangefs_sb->flags |= ORANGEFS_OPT_INTR;
@@ -99,8 +99,6 @@ static void orangefs_inode_cache_ctor(void *req)
inode_init_once(&orangefs_inode->vfs_inode);
init_rwsem(&orangefs_inode->xattr_sem);
-
- orangefs_inode->vfs_inode.i_version = 1;
}
static struct inode *orangefs_alloc_inode(struct super_block *sb)
@@ -119,7 +117,6 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb)
orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL;
orangefs_inode->last_failed_block_index_read = 0;
memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target));
- orangefs_inode->pinode_flags = 0;
gossip_debug(GOSSIP_SUPER_DEBUG,
"orangefs_alloc_inode: allocated %p\n",
@@ -299,21 +296,9 @@ void fsid_key_table_finalize(void)
{
}
-/* Called whenever the VFS dirties the inode in response to atime updates */
-static void orangefs_dirty_inode(struct inode *inode, int flags)
-{
- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
-
- gossip_debug(GOSSIP_SUPER_DEBUG,
- "orangefs_dirty_inode: %pU\n",
- get_khandle_from_ino(inode));
- SetAtimeFlag(orangefs_inode);
-}
-
static const struct super_operations orangefs_s_ops = {
.alloc_inode = orangefs_alloc_inode,
.destroy_inode = orangefs_destroy_inode,
- .dirty_inode = orangefs_dirty_inode,
.drop_inode = generic_delete_inode,
.statfs = orangefs_statfs,
.remount_fs = orangefs_remount_fs,
@@ -522,7 +507,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
ret = orangefs_fill_sb(sb,
&new_op->downcall.resp.fs_mount, data,
- flags & MS_SILENT ? 1 : 0);
+ flags & SB_SILENT ? 1 : 0);
if (ret) {
d = ERR_PTR(ret);
diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c
index d856cdf91763..db107fe91ab3 100644
--- a/fs/orangefs/symlink.c
+++ b/fs/orangefs/symlink.c
@@ -15,4 +15,5 @@ const struct inode_operations orangefs_symlink_inode_operations = {
.getattr = orangefs_getattr,
.listxattr = orangefs_listxattr,
.permission = orangefs_permission,
+ .update_time = orangefs_update_time,
};
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index be03578181d2..288d20f9a55a 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -326,7 +326,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
{
struct ovl_fs *ofs = sb->s_fs_info;
- if (!(*flags & MS_RDONLY) && ovl_force_readonly(ofs))
+ if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
return -EROFS;
return 0;
@@ -1190,7 +1190,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_err;
if (!ofs->workdir)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
@@ -1203,7 +1203,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
/* If the upper fs is nonexistent, we mark overlayfs r/o too */
if (!ofs->upper_mnt)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
else if (ofs->upper_mnt->mnt_sb != ofs->same_sb)
ofs->same_sb = NULL;
@@ -1213,7 +1213,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_free_oe;
if (!ofs->indexdir)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/* Show index=off/on in /proc/mounts for any of the reasons above */
@@ -1227,7 +1227,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ovl_super_operations;
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
- sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+ sb->s_flags |= SB_POSIXACL | SB_NOREMOTELOCK;
err = -ENOMEM;
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9d357b2ea6cb..28fa85276eec 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -443,8 +443,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
save_stack_trace_tsk(task, &trace);
for (i = 0; i < trace.nr_entries; i++) {
- seq_printf(m, "[<%pK>] %pB\n",
- (void *)entries[i], (void *)entries[i]);
+ seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
}
unlock_trace(task);
}
@@ -1682,7 +1681,7 @@ const struct inode_operations proc_pid_link_inode_operations = {
/* building an inode */
-void task_dump_owner(struct task_struct *task, mode_t mode,
+void task_dump_owner(struct task_struct *task, umode_t mode,
kuid_t *ruid, kgid_t *rgid)
{
/* Depending on the state of dumpable compute who should own a
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 225f541f7078..dd0f82622427 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -483,7 +483,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
/* User space would break if executables or devices appear on proc */
s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
- s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
+ s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = PROC_SUPER_MAGIC;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 9aad373cf11d..4a67188c8d74 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -100,7 +100,7 @@ static inline struct task_struct *get_proc_task(struct inode *inode)
return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}
-void task_dump_owner(struct task_struct *task, mode_t mode,
+void task_dump_owner(struct task_struct *task, umode_t mode,
kuid_t *ruid, kgid_t *rgid);
unsigned name_to_int(const struct qstr *qstr);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 4e42aba97f2e..ede8e64974be 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -91,7 +91,7 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
{
struct pid_namespace *ns;
- if (flags & MS_KERNMOUNT) {
+ if (flags & SB_KERNMOUNT) {
ns = data;
data = NULL;
} else {
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 7b635d173213..b786840facd9 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -45,10 +45,10 @@ struct proc_fs_info {
static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
static const struct proc_fs_info fs_info[] = {
- { MS_SYNCHRONOUS, ",sync" },
- { MS_DIRSYNC, ",dirsync" },
- { MS_MANDLOCK, ",mand" },
- { MS_LAZYTIME, ",lazytime" },
+ { SB_SYNCHRONOUS, ",sync" },
+ { SB_DIRSYNC, ",dirsync" },
+ { SB_MANDLOCK, ",mand" },
+ { SB_LAZYTIME, ",lazytime" },
{ 0, NULL }
};
const struct proc_fs_info *fs_infop;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 423159abd501..691032107f8c 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
static int pstore_new_entry;
-static void pstore_timefunc(unsigned long);
+static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);
static void pstore_dowork(struct work_struct *);
@@ -890,7 +890,7 @@ static void pstore_dowork(struct work_struct *work)
pstore_get_records(1);
}
-static void pstore_timefunc(unsigned long dummy)
+static void pstore_timefunc(struct timer_list *unused)
{
if (pstore_new_entry) {
pstore_new_entry = 0;
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 3a67cfb142d8..3d46fe302fcb 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -47,7 +47,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data)
sync_filesystem(sb);
qs = qnx4_sb(sb);
qs->Version = QNX4_VERSION;
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -199,7 +199,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
s->s_op = &qnx4_sops;
s->s_magic = QNX4_SUPER_MAGIC;
- s->s_flags |= MS_RDONLY; /* Yup, read-only yet */
+ s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
/* Check the superblock signature. Since the qnx4 code is
dangerous, we should leave as quickly as possible
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 1192422a1c56..4aeb26bcb4d0 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -56,7 +56,7 @@ static int qnx6_show_options(struct seq_file *seq, struct dentry *root)
static int qnx6_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -427,7 +427,7 @@ mmi_success:
}
s->s_op = &qnx6_sops;
s->s_magic = QNX6_SUPER_MAGIC;
- s->s_flags |= MS_RDONLY; /* Yup, read-only yet */
+ s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
/* ease the later tree level calculations */
sbi = QNX6_SB(s);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 11a48affa882..b13fc024d2ee 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2106,7 +2106,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
journal_end(th);
goto out_inserted_sd;
}
- } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+ } else if (inode->i_sb->s_flags & SB_POSIXACL) {
reiserfs_warning(inode->i_sb, "jdm-13090",
"ACLs aren't enabled in the fs, "
"but vfs thinks they are!");
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 69ff280bdfe8..70057359fbaf 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1960,7 +1960,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
/*
* Cancel flushing of old commits. Note that neither of these works
* will be requeued because superblock is being shutdown and doesn't
- * have MS_ACTIVE set.
+ * have SB_ACTIVE set.
*/
reiserfs_cancel_old_flush(sb);
/* wait for all commits to finish */
@@ -4302,7 +4302,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
* Avoid queueing work when sb is being shut down. Transaction
* will be flushed on journal shutdown.
*/
- if (sb->s_flags & MS_ACTIVE)
+ if (sb->s_flags & SB_ACTIVE)
queue_delayed_work(REISERFS_SB(sb)->commit_wq,
&journal->j_work, HZ / 10);
}
@@ -4393,7 +4393,7 @@ void reiserfs_abort_journal(struct super_block *sb, int errno)
if (!journal->j_errno)
journal->j_errno = errno;
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
set_bit(J_ABORTED, &journal->j_state);
#ifdef CONFIG_REISERFS_CHECK
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 64f49cafbc5b..7e288d97adcb 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -390,7 +390,7 @@ void __reiserfs_error(struct super_block *sb, const char *id,
return;
reiserfs_info(sb, "Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
reiserfs_abort_journal(sb, -EIO);
}
@@ -409,7 +409,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
error_buf);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
reiserfs_abort_journal(sb, errno);
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 5464ec517702..020c9cacbb2f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -121,7 +121,7 @@ void reiserfs_schedule_old_flush(struct super_block *s)
* Avoid scheduling flush when sb is being shut down. It can race
* with journal shutdown and free still queued delayed work.
*/
- if (sb_rdonly(s) || !(s->s_flags & MS_ACTIVE))
+ if (sb_rdonly(s) || !(s->s_flags & SB_ACTIVE))
return;
spin_lock(&sbi->old_work_lock);
@@ -252,11 +252,11 @@ static int finish_unfinished(struct super_block *s)
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- if (s->s_flags & MS_ACTIVE) {
+ if (s->s_flags & SB_ACTIVE) {
ms_active_set = 0;
} else {
ms_active_set = 1;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
}
/* Turn on quotas so that they are updated correctly */
for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
@@ -411,7 +411,7 @@ static int finish_unfinished(struct super_block *s)
reiserfs_write_lock(s);
if (ms_active_set)
/* Restore the flag back */
- s->s_flags &= ~MS_ACTIVE;
+ s->s_flags &= ~SB_ACTIVE;
#endif
pathrelse(&path);
if (done)
@@ -1521,7 +1521,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
goto out_err_unlock;
}
- if (*mount_flags & MS_RDONLY) {
+ if (*mount_flags & SB_RDONLY) {
reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
/* remount read-only */
@@ -1567,7 +1567,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
/* now it is safe to call journal_begin */
- s->s_flags &= ~MS_RDONLY;
+ s->s_flags &= ~SB_RDONLY;
err = journal_begin(&th, s, 10);
if (err)
goto out_err_unlock;
@@ -1575,7 +1575,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
- s->s_flags &= ~MS_RDONLY;
+ s->s_flags &= ~SB_RDONLY;
set_sb_umount_state(rs, REISERFS_ERROR_FS);
if (!old_format_only(s))
set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
@@ -1590,7 +1590,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
goto out_err_unlock;
reiserfs_write_unlock(s);
- if (!(*mount_flags & MS_RDONLY)) {
+ if (!(*mount_flags & SB_RDONLY)) {
dquot_resume(s, -1);
reiserfs_write_lock(s);
finish_unfinished(s);
@@ -2055,7 +2055,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
SWARN(silent, s, "clm-7000",
"Detected readonly device, marking FS readonly");
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
}
args.objectid = REISERFS_ROOT_OBJECTID;
args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 46492fb37a4c..5dbf5324bdda 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -959,7 +959,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
/*
* We need to take a copy of the mount flags since things like
- * MS_RDONLY don't get set until *after* we're called.
+ * SB_RDONLY don't get set until *after* we're called.
* mount_flags != mount_options
*/
int reiserfs_xattr_init(struct super_block *s, int mount_flags)
@@ -971,7 +971,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (err)
goto error;
- if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) {
+ if (d_really_is_negative(privroot) && !(mount_flags & SB_RDONLY)) {
inode_lock(d_inode(s->s_root));
err = create_privroot(REISERFS_SB(s)->priv_root);
inode_unlock(d_inode(s->s_root));
@@ -999,11 +999,11 @@ error:
clear_bit(REISERFS_POSIXACL, &REISERFS_SB(s)->s_mount_opt);
}
- /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+ /* The super_block SB_POSIXACL must mirror the (no)acl mount option. */
if (reiserfs_posixacl(s))
- s->s_flags |= MS_POSIXACL;
+ s->s_flags |= SB_POSIXACL;
else
- s->s_flags &= ~MS_POSIXACL;
+ s->s_flags &= ~SB_POSIXACL;
return err;
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 0186fe6d39f3..8f06fd1f3d69 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -451,7 +451,7 @@ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static int romfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
@@ -502,7 +502,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = 0xFFFFFFFF;
sb->s_magic = ROMFS_MAGIC;
- sb->s_flags |= MS_RDONLY | MS_NOATIME;
+ sb->s_flags |= SB_RDONLY | SB_NOATIME;
sb->s_op = &romfs_super_ops;
#ifdef CONFIG_ROMFS_ON_MTD
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index cf01e15a7b16..8a73b97217c8 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -195,7 +195,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
(u64) le64_to_cpu(sblk->id_table_start));
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_op = &squashfs_super_ops;
err = -ENOMEM;
@@ -373,7 +373,7 @@ static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static int squashfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
diff --git a/fs/statfs.c b/fs/statfs.c
index b072a8bab71a..5b2a24f0f263 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -35,11 +35,11 @@ static int flags_by_mnt(int mnt_flags)
static int flags_by_sb(int s_flags)
{
int flags = 0;
- if (s_flags & MS_SYNCHRONOUS)
+ if (s_flags & SB_SYNCHRONOUS)
flags |= ST_SYNCHRONOUS;
- if (s_flags & MS_MANDLOCK)
+ if (s_flags & SB_MANDLOCK)
flags |= ST_MANDLOCK;
- if (s_flags & MS_RDONLY)
+ if (s_flags & SB_RDONLY)
flags |= ST_RDONLY;
return flags;
}
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 20b8f82e115b..fb49510c5dcf 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -30,7 +30,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
void *ns;
bool new_sb;
- if (!(flags & MS_KERNMOUNT)) {
+ if (!(flags & SB_KERNMOUNT)) {
if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
return ERR_PTR(-EPERM);
}
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3c47b7d5d4cf..bec9f79adb25 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -63,7 +63,7 @@ static int sysv_remount(struct super_block *sb, int *flags, char *data)
sync_filesystem(sb);
if (sbi->s_forced_ro)
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 0d56e486b392..89765ddfb738 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -333,7 +333,7 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
/* set up enough so that it can read an inode */
sb->s_op = &sysv_sops;
if (sbi->s_forced_ro)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
if (sbi->s_truncate)
sb->s_d_op = &sysv_dentry_operations;
root_inode = sysv_iget(sb, SYSV_ROOT_INO);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index a02aa59d1e24..dfe85069586e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1406,7 +1406,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
if (flags & S_MTIME)
inode->i_mtime = *time;
- if (!(inode->i_sb->s_flags & MS_LAZYTIME))
+ if (!(inode->i_sb->s_flags & SB_LAZYTIME))
iflags |= I_DIRTY_SYNC;
release = ui->dirty;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 3be28900bf37..fe77e9625e84 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -84,7 +84,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
if (!c->ro_error) {
c->ro_error = 1;
c->no_chk_data_crc = 0;
- c->vfs_sb->s_flags |= MS_RDONLY;
+ c->vfs_sb->s_flags |= SB_RDONLY;
ubifs_warn(c, "switched to read-only mode, error %d", err);
dump_stack();
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 7503e7cdf870..0beb285b143d 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -968,7 +968,7 @@ static int parse_standard_option(const char *option)
pr_notice("UBIFS: parse %s\n", option);
if (!strcmp(option, "sync"))
- return MS_SYNCHRONOUS;
+ return SB_SYNCHRONOUS;
return 0;
}
@@ -1160,8 +1160,8 @@ static int mount_ubifs(struct ubifs_info *c)
size_t sz;
c->ro_mount = !!sb_rdonly(c->vfs_sb);
- /* Suppress error messages while probing if MS_SILENT is set */
- c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
+ /* Suppress error messages while probing if SB_SILENT is set */
+ c->probing = !!(c->vfs_sb->s_flags & SB_SILENT);
err = init_constants_early(c);
if (err)
@@ -1852,7 +1852,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
return err;
}
- if (c->ro_mount && !(*flags & MS_RDONLY)) {
+ if (c->ro_mount && !(*flags & SB_RDONLY)) {
if (c->ro_error) {
ubifs_msg(c, "cannot re-mount R/W due to prior errors");
return -EROFS;
@@ -1864,7 +1864,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
err = ubifs_remount_rw(c);
if (err)
return err;
- } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+ } else if (!c->ro_mount && (*flags & SB_RDONLY)) {
if (c->ro_error) {
ubifs_msg(c, "cannot re-mount R/O due to prior errors");
return -EROFS;
@@ -2117,7 +2117,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
current->pid, name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
@@ -2143,18 +2143,18 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
kfree(c);
/* A new mount point for already mounted UBIFS */
dbg_gen("this ubi volume is already mounted");
- if (!!(flags & MS_RDONLY) != c1->ro_mount) {
+ if (!!(flags & SB_RDONLY) != c1->ro_mount) {
err = -EBUSY;
goto out_deact;
}
} else {
- err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (err)
goto out_deact;
/* We do not support atime */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
#ifndef CONFIG_UBIFS_ATIME_SUPPORT
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
#else
ubifs_msg(c, "full atime support is enabled.");
#endif
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 63c7468147eb..5ee7af879cc4 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1201,7 +1201,7 @@ struct ubifs_debug_info;
* @need_recovery: %1 if the file-system needs recovery
* @replaying: %1 during journal replay
* @mounting: %1 while mounting
- * @probing: %1 while attempting to mount if MS_SILENT mount flag is set
+ * @probing: %1 while attempting to mount if SB_SILENT mount flag is set
* @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
* @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay
@@ -1850,7 +1850,7 @@ __printf(2, 3)
void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
/*
* A conditional variant of 'ubifs_err()' which doesn't output anything
- * if probing (ie. MS_SILENT set).
+ * if probing (ie. SB_SILENT set).
*/
#define ubifs_errc(c, fmt, ...) \
do { \
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f80e0a0f24d3..f73239a9a97d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -650,7 +650,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
sync_filesystem(sb);
if (lvidiu) {
int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
- if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+ if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
return -EACCES;
}
@@ -673,10 +673,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
sbi->s_dmode = uopt.dmode;
write_unlock(&sbi->s_cred_lock);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out_unlock;
- if (*flags & MS_RDONLY)
+ if (*flags & SB_RDONLY)
udf_close_lvid(sb);
else
udf_open_lvid(sb);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index b5cd79065ef9..e727ee07dbe4 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -115,7 +115,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
@@ -205,7 +205,7 @@ do_more:
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
if (overflow) {
@@ -567,7 +567,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
@@ -688,7 +688,7 @@ cg_found:
succed:
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 916b4a428933..e1ef0f0a1353 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -112,7 +112,7 @@ void ufs_free_inode (struct inode * inode)
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
@@ -146,14 +146,14 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bh);
brelse(bh);
}
fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
UFSD("EXIT\n");
@@ -284,7 +284,7 @@ cg_found:
}
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
@@ -330,7 +330,7 @@ cg_found:
ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
mark_buffer_dirty(bh);
unlock_buffer(bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bh);
brelse(bh);
}
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 6440003f8ddc..4d497e9c6883 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -282,7 +282,7 @@ void ufs_error (struct super_block * sb, const char * function,
usb1->fs_clean = UFS_FSBAD;
ubh_mark_buffer_dirty(USPI_UBH(uspi));
ufs_mark_sb_dirty(sb);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
va_start(args, fmt);
vaf.fmt = fmt;
@@ -320,7 +320,7 @@ void ufs_panic (struct super_block * sb, const char * function,
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
pr_crit("panic (device %s): %s: %pV\n",
sb->s_id, function, &vaf);
va_end(args);
@@ -905,7 +905,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=old is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
@@ -921,7 +921,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=nextstep is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
@@ -937,7 +937,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=nextstep-cd is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
@@ -953,7 +953,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=openstep is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
@@ -968,7 +968,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=hp is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
default:
@@ -1125,21 +1125,21 @@ magic_found:
break;
case UFS_FSACTIVE:
pr_err("%s(): fs is active\n", __func__);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
break;
case UFS_FSBAD:
pr_err("%s(): fs is bad\n", __func__);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
break;
default:
pr_err("%s(): can't grok fs_clean 0x%x\n",
__func__, usb1->fs_clean);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
break;
}
} else {
pr_err("%s(): fs needs fsck\n", __func__);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/*
@@ -1328,7 +1328,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
return -EINVAL;
}
- if ((bool)(*mount_flags & MS_RDONLY) == sb_rdonly(sb)) {
+ if ((bool)(*mount_flags & SB_RDONLY) == sb_rdonly(sb)) {
UFS_SB(sb)->s_mount_opt = new_mount_opt;
mutex_unlock(&UFS_SB(sb)->s_lock);
return 0;
@@ -1337,7 +1337,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
/*
* fs was mounted as rw, remounting ro
*/
- if (*mount_flags & MS_RDONLY) {
+ if (*mount_flags & SB_RDONLY) {
ufs_put_super_internal(sb);
usb1->fs_time = cpu_to_fs32(sb, get_seconds());
if ((flags & UFS_ST_MASK) == UFS_ST_SUN
@@ -1346,7 +1346,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
ufs_set_fs_state(sb, usb1, usb3,
UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
ubh_mark_buffer_dirty (USPI_UBH(uspi));
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
} else {
/*
* fs was mounted as ro, remounting rw
@@ -1370,7 +1370,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
mutex_unlock(&UFS_SB(sb)->s_lock);
return -EPERM;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
#endif
}
UFS_SB(sb)->s_mount_opt = new_mount_opt;
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index 19e546a41251..89bf16b4d937 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -850,9 +850,9 @@ static void
xfs_iext_free_last_leaf(
struct xfs_ifork *ifp)
{
- ifp->if_u1.if_root = NULL;
ifp->if_height--;
kmem_free(ifp->if_u1.if_root);
+ ifp->if_u1.if_root = NULL;
}
void
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 1c90ec41e9df..c79a1616b79d 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -42,11 +42,6 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
-static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
-{
- return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
-}
-
/*
* Copy inode type and data and attr format specific information from the
* on-disk inode to the in-core inode and fork structures. For fifos, devices,
@@ -792,7 +787,8 @@ xfs_iflush_fork(
case XFS_DINODE_FMT_DEV:
if (iip->ili_fields & XFS_ILOG_DEV) {
ASSERT(whichfork == XFS_DATA_FORK);
- xfs_dinode_put_rdev(dip, sysv_encode_dev(VFS_I(ip)->i_rdev));
+ xfs_dinode_put_rdev(dip,
+ linux_to_xfs_dev_t(VFS_I(ip)->i_rdev));
}
break;
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 6282bfc1afa9..99562ec0de56 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -204,6 +204,16 @@ static inline kgid_t xfs_gid_to_kgid(uint32_t gid)
return make_kgid(&init_user_ns, gid);
}
+static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
+{
+ return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
+}
+
+static inline xfs_dev_t linux_to_xfs_dev_t(dev_t dev)
+{
+ return sysv_encode_dev(dev);
+}
+
/*
* Various platform dependent calls that don't fit anywhere else
*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 38d4227895ae..a503af96d780 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -781,17 +781,17 @@ xfs_log_mount_finish(
* something to an unlinked inode, the irele won't cause
* premature truncation and freeing of the inode, which results
* in log recovery failure. We have to evict the unreferenced
- * lru inodes after clearing MS_ACTIVE because we don't
+ * lru inodes after clearing SB_ACTIVE because we don't
* otherwise clean up the lru if there's a subsequent failure in
* xfs_mountfs, which leads to us leaking the inodes if nothing
* else (e.g. quotacheck) references the inodes before the
* mount failure occurs.
*/
- mp->m_super->s_flags |= MS_ACTIVE;
+ mp->m_super->s_flags |= SB_ACTIVE;
error = xlog_recover_finish(mp->m_log);
if (!error)
xfs_log_work_queue(mp);
- mp->m_super->s_flags &= ~MS_ACTIVE;
+ mp->m_super->s_flags &= ~SB_ACTIVE;
evict_inodes(mp->m_super);
/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index f663022353c0..5122d3021117 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -212,9 +212,9 @@ xfs_parseargs(
*/
if (sb_rdonly(sb))
mp->m_flags |= XFS_MOUNT_RDONLY;
- if (sb->s_flags & MS_DIRSYNC)
+ if (sb->s_flags & SB_DIRSYNC)
mp->m_flags |= XFS_MOUNT_DIRSYNC;
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
mp->m_flags |= XFS_MOUNT_WSYNC;
/*
@@ -1312,7 +1312,7 @@ xfs_fs_remount(
}
/* ro -> rw */
- if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
xfs_warn(mp,
"ro->rw transition prohibited on norecovery mount");
@@ -1368,7 +1368,7 @@ xfs_fs_remount(
}
/* rw -> ro */
- if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
/* Free the per-AG metadata reservation pool. */
error = xfs_fs_unreserve_ag_blocks(mp);
if (error) {
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 5f2f32408011..fcc5dfc70aa0 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -30,7 +30,7 @@ extern void xfs_qm_exit(void);
#ifdef CONFIG_XFS_POSIX_ACL
# define XFS_ACL_STRING "ACLs, "
-# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL)
+# define set_posix_acl_flag(sb) ((sb)->s_flags |= SB_POSIXACL)
#else
# define XFS_ACL_STRING
# define set_posix_acl_flag(sb) do { } while (0)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 757dc6ffc7ba..b234d54f2cb6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -805,15 +805,23 @@ static inline int pmd_trans_huge(pmd_t pmd)
{
return 0;
}
-#ifndef __HAVE_ARCH_PMD_WRITE
+#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
BUG();
return 0;
}
-#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 6abf0a3604dc..38d9c5861ed8 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -242,6 +242,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
unsigned int af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 7a7140543012..df9807a3caae 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -284,6 +284,11 @@ struct drm_display_info {
* @hdmi: advance features of a HDMI sink.
*/
struct drm_hdmi_info hdmi;
+
+ /**
+ * @non_desktop: Non desktop display (HMD).
+ */
+ bool non_desktop;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 6f35909b8add..2ec41d032e56 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -362,7 +362,8 @@ void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable);
+ bool rgb_quant_range_selectable,
+ bool is_hdmi2_sink);
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 0b4ac2ebc610..b21e827c5c78 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -728,6 +728,13 @@ struct drm_mode_config {
*/
struct drm_property *suggested_y_property;
+ /**
+ * @non_desktop_property: Optional connector property with a hint
+ * that device isn't a standard display, and the console/desktop,
+ * should not be displayed on it.
+ */
+ struct drm_property *non_desktop_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
deleted file mode 100644
index a75d304473d5..000000000000
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ /dev/null
@@ -1,887 +0,0 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MSM_BUS_IDS_H
-#define __MSM_BUS_IDS_H
-
-/* Aggregation types */
-#define AGG_SCHEME_NONE 0
-#define AGG_SCHEME_LEG 1
-#define AGG_SCHEME_1 2
-
-/* Topology related enums */
-#define MSM_BUS_FAB_DEFAULT 0
-#define MSM_BUS_FAB_APPSS 0
-#define MSM_BUS_FAB_SYSTEM 1024
-#define MSM_BUS_FAB_MMSS 2048
-#define MSM_BUS_FAB_SYSTEM_FPB 3072
-#define MSM_BUS_FAB_CPSS_FPB 4096
-
-#define MSM_BUS_FAB_BIMC 0
-#define MSM_BUS_FAB_SYS_NOC 1024
-#define MSM_BUS_FAB_MMSS_NOC 2048
-#define MSM_BUS_FAB_OCMEM_NOC 3072
-#define MSM_BUS_FAB_PERIPH_NOC 4096
-#define MSM_BUS_FAB_CONFIG_NOC 5120
-#define MSM_BUS_FAB_OCMEM_VNOC 6144
-#define MSM_BUS_FAB_MMSS_AHB 2049
-#define MSM_BUS_FAB_A0_NOC 6145
-#define MSM_BUS_FAB_A1_NOC 6146
-#define MSM_BUS_FAB_A2_NOC 6147
-#define MSM_BUS_FAB_GNOC 6148
-#define MSM_BUS_FAB_CR_VIRT 6149
-
-#define MSM_BUS_MASTER_FIRST 1
-#define MSM_BUS_MASTER_AMPSS_M0 1
-#define MSM_BUS_MASTER_AMPSS_M1 2
-#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
-#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
-#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
-#define MSM_BUS_MASTER_SPS 6
-#define MSM_BUS_MASTER_ADM_PORT0 7
-#define MSM_BUS_MASTER_ADM_PORT1 8
-#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
-#define MSM_BUS_MASTER_ADM1_PORT1 10
-#define MSM_BUS_MASTER_LPASS_PROC 11
-#define MSM_BUS_MASTER_MSS_PROCI 12
-#define MSM_BUS_MASTER_MSS_PROCD 13
-#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
-#define MSM_BUS_MASTER_LPASS 15
-#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
-#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
-#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
-#define MSM_BUS_MASTER_ADM1_CI 19
-#define MSM_BUS_MASTER_ADM0_CI 20
-#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
-#define MSM_BUS_MASTER_MDP_PORT0 22
-#define MSM_BUS_MASTER_MDP_PORT1 23
-#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
-#define MSM_BUS_MASTER_ROTATOR 25
-#define MSM_BUS_MASTER_GRAPHICS_3D 26
-#define MSM_BUS_MASTER_JPEG_DEC 27
-#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
-#define MSM_BUS_MASTER_VFE 29
-#define MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
-#define MSM_BUS_MASTER_VPE 30
-#define MSM_BUS_MASTER_JPEG_ENC 31
-#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
-#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
-#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
-#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
-#define MSM_BUS_MASTER_SPDM 36
-#define MSM_BUS_MASTER_RPM 37
-#define MSM_BUS_MASTER_MSS 38
-#define MSM_BUS_MASTER_RIVA 39
-#define MSM_BUS_MASTER_SNOC_VMEM 40
-#define MSM_BUS_MASTER_MSS_SW_PROC 41
-#define MSM_BUS_MASTER_MSS_FW_PROC 42
-#define MSM_BUS_MASTER_HMSS 43
-#define MSM_BUS_MASTER_GSS_NAV 44
-#define MSM_BUS_MASTER_PCIE 45
-#define MSM_BUS_MASTER_SATA 46
-#define MSM_BUS_MASTER_CRYPTO 47
-#define MSM_BUS_MASTER_VIDEO_CAP 48
-#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
-#define MSM_BUS_MASTER_VIDEO_ENC 50
-#define MSM_BUS_MASTER_VIDEO_DEC 51
-#define MSM_BUS_MASTER_LPASS_AHB 52
-#define MSM_BUS_MASTER_QDSS_BAM 53
-#define MSM_BUS_MASTER_SNOC_CFG 54
-#define MSM_BUS_MASTER_CRYPTO_CORE0 55
-#define MSM_BUS_MASTER_CRYPTO_CORE1 56
-#define MSM_BUS_MASTER_MSS_NAV 57
-#define MSM_BUS_MASTER_OCMEM_DMA 58
-#define MSM_BUS_MASTER_WCSS 59
-#define MSM_BUS_MASTER_QDSS_ETR 60
-#define MSM_BUS_MASTER_USB3 61
-#define MSM_BUS_MASTER_JPEG 62
-#define MSM_BUS_MASTER_VIDEO_P0 63
-#define MSM_BUS_MASTER_VIDEO_P1 64
-#define MSM_BUS_MASTER_MSS_PROC 65
-#define MSM_BUS_MASTER_JPEG_OCMEM 66
-#define MSM_BUS_MASTER_MDP_OCMEM 67
-#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
-#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
-#define MSM_BUS_MASTER_VFE_OCMEM 70
-#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
-#define MSM_BUS_MASTER_RPM_INST 72
-#define MSM_BUS_MASTER_RPM_DATA 73
-#define MSM_BUS_MASTER_RPM_SYS 74
-#define MSM_BUS_MASTER_DEHR 75
-#define MSM_BUS_MASTER_QDSS_DAP 76
-#define MSM_BUS_MASTER_TIC 77
-#define MSM_BUS_MASTER_SDCC_1 78
-#define MSM_BUS_MASTER_SDCC_3 79
-#define MSM_BUS_MASTER_SDCC_4 80
-#define MSM_BUS_MASTER_SDCC_2 81
-#define MSM_BUS_MASTER_TSIF 82
-#define MSM_BUS_MASTER_BAM_DMA 83
-#define MSM_BUS_MASTER_BLSP_2 84
-#define MSM_BUS_MASTER_USB_HSIC 85
-#define MSM_BUS_MASTER_BLSP_1 86
-#define MSM_BUS_MASTER_USB_HS 87
-#define MSM_BUS_MASTER_PNOC_CFG 88
-#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
-#define MSM_BUS_MASTER_IPA 90
-#define MSM_BUS_MASTER_QPIC 91
-#define MSM_BUS_MASTER_MDPE 92
-#define MSM_BUS_MASTER_USB_HS2 93
-#define MSM_BUS_MASTER_VPU 94
-#define MSM_BUS_MASTER_UFS 95
-#define MSM_BUS_MASTER_BCAST 96
-#define MSM_BUS_MASTER_CRYPTO_CORE2 97
-#define MSM_BUS_MASTER_EMAC 98
-#define MSM_BUS_MASTER_VPU_1 99
-#define MSM_BUS_MASTER_PCIE_1 100
-#define MSM_BUS_MASTER_USB3_1 101
-#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
-#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
-#define MSM_BUS_MASTER_TCU_0 104
-#define MSM_BUS_MASTER_TCU_1 105
-#define MSM_BUS_MASTER_CPP 106
-#define MSM_BUS_MASTER_AUDIO 107
-#define MSM_BUS_MASTER_PCIE_2 108
-#define MSM_BUS_MASTER_VFE1 109
-#define MSM_BUS_MASTER_XM_USB_HS1 110
-#define MSM_BUS_MASTER_PCNOC_BIMC_1 111
-#define MSM_BUS_MASTER_BIMC_PCNOC 112
-#define MSM_BUS_MASTER_XI_USB_HSIC 113
-#define MSM_BUS_MASTER_SGMII 114
-#define MSM_BUS_SPMI_FETCHER 115
-#define MSM_BUS_MASTER_GNOC_BIMC 116
-#define MSM_BUS_MASTER_CRVIRT_A2NOC 117
-#define MSM_BUS_MASTER_CNOC_A2NOC 118
-#define MSM_BUS_MASTER_WLAN 119
-#define MSM_BUS_MASTER_MSS_CE 120
-#define MSM_BUS_MASTER_CDSP_PROC 121
-#define MSM_BUS_MASTER_GNOC_SNOC 122
-#define MSM_BUS_MASTER_PIMEM 123
-#define MSM_BUS_MASTER_MASTER_LAST 124
-
-#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
-#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
-
-#define MSM_BUS_SNOC_MM_INT_0 10000
-#define MSM_BUS_SNOC_MM_INT_1 10001
-#define MSM_BUS_SNOC_MM_INT_2 10002
-#define MSM_BUS_SNOC_MM_INT_BIMC 10003
-#define MSM_BUS_SNOC_INT_0 10004
-#define MSM_BUS_SNOC_INT_1 10005
-#define MSM_BUS_SNOC_INT_BIMC 10006
-#define MSM_BUS_SNOC_BIMC_0_MAS 10007
-#define MSM_BUS_SNOC_BIMC_1_MAS 10008
-#define MSM_BUS_SNOC_QDSS_INT 10009
-#define MSM_BUS_PNOC_SNOC_MAS 10010
-#define MSM_BUS_PNOC_SNOC_SLV 10011
-#define MSM_BUS_PNOC_INT_0 10012
-#define MSM_BUS_PNOC_INT_1 10013
-#define MSM_BUS_PNOC_M_0 10014
-#define MSM_BUS_PNOC_M_1 10015
-#define MSM_BUS_BIMC_SNOC_MAS 10016
-#define MSM_BUS_BIMC_SNOC_SLV 10017
-#define MSM_BUS_PNOC_SLV_0 10018
-#define MSM_BUS_PNOC_SLV_1 10019
-#define MSM_BUS_PNOC_SLV_2 10020
-#define MSM_BUS_PNOC_SLV_3 10021
-#define MSM_BUS_PNOC_SLV_4 10022
-#define MSM_BUS_PNOC_SLV_8 10023
-#define MSM_BUS_PNOC_SLV_9 10024
-#define MSM_BUS_SNOC_BIMC_0_SLV 10025
-#define MSM_BUS_SNOC_BIMC_1_SLV 10026
-#define MSM_BUS_MNOC_BIMC_MAS 10027
-#define MSM_BUS_MNOC_BIMC_SLV 10028
-#define MSM_BUS_BIMC_MNOC_MAS 10029
-#define MSM_BUS_BIMC_MNOC_SLV 10030
-#define MSM_BUS_SNOC_BIMC_MAS 10031
-#define MSM_BUS_SNOC_BIMC_SLV 10032
-#define MSM_BUS_CNOC_SNOC_MAS 10033
-#define MSM_BUS_CNOC_SNOC_SLV 10034
-#define MSM_BUS_SNOC_CNOC_MAS 10035
-#define MSM_BUS_SNOC_CNOC_SLV 10036
-#define MSM_BUS_OVNOC_SNOC_MAS 10037
-#define MSM_BUS_OVNOC_SNOC_SLV 10038
-#define MSM_BUS_SNOC_OVNOC_MAS 10039
-#define MSM_BUS_SNOC_OVNOC_SLV 10040
-#define MSM_BUS_SNOC_PNOC_MAS 10041
-#define MSM_BUS_SNOC_PNOC_SLV 10042
-#define MSM_BUS_BIMC_INT_APPS_EBI 10043
-#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
-#define MSM_BUS_SNOC_BIMC_2_MAS 10045
-#define MSM_BUS_SNOC_BIMC_2_SLV 10046
-#define MSM_BUS_PNOC_SLV_5 10047
-#define MSM_BUS_PNOC_SLV_7 10048
-#define MSM_BUS_PNOC_INT_2 10049
-#define MSM_BUS_PNOC_INT_3 10050
-#define MSM_BUS_PNOC_INT_4 10051
-#define MSM_BUS_PNOC_INT_5 10052
-#define MSM_BUS_PNOC_INT_6 10053
-#define MSM_BUS_PNOC_INT_7 10054
-#define MSM_BUS_BIMC_SNOC_1_MAS 10055
-#define MSM_BUS_BIMC_SNOC_1_SLV 10056
-#define MSM_BUS_PNOC_A1NOC_MAS 10057
-#define MSM_BUS_PNOC_A1NOC_SLV 10058
-#define MSM_BUS_CNOC_A1NOC_MAS 10059
-#define MSM_BUS_A0NOC_SNOC_MAS 10060
-#define MSM_BUS_A0NOC_SNOC_SLV 10061
-#define MSM_BUS_A1NOC_SNOC_SLV 10062
-#define MSM_BUS_A1NOC_SNOC_MAS 10063
-#define MSM_BUS_A2NOC_SNOC_MAS 10064
-#define MSM_BUS_A2NOC_SNOC_SLV 10065
-#define MSM_BUS_SNOC_INT_2 10066
-#define MSM_BUS_A0NOC_QDSS_INT 10067
-#define MSM_BUS_INT_LAST 10068
-
-#define MSM_BUS_INT_TEST_ID 20000
-#define MSM_BUS_INT_TEST_LAST 20050
-
-#define MSM_BUS_SLAVE_FIRST 512
-#define MSM_BUS_SLAVE_EBI_CH0 512
-#define MSM_BUS_SLAVE_EBI_CH1 513
-#define MSM_BUS_SLAVE_AMPSS_L2 514
-#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
-#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
-#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
-#define MSM_BUS_SLAVE_SPS 518
-#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
-#define MSM_BUS_SLAVE_AMPSS 520
-#define MSM_BUS_SLAVE_MSS 521
-#define MSM_BUS_SLAVE_LPASS 522
-#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
-#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
-#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
-#define MSM_BUS_SLAVE_CORESIGHT 526
-#define MSM_BUS_SLAVE_RIVA 527
-#define MSM_BUS_SLAVE_SMI 528
-#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
-#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
-#define MSM_BUS_SLAVE_MM_IMEM 531
-#define MSM_BUS_SLAVE_CRYPTO 532
-#define MSM_BUS_SLAVE_SPDM 533
-#define MSM_BUS_SLAVE_RPM 534
-#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
-#define MSM_BUS_SLAVE_MPM 536
-#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
-#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
-#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
-#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
-#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
-#define MSM_BUS_SLAVE_GSBI1_UART 542
-#define MSM_BUS_SLAVE_GSBI2_UART 543
-#define MSM_BUS_SLAVE_GSBI3_UART 544
-#define MSM_BUS_SLAVE_GSBI4_UART 545
-#define MSM_BUS_SLAVE_GSBI5_UART 546
-#define MSM_BUS_SLAVE_GSBI6_UART 547
-#define MSM_BUS_SLAVE_GSBI7_UART 548
-#define MSM_BUS_SLAVE_GSBI8_UART 549
-#define MSM_BUS_SLAVE_GSBI9_UART 550
-#define MSM_BUS_SLAVE_GSBI10_UART 551
-#define MSM_BUS_SLAVE_GSBI11_UART 552
-#define MSM_BUS_SLAVE_GSBI12_UART 553
-#define MSM_BUS_SLAVE_GSBI1_QUP 554
-#define MSM_BUS_SLAVE_GSBI2_QUP 555
-#define MSM_BUS_SLAVE_GSBI3_QUP 556
-#define MSM_BUS_SLAVE_GSBI4_QUP 557
-#define MSM_BUS_SLAVE_GSBI5_QUP 558
-#define MSM_BUS_SLAVE_GSBI6_QUP 559
-#define MSM_BUS_SLAVE_GSBI7_QUP 560
-#define MSM_BUS_SLAVE_GSBI8_QUP 561
-#define MSM_BUS_SLAVE_GSBI9_QUP 562
-#define MSM_BUS_SLAVE_GSBI10_QUP 563
-#define MSM_BUS_SLAVE_GSBI11_QUP 564
-#define MSM_BUS_SLAVE_GSBI12_QUP 565
-#define MSM_BUS_SLAVE_EBI2_NAND 566
-#define MSM_BUS_SLAVE_EBI2_CS0 567
-#define MSM_BUS_SLAVE_EBI2_CS1 568
-#define MSM_BUS_SLAVE_EBI2_CS2 569
-#define MSM_BUS_SLAVE_EBI2_CS3 570
-#define MSM_BUS_SLAVE_EBI2_CS4 571
-#define MSM_BUS_SLAVE_EBI2_CS5 572
-#define MSM_BUS_SLAVE_USB_FS1 573
-#define MSM_BUS_SLAVE_USB_FS2 574
-#define MSM_BUS_SLAVE_TSIF 575
-#define MSM_BUS_SLAVE_MSM_TSSC 576
-#define MSM_BUS_SLAVE_MSM_PDM 577
-#define MSM_BUS_SLAVE_MSM_DIMEM 578
-#define MSM_BUS_SLAVE_MSM_TCSR 579
-#define MSM_BUS_SLAVE_MSM_PRNG 580
-#define MSM_BUS_SLAVE_GSS 581
-#define MSM_BUS_SLAVE_SATA 582
-#define MSM_BUS_SLAVE_USB3 583
-#define MSM_BUS_SLAVE_WCSS 584
-#define MSM_BUS_SLAVE_OCIMEM 585
-#define MSM_BUS_SLAVE_SNOC_OCMEM 586
-#define MSM_BUS_SLAVE_SERVICE_SNOC 587
-#define MSM_BUS_SLAVE_QDSS_STM 588
-#define MSM_BUS_SLAVE_CAMERA_CFG 589
-#define MSM_BUS_SLAVE_DISPLAY_CFG 590
-#define MSM_BUS_SLAVE_OCMEM_CFG 591
-#define MSM_BUS_SLAVE_CPR_CFG 592
-#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
-#define MSM_BUS_SLAVE_MISC_CFG 594
-#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
-#define MSM_BUS_SLAVE_VENUS_CFG 596
-#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
-#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
-#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
-#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
-#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
-#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
-#define MSM_BUS_SLAVE_SERVICE_MNOC 603
-#define MSM_BUS_SLAVE_OCMEM 604
-#define MSM_BUS_SLAVE_SERVICE_ONOC 605
-#define MSM_BUS_SLAVE_SDCC_1 606
-#define MSM_BUS_SLAVE_SDCC_3 607
-#define MSM_BUS_SLAVE_SDCC_2 608
-#define MSM_BUS_SLAVE_SDCC_4 609
-#define MSM_BUS_SLAVE_BAM_DMA 610
-#define MSM_BUS_SLAVE_BLSP_2 611
-#define MSM_BUS_SLAVE_USB_HSIC 612
-#define MSM_BUS_SLAVE_BLSP_1 613
-#define MSM_BUS_SLAVE_USB_HS 614
-#define MSM_BUS_SLAVE_PDM 615
-#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
-#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
-#define MSM_BUS_SLAVE_PRNG 618
-#define MSM_BUS_SLAVE_SERVICE_PNOC 619
-#define MSM_BUS_SLAVE_CLK_CTL 620
-#define MSM_BUS_SLAVE_CNOC_MSS 621
-#define MSM_BUS_SLAVE_SECURITY 622
-#define MSM_BUS_SLAVE_TCSR 623
-#define MSM_BUS_SLAVE_TLMM 624
-#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
-#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
-#define MSM_BUS_SLAVE_IMEM_CFG 627
-#define MSM_BUS_SLAVE_MESSAGE_RAM 628
-#define MSM_BUS_SLAVE_BIMC_CFG 629
-#define MSM_BUS_SLAVE_BOOT_ROM 630
-#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
-#define MSM_BUS_SLAVE_PMIC_ARB 632
-#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
-#define MSM_BUS_SLAVE_DEHR_CFG 634
-#define MSM_BUS_SLAVE_QDSS_CFG 635
-#define MSM_BUS_SLAVE_RBCPR_CFG 636
-#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
-#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
-#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
-#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
-#define MSM_BUS_SLAVE_PNOC_CFG 641
-#define MSM_BUS_SLAVE_SNOC_CFG 642
-#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
-#define MSM_BUS_SLAVE_PHY_APU_CFG 644
-#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
-#define MSM_BUS_SLAVE_SERVICE_CNOC 646
-#define MSM_BUS_SLAVE_IPS_CFG 647
-#define MSM_BUS_SLAVE_QPIC 648
-#define MSM_BUS_SLAVE_DSI_CFG 649
-#define MSM_BUS_SLAVE_UFS_CFG 650
-#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
-#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
-#define MSM_BUS_SLAVE_PCIE_CFG 653
-#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
-#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
-#define MSM_BUS_SLAVE_AVSYNC_CFG 656
-#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
-#define MSM_BUS_SLAVE_VPU_CFG 658
-#define MSM_BUS_SLAVE_BCAST_CFG 659
-#define MSM_BUS_SLAVE_KLM_CFG 660
-#define MSM_BUS_SLAVE_GENI_IR_CFG 661
-#define MSM_BUS_SLAVE_OCMEM_GFX 662
-#define MSM_BUS_SLAVE_CATS_128 663
-#define MSM_BUS_SLAVE_OCMEM_64 664
-#define MSM_BUS_SLAVE_PCIE_0 665
-#define MSM_BUS_SLAVE_PCIE_1 666
-#define MSM_BUS_SLAVE_PCIE_0_CFG 667
-#define MSM_BUS_SLAVE_PCIE_1_CFG 668
-#define MSM_BUS_SLAVE_SRVC_MNOC 669
-#define MSM_BUS_SLAVE_USB_HS2 670
-#define MSM_BUS_SLAVE_AUDIO 671
-#define MSM_BUS_SLAVE_TCU 672
-#define MSM_BUS_SLAVE_APPSS 673
-#define MSM_BUS_SLAVE_PCIE_PARF 674
-#define MSM_BUS_SLAVE_USB3_PHY_CFG 675
-#define MSM_BUS_SLAVE_IPA_CFG 676
-#define MSM_BUS_SLAVE_A0NOC_SNOC 677
-#define MSM_BUS_SLAVE_A1NOC_SNOC 678
-#define MSM_BUS_SLAVE_A2NOC_SNOC 679
-#define MSM_BUS_SLAVE_HMSS_L3 680
-#define MSM_BUS_SLAVE_PIMEM_CFG 681
-#define MSM_BUS_SLAVE_DCC_CFG 682
-#define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
-#define MSM_BUS_SLAVE_PCIE_2_CFG 684
-#define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
-#define MSM_BUS_SLAVE_A0NOC_CFG 686
-#define MSM_BUS_SLAVE_A1NOC_CFG 687
-#define MSM_BUS_SLAVE_A2NOC_CFG 688
-#define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
-#define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
-#define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
-#define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
-#define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
-#define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
-#define MSM_BUS_SLAVE_MMAGIC_CFG 695
-#define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
-#define MSM_BUS_SLAVE_SSC_CFG 697
-#define MSM_BUS_SLAVE_DSA_CFG 698
-#define MSM_BUS_SLAVE_DSA_MPU_CFG 699
-#define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
-#define MSM_BUS_SLAVE_SMMU_CPP_CFG 701
-#define MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
-#define MSM_BUS_SLAVE_SMMU_MDP_CFG 703
-#define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
-#define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
-#define MSM_BUS_SLAVE_SMMU_VFE_CFG 706
-#define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
-#define MSM_BUS_SLAVE_VMEM_CFG 708
-#define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
-#define MSM_BUS_SLAVE_VMEM 710
-#define MSM_BUS_SLAVE_AHB2PHY 711
-#define MSM_BUS_SLAVE_PIMEM 712
-#define MSM_BUS_SLAVE_SNOC_VMEM 713
-#define MSM_BUS_SLAVE_PCIE_2 714
-#define MSM_BUS_SLAVE_RBCPR_MX 715
-#define MSM_BUS_SLAVE_RBCPR_CX 716
-#define MSM_BUS_SLAVE_BIMC_PCNOC 717
-#define MSM_BUS_SLAVE_PCNOC_BIMC_1 718
-#define MSM_BUS_SLAVE_SGMII 719
-#define MSM_BUS_SLAVE_SPMI_FETCHER 720
-#define MSM_BUS_PNOC_SLV_6 721
-#define MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
-#define MSM_BUS_SLAVE_WLAN 723
-#define MSM_BUS_SLAVE_CRVIRT_A2NOC 724
-#define MSM_BUS_SLAVE_CNOC_A2NOC 725
-#define MSM_BUS_SLAVE_GLM 726
-#define MSM_BUS_SLAVE_GNOC_BIMC 727
-#define MSM_BUS_SLAVE_GNOC_SNOC 728
-#define MSM_BUS_SLAVE_QM_CFG 729
-#define MSM_BUS_SLAVE_TLMM_EAST 730
-#define MSM_BUS_SLAVE_TLMM_NORTH 731
-#define MSM_BUS_SLAVE_TLMM_WEST 732
-#define MSM_BUS_SLAVE_SKL 733
-#define MSM_BUS_SLAVE_LPASS_TCM 734
-#define MSM_BUS_SLAVE_TLMM_SOUTH 735
-#define MSM_BUS_SLAVE_TLMM_CENTER 736
-#define MSM_BUS_MSS_NAV_CE_MPU_CFG 737
-#define MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
-#define MSM_BUS_SLAVE_CDSP 739
-#define MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
-#define MSM_BUS_SLAVE_LPASS_MPU_CFG 741
-#define MSM_BUS_SLAVE_CSI_PHY_CFG 742
-#define MSM_BUS_SLAVE_LAST 743
-
-#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
-#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
-
-/*
- * ID's used in RPM messages
- */
-#define ICBID_MASTER_APPSS_PROC 0
-#define ICBID_MASTER_MSS_PROC 1
-#define ICBID_MASTER_MNOC_BIMC 2
-#define ICBID_MASTER_SNOC_BIMC 3
-#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
-#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
-#define ICBID_MASTER_CNOC_MNOC_CFG 5
-#define ICBID_MASTER_GFX3D 6
-#define ICBID_MASTER_JPEG 7
-#define ICBID_MASTER_MDP 8
-#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
-#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
-#define ICBID_MASTER_VIDEO 9
-#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
-#define ICBID_MASTER_VIDEO_P1 10
-#define ICBID_MASTER_VFE 11
-#define ICBID_MASTER_VFE0 ICBID_MASTER_VFE
-#define ICBID_MASTER_CNOC_ONOC_CFG 12
-#define ICBID_MASTER_JPEG_OCMEM 13
-#define ICBID_MASTER_MDP_OCMEM 14
-#define ICBID_MASTER_VIDEO_P0_OCMEM 15
-#define ICBID_MASTER_VIDEO_P1_OCMEM 16
-#define ICBID_MASTER_VFE_OCMEM 17
-#define ICBID_MASTER_LPASS_AHB 18
-#define ICBID_MASTER_QDSS_BAM 19
-#define ICBID_MASTER_SNOC_CFG 20
-#define ICBID_MASTER_BIMC_SNOC 21
-#define ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
-#define ICBID_MASTER_CNOC_SNOC 22
-#define ICBID_MASTER_CRYPTO 23
-#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
-#define ICBID_MASTER_CRYPTO_CORE1 24
-#define ICBID_MASTER_LPASS_PROC 25
-#define ICBID_MASTER_MSS 26
-#define ICBID_MASTER_MSS_NAV 27
-#define ICBID_MASTER_OCMEM_DMA 28
-#define ICBID_MASTER_PNOC_SNOC 29
-#define ICBID_MASTER_WCSS 30
-#define ICBID_MASTER_QDSS_ETR 31
-#define ICBID_MASTER_USB3 32
-#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
-#define ICBID_MASTER_SDCC_1 33
-#define ICBID_MASTER_SDCC_3 34
-#define ICBID_MASTER_SDCC_2 35
-#define ICBID_MASTER_SDCC_4 36
-#define ICBID_MASTER_TSIF 37
-#define ICBID_MASTER_BAM_DMA 38
-#define ICBID_MASTER_BLSP_2 39
-#define ICBID_MASTER_USB_HSIC 40
-#define ICBID_MASTER_BLSP_1 41
-#define ICBID_MASTER_USB_HS 42
-#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
-#define ICBID_MASTER_PNOC_CFG 43
-#define ICBID_MASTER_SNOC_PNOC 44
-#define ICBID_MASTER_RPM_INST 45
-#define ICBID_MASTER_RPM_DATA 46
-#define ICBID_MASTER_RPM_SYS 47
-#define ICBID_MASTER_DEHR 48
-#define ICBID_MASTER_QDSS_DAP 49
-#define ICBID_MASTER_SPDM 50
-#define ICBID_MASTER_TIC 51
-#define ICBID_MASTER_SNOC_CNOC 52
-#define ICBID_MASTER_GFX3D_OCMEM 53
-#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
-#define ICBID_MASTER_OVIRT_SNOC 54
-#define ICBID_MASTER_SNOC_OVIRT 55
-#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
-#define ICBID_MASTER_ONOC_OVIRT 56
-#define ICBID_MASTER_USB_HS2 57
-#define ICBID_MASTER_QPIC 58
-#define ICBID_MASTER_IPA 59
-#define ICBID_MASTER_DSI 60
-#define ICBID_MASTER_MDP1 61
-#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
-#define ICBID_MASTER_VPU_PROC 62
-#define ICBID_MASTER_VPU 63
-#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
-#define ICBID_MASTER_CRYPTO_CORE2 64
-#define ICBID_MASTER_PCIE_0 65
-#define ICBID_MASTER_PCIE_1 66
-#define ICBID_MASTER_SATA 67
-#define ICBID_MASTER_UFS 68
-#define ICBID_MASTER_USB3_1 69
-#define ICBID_MASTER_VIDEO_OCMEM 70
-#define ICBID_MASTER_VPU1 71
-#define ICBID_MASTER_VCAP 72
-#define ICBID_MASTER_EMAC 73
-#define ICBID_MASTER_BCAST 74
-#define ICBID_MASTER_MMSS_PROC 75
-#define ICBID_MASTER_SNOC_BIMC_1 76
-#define ICBID_MASTER_SNOC_PCNOC 77
-#define ICBID_MASTER_AUDIO 78
-#define ICBID_MASTER_MM_INT_0 79
-#define ICBID_MASTER_MM_INT_1 80
-#define ICBID_MASTER_MM_INT_2 81
-#define ICBID_MASTER_MM_INT_BIMC 82
-#define ICBID_MASTER_MSS_INT 83
-#define ICBID_MASTER_PCNOC_CFG 84
-#define ICBID_MASTER_PCNOC_INT_0 85
-#define ICBID_MASTER_PCNOC_INT_1 86
-#define ICBID_MASTER_PCNOC_M_0 87
-#define ICBID_MASTER_PCNOC_M_1 88
-#define ICBID_MASTER_PCNOC_S_0 89
-#define ICBID_MASTER_PCNOC_S_1 90
-#define ICBID_MASTER_PCNOC_S_2 91
-#define ICBID_MASTER_PCNOC_S_3 92
-#define ICBID_MASTER_PCNOC_S_4 93
-#define ICBID_MASTER_PCNOC_S_6 94
-#define ICBID_MASTER_PCNOC_S_7 95
-#define ICBID_MASTER_PCNOC_S_8 96
-#define ICBID_MASTER_PCNOC_S_9 97
-#define ICBID_MASTER_QDSS_INT 98
-#define ICBID_MASTER_SNOC_INT_0 99
-#define ICBID_MASTER_SNOC_INT_1 100
-#define ICBID_MASTER_SNOC_INT_BIMC 101
-#define ICBID_MASTER_TCU_0 102
-#define ICBID_MASTER_TCU_1 103
-#define ICBID_MASTER_BIMC_INT_0 104
-#define ICBID_MASTER_BIMC_INT_1 105
-#define ICBID_MASTER_CAMERA 106
-#define ICBID_MASTER_RICA 107
-#define ICBID_MASTER_SNOC_BIMC_2 108
-#define ICBID_MASTER_BIMC_SNOC_1 109
-#define ICBID_MASTER_A0NOC_SNOC 110
-#define ICBID_MASTER_A1NOC_SNOC 111
-#define ICBID_MASTER_A2NOC_SNOC 112
-#define ICBID_MASTER_PIMEM 113
-#define ICBID_MASTER_SNOC_VMEM 114
-#define ICBID_MASTER_CPP 115
-#define ICBID_MASTER_CNOC_A1NOC 116
-#define ICBID_MASTER_PNOC_A1NOC 117
-#define ICBID_MASTER_HMSS 118
-#define ICBID_MASTER_PCIE_2 119
-#define ICBID_MASTER_ROTATOR 120
-#define ICBID_MASTER_VENUS_VMEM 121
-#define ICBID_MASTER_DCC 122
-#define ICBID_MASTER_MCDMA 123
-#define ICBID_MASTER_PCNOC_INT_2 124
-#define ICBID_MASTER_PCNOC_INT_3 125
-#define ICBID_MASTER_PCNOC_INT_4 126
-#define ICBID_MASTER_PCNOC_INT_5 127
-#define ICBID_MASTER_PCNOC_INT_6 128
-#define ICBID_MASTER_PCNOC_S_5 129
-#define ICBID_MASTER_SENSORS_AHB 130
-#define ICBID_MASTER_SENSORS_PROC 131
-#define ICBID_MASTER_QSPI 132
-#define ICBID_MASTER_VFE1 133
-#define ICBID_MASTER_SNOC_INT_2 134
-#define ICBID_MASTER_SMMNOC_BIMC 135
-#define ICBID_MASTER_CRVIRT_A1NOC 136
-#define ICBID_MASTER_XM_USB_HS1 137
-#define ICBID_MASTER_XI_USB_HS1 138
-#define ICBID_MASTER_PCNOC_BIMC_1 139
-#define ICBID_MASTER_BIMC_PCNOC 140
-#define ICBID_MASTER_XI_HSIC 141
-#define ICBID_MASTER_SGMII 142
-#define ICBID_MASTER_SPMI_FETCHER 143
-#define ICBID_MASTER_GNOC_BIMC 144
-#define ICBID_MASTER_CRVIRT_A2NOC 145
-#define ICBID_MASTER_CNOC_A2NOC 146
-#define ICBID_MASTER_WLAN 147
-#define ICBID_MASTER_MSS_CE 148
-#define ICBID_MASTER_CDSP_PROC 149
-#define ICBID_MASTER_GNOC_SNOC 150
-
-#define ICBID_SLAVE_EBI1 0
-#define ICBID_SLAVE_APPSS_L2 1
-#define ICBID_SLAVE_BIMC_SNOC 2
-#define ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
-#define ICBID_SLAVE_CAMERA_CFG 3
-#define ICBID_SLAVE_DISPLAY_CFG 4
-#define ICBID_SLAVE_OCMEM_CFG 5
-#define ICBID_SLAVE_CPR_CFG 6
-#define ICBID_SLAVE_CPR_XPU_CFG 7
-#define ICBID_SLAVE_MISC_CFG 8
-#define ICBID_SLAVE_MISC_XPU_CFG 9
-#define ICBID_SLAVE_VENUS_CFG 10
-#define ICBID_SLAVE_GFX3D_CFG 11
-#define ICBID_SLAVE_MMSS_CLK_CFG 12
-#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
-#define ICBID_SLAVE_MNOC_MPU_CFG 14
-#define ICBID_SLAVE_ONOC_MPU_CFG 15
-#define ICBID_SLAVE_MNOC_BIMC 16
-#define ICBID_SLAVE_SERVICE_MNOC 17
-#define ICBID_SLAVE_OCMEM 18
-#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
-#define ICBID_SLAVE_SERVICE_ONOC 19
-#define ICBID_SLAVE_APPSS 20
-#define ICBID_SLAVE_LPASS 21
-#define ICBID_SLAVE_USB3 22
-#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
-#define ICBID_SLAVE_WCSS 23
-#define ICBID_SLAVE_SNOC_BIMC 24
-#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
-#define ICBID_SLAVE_SNOC_CNOC 25
-#define ICBID_SLAVE_IMEM 26
-#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
-#define ICBID_SLAVE_SNOC_OVIRT 27
-#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
-#define ICBID_SLAVE_SNOC_PNOC 28
-#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
-#define ICBID_SLAVE_SERVICE_SNOC 29
-#define ICBID_SLAVE_QDSS_STM 30
-#define ICBID_SLAVE_SDCC_1 31
-#define ICBID_SLAVE_SDCC_3 32
-#define ICBID_SLAVE_SDCC_2 33
-#define ICBID_SLAVE_SDCC_4 34
-#define ICBID_SLAVE_TSIF 35
-#define ICBID_SLAVE_BAM_DMA 36
-#define ICBID_SLAVE_BLSP_2 37
-#define ICBID_SLAVE_USB_HSIC 38
-#define ICBID_SLAVE_BLSP_1 39
-#define ICBID_SLAVE_USB_HS 40
-#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
-#define ICBID_SLAVE_PDM 41
-#define ICBID_SLAVE_PERIPH_APU_CFG 42
-#define ICBID_SLAVE_PNOC_MPU_CFG 43
-#define ICBID_SLAVE_PRNG 44
-#define ICBID_SLAVE_PNOC_SNOC 45
-#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
-#define ICBID_SLAVE_SERVICE_PNOC 46
-#define ICBID_SLAVE_CLK_CTL 47
-#define ICBID_SLAVE_CNOC_MSS 48
-#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
-#define ICBID_SLAVE_SECURITY 49
-#define ICBID_SLAVE_TCSR 50
-#define ICBID_SLAVE_TLMM 51
-#define ICBID_SLAVE_CRYPTO_0_CFG 52
-#define ICBID_SLAVE_CRYPTO_1_CFG 53
-#define ICBID_SLAVE_IMEM_CFG 54
-#define ICBID_SLAVE_MESSAGE_RAM 55
-#define ICBID_SLAVE_BIMC_CFG 56
-#define ICBID_SLAVE_BOOT_ROM 57
-#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
-#define ICBID_SLAVE_PMIC_ARB 59
-#define ICBID_SLAVE_SPDM_WRAPPER 60
-#define ICBID_SLAVE_DEHR_CFG 61
-#define ICBID_SLAVE_MPM 62
-#define ICBID_SLAVE_QDSS_CFG 63
-#define ICBID_SLAVE_RBCPR_CFG 64
-#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
-#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
-#define ICBID_SLAVE_CNOC_MNOC_CFG 66
-#define ICBID_SLAVE_SNOC_MPU_CFG 67
-#define ICBID_SLAVE_CNOC_ONOC_CFG 68
-#define ICBID_SLAVE_PNOC_CFG 69
-#define ICBID_SLAVE_SNOC_CFG 70
-#define ICBID_SLAVE_EBI1_DLL_CFG 71
-#define ICBID_SLAVE_PHY_APU_CFG 72
-#define ICBID_SLAVE_EBI1_PHY_CFG 73
-#define ICBID_SLAVE_RPM 74
-#define ICBID_SLAVE_CNOC_SNOC 75
-#define ICBID_SLAVE_SERVICE_CNOC 76
-#define ICBID_SLAVE_OVIRT_SNOC 77
-#define ICBID_SLAVE_OVIRT_OCMEM 78
-#define ICBID_SLAVE_USB_HS2 79
-#define ICBID_SLAVE_QPIC 80
-#define ICBID_SLAVE_IPS_CFG 81
-#define ICBID_SLAVE_DSI_CFG 82
-#define ICBID_SLAVE_USB3_1 83
-#define ICBID_SLAVE_PCIE_0 84
-#define ICBID_SLAVE_PCIE_1 85
-#define ICBID_SLAVE_PSS_SMMU_CFG 86
-#define ICBID_SLAVE_CRYPTO_2_CFG 87
-#define ICBID_SLAVE_PCIE_0_CFG 88
-#define ICBID_SLAVE_PCIE_1_CFG 89
-#define ICBID_SLAVE_SATA_CFG 90
-#define ICBID_SLAVE_SPSS_GENI_IR 91
-#define ICBID_SLAVE_UFS_CFG 92
-#define ICBID_SLAVE_AVSYNC_CFG 93
-#define ICBID_SLAVE_VPU_CFG 94
-#define ICBID_SLAVE_USB_PHY_CFG 95
-#define ICBID_SLAVE_RBCPR_MX_CFG 96
-#define ICBID_SLAVE_PCIE_PARF 97
-#define ICBID_SLAVE_VCAP_CFG 98
-#define ICBID_SLAVE_EMAC_CFG 99
-#define ICBID_SLAVE_BCAST_CFG 100
-#define ICBID_SLAVE_KLM_CFG 101
-#define ICBID_SLAVE_DISPLAY_PWM 102
-#define ICBID_SLAVE_GENI 103
-#define ICBID_SLAVE_SNOC_BIMC_1 104
-#define ICBID_SLAVE_AUDIO 105
-#define ICBID_SLAVE_CATS_0 106
-#define ICBID_SLAVE_CATS_1 107
-#define ICBID_SLAVE_MM_INT_0 108
-#define ICBID_SLAVE_MM_INT_1 109
-#define ICBID_SLAVE_MM_INT_2 110
-#define ICBID_SLAVE_MM_INT_BIMC 111
-#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
-#define ICBID_SLAVE_MSS_INT 113
-#define ICBID_SLAVE_PCNOC_INT_0 114
-#define ICBID_SLAVE_PCNOC_INT_1 115
-#define ICBID_SLAVE_PCNOC_M_0 116
-#define ICBID_SLAVE_PCNOC_M_1 117
-#define ICBID_SLAVE_PCNOC_S_0 118
-#define ICBID_SLAVE_PCNOC_S_1 119
-#define ICBID_SLAVE_PCNOC_S_2 120
-#define ICBID_SLAVE_PCNOC_S_3 121
-#define ICBID_SLAVE_PCNOC_S_4 122
-#define ICBID_SLAVE_PCNOC_S_6 123
-#define ICBID_SLAVE_PCNOC_S_7 124
-#define ICBID_SLAVE_PCNOC_S_8 125
-#define ICBID_SLAVE_PCNOC_S_9 126
-#define ICBID_SLAVE_PRNG_XPU_CFG 127
-#define ICBID_SLAVE_QDSS_INT 128
-#define ICBID_SLAVE_RPM_XPU_CFG 129
-#define ICBID_SLAVE_SNOC_INT_0 130
-#define ICBID_SLAVE_SNOC_INT_1 131
-#define ICBID_SLAVE_SNOC_INT_BIMC 132
-#define ICBID_SLAVE_TCU 133
-#define ICBID_SLAVE_BIMC_INT_0 134
-#define ICBID_SLAVE_BIMC_INT_1 135
-#define ICBID_SLAVE_RICA_CFG 136
-#define ICBID_SLAVE_SNOC_BIMC_2 137
-#define ICBID_SLAVE_BIMC_SNOC_1 138
-#define ICBID_SLAVE_PNOC_A1NOC 139
-#define ICBID_SLAVE_SNOC_VMEM 140
-#define ICBID_SLAVE_A0NOC_SNOC 141
-#define ICBID_SLAVE_A1NOC_SNOC 142
-#define ICBID_SLAVE_A2NOC_SNOC 143
-#define ICBID_SLAVE_A0NOC_CFG 144
-#define ICBID_SLAVE_A0NOC_MPU_CFG 145
-#define ICBID_SLAVE_A0NOC_SMMU_CFG 146
-#define ICBID_SLAVE_A1NOC_CFG 147
-#define ICBID_SLAVE_A1NOC_MPU_CFG 148
-#define ICBID_SLAVE_A1NOC_SMMU_CFG 149
-#define ICBID_SLAVE_A2NOC_CFG 150
-#define ICBID_SLAVE_A2NOC_MPU_CFG 151
-#define ICBID_SLAVE_A2NOC_SMMU_CFG 152
-#define ICBID_SLAVE_AHB2PHY 153
-#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
-#define ICBID_SLAVE_DCC_CFG 155
-#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
-#define ICBID_SLAVE_DSA_CFG 157
-#define ICBID_SLAVE_DSA_MPU_CFG 158
-#define ICBID_SLAVE_SSC_MPU_CFG 159
-#define ICBID_SLAVE_HMSS_L3 160
-#define ICBID_SLAVE_LPASS_SMMU_CFG 161
-#define ICBID_SLAVE_MMAGIC_CFG 162
-#define ICBID_SLAVE_PCIE20_AHB2PHY 163
-#define ICBID_SLAVE_PCIE_2 164
-#define ICBID_SLAVE_PCIE_2_CFG 165
-#define ICBID_SLAVE_PIMEM 166
-#define ICBID_SLAVE_PIMEM_CFG 167
-#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
-#define ICBID_SLAVE_RBCPR_CX 169
-#define ICBID_SLAVE_RBCPR_MX 170
-#define ICBID_SLAVE_SMMU_CPP_CFG 171
-#define ICBID_SLAVE_SMMU_JPEG_CFG 172
-#define ICBID_SLAVE_SMMU_MDP_CFG 173
-#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174
-#define ICBID_SLAVE_SMMU_VENUS_CFG 175
-#define ICBID_SLAVE_SMMU_VFE_CFG 176
-#define ICBID_SLAVE_SSC_CFG 177
-#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178
-#define ICBID_SLAVE_VMEM 179
-#define ICBID_SLAVE_VMEM_CFG 180
-#define ICBID_SLAVE_QDSS_MPU_CFG 181
-#define ICBID_SLAVE_USB3_PHY_CFG 182
-#define ICBID_SLAVE_IPA_CFG 183
-#define ICBID_SLAVE_PCNOC_INT_2 184
-#define ICBID_SLAVE_PCNOC_INT_3 185
-#define ICBID_SLAVE_PCNOC_INT_4 186
-#define ICBID_SLAVE_PCNOC_INT_5 187
-#define ICBID_SLAVE_PCNOC_INT_6 188
-#define ICBID_SLAVE_PCNOC_S_5 189
-#define ICBID_SLAVE_QSPI 190
-#define ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
-#define ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
-#define ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
-#define ICBID_SLAVE_MSS_MPU_CFG 194
-#define ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
-#define ICBID_SLAVE_SKL 196
-#define ICBID_SLAVE_SNOC_INT_2 197
-#define ICBID_SLAVE_SMMNOC_BIMC 198
-#define ICBID_SLAVE_CRVIRT_A1NOC 199
-#define ICBID_SLAVE_SGMII 200
-#define ICBID_SLAVE_QHS4_APPS 201
-#define ICBID_SLAVE_BIMC_PCNOC 202
-#define ICBID_SLAVE_PCNOC_BIMC_1 203
-#define ICBID_SLAVE_SPMI_FETCHER 204
-#define ICBID_SLAVE_MMSS_SMMU_CFG 205
-#define ICBID_SLAVE_WLAN 206
-#define ICBID_SLAVE_CRVIRT_A2NOC 207
-#define ICBID_SLAVE_CNOC_A2NOC 208
-#define ICBID_SLAVE_GLM 209
-#define ICBID_SLAVE_GNOC_BIMC 210
-#define ICBID_SLAVE_GNOC_SNOC 211
-#define ICBID_SLAVE_QM_CFG 212
-#define ICBID_SLAVE_TLMM_EAST 213
-#define ICBID_SLAVE_TLMM_NORTH 214
-#define ICBID_SLAVE_TLMM_WEST 215
-#define ICBID_SLAVE_LPASS_TCM 216
-#define ICBID_SLAVE_TLMM_SOUTH 217
-#define ICBID_SLAVE_TLMM_CENTER 218
-#define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219
-#define ICBID_SLAVE_A2NOC_THROTTLE_CFG 220
-#define ICBID_SLAVE_CDSP 221
-#define ICBID_SLAVE_CDSP_SMMU_CFG 222
-#define ICBID_SLAVE_LPASS_MPU_CFG 223
-#define ICBID_SLAVE_CSI_PHY_CFG 224
-#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 34dba516ef24..8c896540a72c 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -26,6 +26,8 @@
#include <linux/list.h>
#include <linux/jump_label.h>
+#include <linux/irqchip/arm-gic-v4.h>
+
#define VGIC_V3_MAX_CPUS 255
#define VGIC_V2_MAX_CPUS 8
#define VGIC_NR_IRQS_LEGACY 256
@@ -73,6 +75,9 @@ struct vgic_global {
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
bool can_emulate_gicv2;
+ /* Hardware has GICv4? */
+ bool has_gicv4;
+
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
@@ -116,6 +121,7 @@ struct vgic_irq {
bool hw; /* Tied to HW IRQ */
struct kref refcount; /* Used for LPIs */
u32 hwintid; /* HW INTID number */
+ unsigned int host_irq; /* linux irq corresponding to hwintid */
union {
u8 targets; /* GICv2 target VCPUs mask */
u32 mpidr; /* GICv3 target VCPU */
@@ -232,6 +238,15 @@ struct vgic_dist {
/* used by vgic-debug */
struct vgic_state_iter *iter;
+
+ /*
+ * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
+ * array, the property table pointer as well as allocation
+ * data. This essentially ties the Linux IRQ core and ITS
+ * together, and avoids leaking KVM's data structures anywhere
+ * else.
+ */
+ struct its_vm its_vm;
};
struct vgic_v2_cpu_if {
@@ -250,6 +265,14 @@ struct vgic_v3_cpu_if {
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
+
+ /*
+ * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
+ * pending table pointer, the its_vm pointer and a few other
+ * HW specific things. As for the its_vm structure, this is
+ * linking the Linux IRQ subsystem and the ITS together.
+ */
+ struct its_vpe its_vpe;
};
struct vgic_cpu {
@@ -307,9 +330,10 @@ void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+ u32 vintid);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
@@ -349,4 +373,15 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
+struct kvm_kernel_irq_routing_entry;
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
+ struct kvm_kernel_irq_routing_entry *irq_entry);
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+ struct kvm_kernel_irq_routing_entry *irq_entry);
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+
#endif /* __KVM_ARM_VGIC_H */
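
Editor's note: the reworked kvm_vgic_map_phys_irq() above now takes both the Linux host IRQ number and the virtual INTID, so the vgic can later find the host interrupt (e.g. for GICv4 forwarding), while unmap/map_is_active keep working on the virtual INTID alone. A minimal hedged sketch of a caller using the new signature follows; the names host_vtimer_irq and vtimer_intid are illustrative assumptions, not taken from this patch.

/* Sketch only: tie a physical (host) IRQ to a guest-visible INTID with
 * the new three-argument kvm_vgic_map_phys_irq(), then undo it later
 * using just the virtual INTID.
 */
static int example_map_vtimer(struct kvm_vcpu *vcpu,
			      unsigned int host_vtimer_irq,
			      u32 vtimer_intid)
{
	int ret;

	ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer_intid);
	if (ret)
		return ret;

	/* ... on teardown, only the virtual INTID is needed. */
	return kvm_vgic_unmap_phys_irq(vcpu, vtimer_intid);
}
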
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c397934f91dd..e55e4255a210 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -78,6 +78,7 @@ enum bpf_arg_type {
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
+ ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
* helper function must fill all bytes or clear
* them in error case.
@@ -334,9 +335,8 @@ extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
- struct net_device *netdev);
+ bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@@ -425,15 +425,9 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
- enum bpf_prog_type type)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
enum bpf_prog_type type,
- struct net_device *netdev)
+ bool attach_drv)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -514,9 +508,14 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
}
#endif /* CONFIG_BPF_SYSCALL */
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ enum bpf_prog_type type)
+{
+ return bpf_prog_get_type_dev(ufd, type, false);
+}
+
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
-u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
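
Editor's note: after this change bpf_prog_get_type() is no longer a separately exported symbol; it becomes a static inline wrapper that simply calls bpf_prog_get_type_dev() with attach_drv = false, so existing callers keep compiling unchanged. A small hedged sketch of the equivalence (the helper name get_sched_cls_prog is invented for illustration):

/* Sketch only: the old helper is now defined in terms of the new one. */
static inline struct bpf_prog *get_sched_cls_prog(u32 ufd)
{
	/* same as bpf_prog_get_type_dev(ufd, BPF_PROG_TYPE_SCHED_CLS, false) */
	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SCHED_CLS);
}
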
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 07b96aaca256..c561b986bab0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -115,7 +115,7 @@ struct bpf_insn_aux_data {
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int converted_op_size; /* the valid value width after perceived conversion */
+ bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -171,7 +171,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
#else
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
+static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
return -EOPNOTSUPP;
}
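
Editor's note: the stub above gains a missing `static inline`. Without it, a plain function definition in a header is emitted as an external symbol in every translation unit that includes it, and linking two such objects fails with a multiple-definition error. A self-contained userspace illustration with hypothetical names:

/* stub.h -- illustrative only */
#ifndef STUB_H
#define STUB_H

/* Without "static inline", every .c file including this header would
 * emit its own external definition of prep_stub(), and the final link
 * would fail with "multiple definition of `prep_stub'".
 */
static inline int prep_stub(void)
{
	return -95;	/* -EOPNOTSUPP */
}

#endif /* STUB_H */
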
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 3672353a0acd..188ed9f65517 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -88,17 +88,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
+/*
+ * These macros help objtool understand GCC code flow for unreachable code.
+ * The __COUNTER__ based labels are a hack to make each instance of the macros
+ * unique, to convince GCC not to merge duplicate inline asm statements.
+ */
#define annotate_reachable() ({ \
- asm("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
- asm("%c0:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define ASM_UNREACHABLE \
"999:\n\t" \
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2995a271ec46..511fbaabf624 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1872,7 +1872,7 @@ struct super_operations {
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & MS_RDONLY; }
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
@@ -3088,7 +3088,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
static inline int vfs_fstatat(int dfd, const char __user *filename,
struct kstat *stat, int flags)
{
- return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+ return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
@@ -3194,6 +3195,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ if (!vma->vm_file)
+ return false;
+ if (!vma_is_dax(vma))
+ return false;
+ inode = file_inode(vma->vm_file);
+ if (inode->i_mode == S_IFCHR)
+ return false; /* device-dax */
+ return true;
+}
+
static inline int iocb_flags(struct file *file)
{
int res = 0;
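
Editor's note: vma_is_fsdax() above tells filesystem-DAX mappings apart from device-DAX ones; both are DAX, but a device-dax inode is a character device, so the S_IFCHR check filters it out. A hedged sketch of the kind of caller that wants to refuse long-term page pinning on fs-DAX while still allowing device-DAX (the function name is invented for illustration):

/* Illustrative only: mirror the vma_is_fsdax() distinction. */
static bool example_allow_longterm_pin(struct vm_area_struct *vma)
{
	if (vma_is_fsdax(vma))
		return false;	/* fs-DAX: blocks may be reclaimed or remapped */
	return true;		/* anonymous, page cache, or device-DAX */
}
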
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
deleted file mode 100644
index 127c39d815ba..000000000000
--- a/include/linux/htirq.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_HTIRQ_H
-#define LINUX_HTIRQ_H
-
-struct pci_dev;
-struct irq_data;
-
-struct ht_irq_msg {
- u32 address_lo; /* low 32 bits of the ht irq message */
- u32 address_hi; /* high 32 bits of the it irq message */
-};
-
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
-
-struct ht_irq_cfg {
- struct pci_dev *dev;
- /* Update callback used to cope with buggy hardware */
- ht_irq_update_t *update;
- unsigned pos;
- unsigned idx;
- struct ht_irq_msg msg;
-};
-
-/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(struct irq_data *data);
-void unmask_ht_irq(struct irq_data *data);
-
-/* The arch hook for getting things started */
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
- ht_irq_update_t *update);
-void arch_teardown_ht_irq(unsigned int irq);
-
-/* For drivers of buggy hardware */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
-
-#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index fbf5b31d47ee..82a25880714a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
}
#endif
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
- BUG();
- return 0;
-}
-#endif
-
#define HUGETLB_ANON_FILE "anon_hugepage"
enum {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b01d06db9101..e140f69163b6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -211,6 +211,7 @@ struct irq_data {
* IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity
* mask. Applies only to affinity managed irqs.
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -231,6 +232,7 @@ enum {
IRQD_IRQ_STARTED = (1 << 22),
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
+ IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -260,18 +262,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
+static inline bool irqd_trigger_type_was_set(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
+}
+
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
- * Must only be called inside irq_chip.irq_set_type() functions.
+ * Must only be called inside irq_chip.irq_set_type() functions or
+ * from the DT/ACPI setup code.
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}
static inline bool irqd_is_level_type(struct irq_data *d)
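
Editor's note: IRQD_DEFAULT_TRIGGER_SET records that firmware (DT/ACPI) setup code already programmed a trigger type, and irqd_trigger_type_was_set() lets later code tell a real default apart from "never configured". A hedged, simplified sketch of the decision a setup path might make (not the exact core code):

/* Sketch: prefer an explicitly requested trigger; otherwise fall back
 * to the firmware-provided default only if one was actually recorded.
 */
static u32 example_pick_trigger(struct irq_data *d, u32 requested)
{
	if (requested & IRQ_TYPE_SENSE_MASK)
		return requested & IRQ_TYPE_SENSE_MASK;

	if (irqd_trigger_type_was_set(d))
		return irqd_get_trigger_type(d);	/* DT/ACPI default */

	return IRQ_TYPE_NONE;	/* nothing known, leave untouched */
}
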
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 447da8ca2156..fa683ea5c769 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -109,6 +109,7 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
+struct irq_domain_ops;
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
#endif
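
Editor's note: the added forward declaration of struct irq_domain_ops lets the its_init_v4() prototype compile in files that never pull in the irqdomain header, because a pointer to an incomplete type is sufficient in a declaration. Minimal generic illustration (names invented):

/* A pointer to an incomplete struct is fine in a prototype; the full
 * definition is only needed where members are actually dereferenced.
 */
struct widget_ops;				/* forward declaration */
int widget_init(const struct widget_ops *ops);	/* compiles as-is */
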
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 708f337d780b..bd118a6c60cb 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -14,12 +14,6 @@
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
-#ifndef CONFIG_64BIT
-# define KALLSYM_FMT "%08lx"
-#else
-# define KALLSYM_FMT "%016lx"
-#endif
-
struct module;
#ifdef CONFIG_KALLSYMS
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 9520fc3c3b9a..05d8fb5a06c4 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -44,7 +44,7 @@ struct key_preparsed_payload {
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
- time_t expiry; /* Expiry time of key */
+ time64_t expiry; /* Expiry time of key */
} __randomize_layout;
typedef int (*request_key_actor_t)(struct key_construction *key,
diff --git a/include/linux/key.h b/include/linux/key.h
index 8a15cabe928d..e58ee10f6e58 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -24,6 +24,7 @@
#include <linux/atomic.h>
#include <linux/assoc_array.h>
#include <linux/refcount.h>
+#include <linux/time64.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -162,10 +163,10 @@ struct key {
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
union {
- time_t expiry; /* time at which key expires (or 0) */
- time_t revoked_at; /* time at which key was revoked */
+ time64_t expiry; /* time at which key expires (or 0) */
+ time64_t revoked_at; /* time at which key was revoked */
};
- time_t last_used_at; /* last time used for LRU keyring discard */
+ time64_t last_used_at; /* last time used for LRU keyring discard */
kuid_t uid;
kgid_t gid;
key_perm_t perm; /* access permissions */
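
Editor's note: switching expiry, revoked_at and last_used_at from time_t to time64_t is part of the y2038 cleanup; timestamps are taken with 64-bit second accessors such as ktime_get_real_seconds(). A hedged sketch of stamping a key under the new types (the helper name is illustrative; real key code also drives the garbage-collector scheduling):

#include <linux/timekeeping.h>

/* Sketch only: record "now" and a relative timeout in 64-bit seconds,
 * which stays valid past January 2038 on 32-bit systems.
 */
static void example_touch_key(struct key *key, unsigned int timeout_s)
{
	time64_t now = ktime_get_real_seconds();

	key->last_used_at = now;
	if (timeout_s)
		key->expiry = now + timeout_s;
}
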
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 3203e36b2ee8..c1961761311d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -118,8 +118,7 @@ struct kthread_delayed_work {
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \
.work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
- .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(dwork.timer), \
+ .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
TIMER_IRQSAFE), \
}
@@ -165,10 +164,9 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
#define kthread_init_delayed_work(dwork, fn) \
do { \
kthread_init_work(&(dwork)->work, (fn)); \
- __setup_timer(&(dwork)->timer, \
- (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(dwork)->timer, \
- TIMER_IRQSAFE); \
+ __init_timer(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
} while (0)
int kthread_worker_fn(void *worker_ptr);
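
Editor's note: for users of kthread delayed work nothing visible changes here; the timer plumbing inside KTHREAD_DELAYED_WORK_INIT/kthread_init_delayed_work now uses the new timer API, while the work callback keeps its kthread_work_func_t signature. A hedged usage sketch (worker and function names invented for illustration):

#include <linux/kthread.h>

static void example_work_fn(struct kthread_work *work)
{
	/* runs in the kthread worker's context after the delay expires */
}

static struct kthread_worker *worker;
static struct kthread_delayed_work dwork;

static int example_start(void)
{
	worker = kthread_create_worker(0, "example-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_delayed_work(&dwork, example_work_fn);
	kthread_queue_delayed_work(worker, &dwork, HZ);	/* ~1 second later */
	return 0;
}
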
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 895ec0c4942e..a2246cf670ba 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
new_page = __alloc_pages_nodemask(gfp_mask, order,
preferred_nid, nodemask);
- if (new_page && PageTransHuge(page))
+ if (new_page && PageTransHuge(new_page))
prep_transhuge_page(new_page);
return new_page;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ee073146aaa7..ea818ff739cd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -377,6 +377,7 @@ enum page_entry_size {
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
+ int (*split)(struct vm_area_struct * area, unsigned long addr);
int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_fault *vmf);
int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
@@ -1379,6 +1380,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+ unsigned long nr_pages, unsigned int gup_flags,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
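
Editor's note: get_user_pages_longterm() exists so callers that keep pages pinned indefinitely (RDMA-style DMA, for instance) can be refused on filesystem-DAX mappings; with CONFIG_FS_DAX disabled it simply falls back to get_user_pages(). A hedged caller sketch with abbreviated error handling (buffer and function names invented; the fs-DAX rejection behaviour is per this series):

/* Sketch: pin a user buffer for long-lived DMA. The vmas array lets the
 * implementation inspect each mapping and reject fs-DAX ones.
 */
static long example_pin_buffer(unsigned long uaddr, unsigned long npages,
			       struct page **pages,
			       struct vm_area_struct **vmas)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(uaddr, npages, FOLL_WRITE,
					 pages, vmas);
	up_read(&current->mm->mmap_sem);

	return pinned;	/* number of pages pinned, or -errno */
}
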
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 6cd0f6b7658b..cd55bf14ad51 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -267,7 +267,7 @@ struct mtd_info {
*/
unsigned int bitflip_threshold;
- // Kernel-only stuff starts here.
+ /* Kernel-only stuff starts here. */
const char *name;
int index;
@@ -297,10 +297,6 @@ struct mtd_info {
int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
- unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags);
int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
index fdef72d6e198..7ab51bc4a380 100644
--- a/include/linux/mtd/nand-gpio.h
+++ b/include/linux/mtd/nand-gpio.h
@@ -5,11 +5,6 @@
#include <linux/mtd/rawnand.h>
struct gpio_nand_platdata {
- int gpio_nce;
- int gpio_nwp;
- int gpio_cle;
- int gpio_ale;
- int gpio_rdy;
void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
struct mtd_partition *parts;
unsigned int num_parts;
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 2b05f4273bab..749bb08c4772 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -177,6 +177,9 @@ enum nand_ecc_algo {
*/
#define NAND_NEED_SCRAMBLING 0x00002000
+/* Device needs 3rd row address cycle */
+#define NAND_ROW_ADDR_3 0x00004000
+
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1f0a7fc7772f..d0c66a0975cf 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -232,10 +232,17 @@ enum spi_nor_option_flags {
};
/**
+ * struct flash_info - Forward declaration of a structure used internally by
+ * spi_nor_scan()
+ */
+struct flash_info;
+
+/**
* struct spi_nor - Structure for defining a the SPI NOR layer
* @mtd: point to a mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
* @dev: point to a spi device, or a spi nor controller device.
+ * @info: spi-nor part JDEC MFR id and other info
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -262,6 +269,7 @@ enum spi_nor_option_flags {
* @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
* @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
* @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
+ * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode
* completely locked
* @priv: the private data
*/
@@ -269,6 +277,7 @@ struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
struct device *dev;
+ const struct flash_info *info;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
@@ -296,6 +305,7 @@ struct spi_nor {
int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*quad_enable)(struct spi_nor *nor);
void *priv;
};
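
Editor's note: the new quad_enable hook lets a flash- or controller-specific routine decide how Quad I/O is switched on, instead of the core guessing purely from the JEDEC manufacturer ID. A heavily hedged sketch of plugging in a custom handler; whether a controller driver or the core's parameter parsing fills the hook in, the call site shape is the same, and all names other than the hook itself are invented:

/* Illustrative only: supply a custom quad-enable sequence for a part
 * whose status-register layout the generic code does not handle.
 */
static int example_quad_enable(struct spi_nor *nor)
{
	/* issue the vendor-specific "set QE bit" sequence here */
	return 0;
}

static void example_setup(struct spi_nor *nor)
{
	nor->quad_enable = example_quad_enable;
	/* the core calls the hook when quad mode needs to be enabled */
}
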
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dc8b4896b77b..b1b0ca7ccb2b 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -54,8 +54,9 @@ enum {
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
+ NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_ESP_BIT,
+ NETIF_F_GSO_UDP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -132,6 +133,7 @@ enum {
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP)
+#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6b274bfe489f..ef789e1d679e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4140,6 +4140,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 96c94980d1ff..0403894147a3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1485,12 +1485,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
-#ifdef CONFIG_HT_IRQ
-/* The functions a driver should call */
-int ht_create_irq(struct pci_dev *dev, int idx);
-void ht_destroy_irq(unsigned int irq);
-#endif /* CONFIG_HT_IRQ */
-
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 25e267f1970c..619df2431e75 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -64,21 +64,4 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
};
-
-struct omap_nand_platform_data {
- int cs;
- struct mtd_partition *parts;
- int nr_parts;
- bool flash_bbt;
- enum nand_io xfer_type;
- int devsize;
- enum omap_ecc ecc_opt;
-
- struct device_node *elm_of_node;
-
- /* deprecated */
- struct gpmc_nand_regs reg;
- struct device_node *of_node;
- bool dev_ready;
-};
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 905bba92f015..e9b603ee9953 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -132,10 +132,8 @@ struct va_format {
*/
#define no_printk(fmt, ...) \
({ \
- do { \
- if (0) \
- printk(fmt, ##__VA_ARGS__); \
- } while (0); \
+ if (0) \
+ printk(fmt, ##__VA_ARGS__); \
0; \
})
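
Editor's note: no_printk() stays a statement expression evaluating to 0; the `if (0) printk(...)` keeps the compiler's format-string checking while generating no code, and the patch merely drops the redundant do { } while (0) wrapper. A hedged userspace analog of the same trick:

#include <stdio.h>

/* Userspace analog of no_printk(): arguments are type-checked against
 * the format string, but the branch is dead and compiles away.
 */
#define no_print(fmt, ...)			\
({						\
	if (0)					\
		printf(fmt, ##__VA_ARGS__);	\
	0;					\
})

int main(void)
{
	int unused = no_print("%d packets\n", 42);	/* warns on type mismatch */
	(void)unused;
	return 0;
}
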
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a5dc7c98b0a2..21991d668d35 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -473,10 +473,10 @@ struct sched_dl_entity {
* conditions between the inactive timer handler and the wakeup
* code.
*/
- int dl_throttled : 1;
- int dl_boosted : 1;
- int dl_yielded : 1;
- int dl_non_contending : 1;
+ unsigned int dl_throttled : 1;
+ unsigned int dl_boosted : 1;
+ unsigned int dl_yielded : 1;
+ unsigned int dl_non_contending : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
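
Editor's note: the dl_* flags become `unsigned int` bitfields because a 1-bit signed bitfield can only hold 0 and -1, so storing 1 and later comparing against 1 is unreliable. A small self-contained illustration:

#include <stdio.h>

struct flags {
	int		s : 1;	/* signed 1-bit field: values are 0 and -1 */
	unsigned int	u : 1;	/* unsigned 1-bit field: values are 0 and 1 */
};

int main(void)
{
	struct flags f = { .s = 1, .u = 1 };

	/* Typically prints "-1 1": the signed field sign-extends. */
	printf("%d %u\n", f.s, f.u);
	return 0;
}
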
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ed06e1c28fc7..bc486ef23f20 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -568,6 +568,8 @@ enum {
SKB_GSO_SCTP = 1 << 14,
SKB_GSO_ESP = 1 << 15,
+
+ SKB_GSO_UDP = 1 << 16,
};
#if BITS_PER_LONG > 32
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 270bad0e1bed..40d2822f0e2f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -213,7 +213,7 @@ extern void __init cache_initialize(void);
extern int cache_register_net(struct cache_detail *cd, struct net *net);
extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
-extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
+extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
extern void sunrpc_init_cache_detail(struct cache_detail *cd);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7e9011101cb0..d315c3d6725c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -136,13 +136,6 @@ struct timekeeper {
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);
-#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
-
-extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
- struct clocksource *c, u32 mult,
- u64 cycle_last);
-extern void update_vsyscall_tz(void);
-
#else
static inline void update_vsyscall(struct timekeeper *tk)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index c198ab40c04f..b17bcce58bc4 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -143,12 +143,6 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
/*
- * PPS accessor
- */
-extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
- struct timespec64 *ts_real);
-
-/*
* struct system_time_snapshot - simultaneous raw/real time capture with
* counter value
* @cycles: Clocksource counter value to produce the system times
diff --git a/include/linux/timer.h b/include/linux/timer.h
index bf781acfc6d8..04af640ea95b 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,8 +17,7 @@ struct timer_list {
*/
struct hlist_node entry;
unsigned long expires;
- void (*function)(unsigned long);
- unsigned long data;
+ void (*function)(struct timer_list *);
u32 flags;
#ifdef CONFIG_LOCKDEP
@@ -64,13 +63,9 @@ struct timer_list {
#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
-#define TIMER_DATA_TYPE unsigned long
-#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)
-
-#define __TIMER_INITIALIZER(_function, _data, _flags) { \
+#define __TIMER_INITIALIZER(_function, _flags) { \
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
- .data = (_data), \
.flags = (_flags), \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
@@ -78,108 +73,71 @@ struct timer_list {
#define DEFINE_TIMER(_name, _function) \
struct timer_list _name = \
- __TIMER_INITIALIZER((TIMER_FUNC_TYPE)_function, 0, 0)
+ __TIMER_INITIALIZER(_function, 0)
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+/*
+ * LOCKDEP and DEBUG timer interfaces.
+ */
+void init_timer_key(struct timer_list *timer,
+ void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void init_timer_on_stack_key(struct timer_list *timer,
+ void (*func)(struct timer_list *),
unsigned int flags, const char *name,
struct lock_class_key *key);
-extern void destroy_timer_on_stack(struct timer_list *timer);
#else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
static inline void init_timer_on_stack_key(struct timer_list *timer,
- unsigned int flags, const char *name,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
+ const char *name,
struct lock_class_key *key)
{
- init_timer_key(timer, flags, name, key);
+ init_timer_key(timer, func, flags, name, key);
}
#endif
#ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _flags) \
+#define __init_timer(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_key((_timer), (_flags), #_timer, &__key); \
+ init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
} while (0)
-#define __init_timer_on_stack(_timer, _flags) \
+#define __init_timer_on_stack(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \
+ init_timer_on_stack_key((_timer), (_fn), (_flags), \
+ #_timer, &__key); \
} while (0)
#else
-#define __init_timer(_timer, _flags) \
- init_timer_key((_timer), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _flags) \
- init_timer_on_stack_key((_timer), (_flags), NULL, NULL)
+#define __init_timer(_timer, _fn, _flags) \
+ init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __init_timer_on_stack(_timer, _fn, _flags) \
+ init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
#endif
-#define init_timer(timer) \
- __init_timer((timer), 0)
-
-#define __setup_timer(_timer, _fn, _data, _flags) \
- do { \
- __init_timer((_timer), (_flags)); \
- (_timer)->function = (_fn); \
- (_timer)->data = (_data); \
- } while (0)
-
-#define __setup_timer_on_stack(_timer, _fn, _data, _flags) \
- do { \
- __init_timer_on_stack((_timer), (_flags)); \
- (_timer)->function = (_fn); \
- (_timer)->data = (_data); \
- } while (0)
-
-#define setup_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), 0)
-#define setup_pinned_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
-#define setup_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), 0)
-#define setup_pinned_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
+/**
+ * timer_setup - prepare a timer for first use
+ * @timer: the timer in question
+ * @callback: the function to call when timer expires
+ * @flags: any TIMER_* flags
+ *
+ * Regular timer initialization should use either DEFINE_TIMER() above,
+ * or timer_setup(). For timers on the stack, timer_setup_on_stack() must
+ * be used and must be balanced with a call to destroy_timer_on_stack().
+ */
+#define timer_setup(timer, callback, flags) \
+ __init_timer((timer), (callback), (flags))
-#ifndef CONFIG_LOCKDEP
-static inline void timer_setup(struct timer_list *timer,
- void (*callback)(struct timer_list *),
- unsigned int flags)
-{
- __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
- (TIMER_DATA_TYPE)timer, flags);
-}
+#define timer_setup_on_stack(timer, callback, flags) \
+ __init_timer_on_stack((timer), (callback), (flags))
-static inline void timer_setup_on_stack(struct timer_list *timer,
- void (*callback)(struct timer_list *),
- unsigned int flags)
-{
- __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback,
- (TIMER_DATA_TYPE)timer, flags);
-}
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void destroy_timer_on_stack(struct timer_list *timer);
#else
-/*
- * Under LOCKDEP, the timer lock_class_key (set up in __init_timer) needs
- * to be tied to the caller's context, so an inline (above) won't work. We
- * do want to keep the inline for argument type checking, though.
- */
-# define timer_setup(timer, callback, flags) \
- __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \
- (TIMER_DATA_TYPE)(timer), (flags))
-# define timer_setup_on_stack(timer, callback, flags) \
- __setup_timer_on_stack((timer), \
- (TIMER_FUNC_TYPE)(callback), \
- (TIMER_DATA_TYPE)(timer), (flags))
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
#endif
#define from_timer(var, callback_timer, timer_fieldname) \
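
The timer.h hunk above removes the (function, data) callback convention in favour of callbacks that receive the timer itself. A minimal sketch of a converted user, with hypothetical names, is shown below: the callback recovers its enclosing structure with from_timer() instead of casting an opaque 'data' cookie.

/* Sketch only; foo_device and its fields are illustrative. */
struct foo_device {
	struct timer_list poll_timer;
	bool poll_pending;
};

static void foo_poll_expired(struct timer_list *t)
{
	struct foo_device *foo = from_timer(foo, t, poll_timer);

	foo->poll_pending = false;
}

static void foo_start_polling(struct foo_device *foo)
{
	timer_setup(&foo->poll_timer, foo_poll_expired, 0);
	mod_timer(&foo->poll_timer, jiffies + HZ);
}
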
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 210034c896e3..f144216febc6 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
{
- unsigned short gso_type = 0;
+ unsigned int gso_type = 0;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
default:
return -EINVAL;
}
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 01a050fc6650..4a54ef96aff5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -176,8 +176,7 @@ struct execute_work {
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
.work = __WORK_INITIALIZER((n).work, (f)), \
- .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(n.timer), \
+ .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
(tflags) | TIMER_IRQSAFE), \
}
@@ -242,19 +241,17 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
- __setup_timer(&(_work)->timer, \
- (TIMER_FUNC_TYPE)delayed_work_timer_fn, \
- (TIMER_DATA_TYPE)&(_work)->timer, \
- (_tflags) | TIMER_IRQSAFE); \
+ __init_timer(&(_work)->timer, \
+ delayed_work_timer_fn, \
+ (_tflags) | TIMER_IRQSAFE); \
} while (0)
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
do { \
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
- __setup_timer_on_stack(&(_work)->timer, \
- (TIMER_FUNC_TYPE)delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(_work)->timer,\
- (_tflags) | TIMER_IRQSAFE); \
+ __init_timer_on_stack(&(_work)->timer, \
+ delayed_work_timer_fn, \
+ (_tflags) | TIMER_IRQSAFE); \
} while (0)
#define INIT_DELAYED_WORK(_work, _func) \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f42d85631d17..fdfd04e348f6 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -308,7 +308,7 @@ static inline void cgroup_writeback_umount(void)
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
-void laptop_mode_timer_fn(unsigned long data);
+void laptop_mode_timer_fn(struct timer_list *t);
#else
static inline void laptop_sync_completion(void) { }
#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ec14f0d5a3a1..f73797e2fa60 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -767,6 +767,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr);
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cc9073e45be9..eec143cca1c0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
* ieee80211_nullfunc_get - retrieve a nullfunc template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @qos_ok: QoS NDP is acceptable to the caller; this should be set
+ * if at all possible
*
 * Creates a Nullfunc template which can, for example, be uploaded to
 * hardware. The template must be updated after association so that the
 * correct BSSID and address are used.
*
+ * If @qos_ok is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be QoS NDP.
+ *
* Note: Caller (or hardware) is responsible for setting the
* &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
*
* Return: The nullfunc template. %NULL on error.
*/
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ bool qos_ok);
/**
* ieee80211_probereq_get - retrieve a Probe Request template
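
With the extra qos_ok argument, a driver opts in to receiving a QoS NDP template when the association supports it. A hedged sketch of a call site follows; the driver name is hypothetical and real drivers would queue the frame rather than free it.

static int foo_send_nullfunc(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct sk_buff *skb = ieee80211_nullfunc_get(hw, vif, true);

	if (!skb)
		return -ENOMEM;

	/* placeholder: hand the frame to the hardware queue here */
	dev_kfree_skb(skb);
	return 0;
}
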
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 4a5b9a306c69..32ee65a30aff 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -48,31 +48,32 @@ static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
/* This uses the crypto implementation of crc32c, which is either
* implemented w/ hardware support or resolves to __crc32c_le().
*/
- return crc32c(sum, buff, len);
+ return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}
static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
int offset, int len)
{
- return __crc32c_le_combine(csum, csum2, len);
+ return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+ (__force __u32)csum2, len);
}
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
struct sctphdr *sh = sctp_hdr(skb);
- __le32 ret, old = sh->checksum;
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
.combine = sctp_csum_combine,
};
+ __le32 old = sh->checksum;
+ __wsum new;
sh->checksum = 0;
- ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
- ~(__u32)0, &ops));
+ new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops);
sh->checksum = old;
- return ret;
+ return cpu_to_le32((__force __u32)new);
}
#endif /* __sctp_checksum_h__ */
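
The __force casts above only adjust the sparse annotations; the computed checksum is unchanged. As a usage sketch (the caller name is hypothetical), sctp_compute_cksum() already returns the value in wire order, so it can be stored straight into the SCTP header:

static void foo_fill_sctp_csum(struct sk_buff *skb, unsigned int offset)
{
	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);

	sh->checksum = sctp_compute_cksum(skb, offset);
}
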
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 749a42882437..906a9c0efa71 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -195,6 +195,11 @@ void sctp_remaddr_proc_exit(struct net *net);
int sctp_offload_init(void);
/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
+/*
* sctp/stream.c
*/
int sctp_send_reset_streams(struct sctp_association *asoc,
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index c676550a4c7d..5c5da48f65e7 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -69,4 +69,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
#endif /* __sctp_stream_sched_h__ */
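
The new hooks let each stream scheduler register its ops table explicitly during protocol initialisation instead of relying on link order. A rough sketch of the intended flow, assuming the prio scheduler's ops symbol is named sctp_sched_prio (that name is an assumption here):

void sctp_sched_ops_prio_init(void)
{
	/* sctp_sched_prio is the assumed ops table of the prio scheduler */
	sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
}

void sctp_sched_ops_init(void)
{
	sctp_sched_ops_prio_init();
	sctp_sched_ops_rr_init();
}
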
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 85ea578195d4..4e09398009c1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -539,7 +539,7 @@ void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
-bool tcp_schedule_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
const struct sk_buff *next_skb);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 1fb6ad3c5006..7ae177c8e399 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -15,6 +15,8 @@ struct scsi_cmnd;
struct scsi_lun;
struct scsi_sense_hdr;
+typedef unsigned int __bitwise blist_flags_t;
+
struct scsi_mode_data {
__u32 length;
__u16 block_descriptor_length;
@@ -141,7 +143,7 @@ struct scsi_device {
unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target; /* used only for single_lun */
- unsigned int sdev_bflags; /* black/white flags as also found in
+ blist_flags_t sdev_bflags; /* black/white flags as also found in
* scsi_devinfo.[hc]. For now used only to
* pass settings from slave_alloc to scsi
* core. */
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 3cf125b56c3a..ea67c32e870e 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -6,55 +6,55 @@
*/
/* Only scan LUN 0 */
-#define BLIST_NOLUN ((__force __u32 __bitwise)(1 << 0))
+#define BLIST_NOLUN ((__force blist_flags_t)(1 << 0))
/* Known to have LUNs, force scanning.
* DEPRECATED: Use max_luns=N */
-#define BLIST_FORCELUN ((__force __u32 __bitwise)(1 << 1))
+#define BLIST_FORCELUN ((__force blist_flags_t)(1 << 1))
/* Flag for broken handshaking */
-#define BLIST_BORKEN ((__force __u32 __bitwise)(1 << 2))
+#define BLIST_BORKEN ((__force blist_flags_t)(1 << 2))
/* unlock by special command */
-#define BLIST_KEY ((__force __u32 __bitwise)(1 << 3))
+#define BLIST_KEY ((__force blist_flags_t)(1 << 3))
/* Do not use LUNs in parallel */
-#define BLIST_SINGLELUN ((__force __u32 __bitwise)(1 << 4))
+#define BLIST_SINGLELUN ((__force blist_flags_t)(1 << 4))
/* Buggy Tagged Command Queuing */
-#define BLIST_NOTQ ((__force __u32 __bitwise)(1 << 5))
+#define BLIST_NOTQ ((__force blist_flags_t)(1 << 5))
/* Non consecutive LUN numbering */
-#define BLIST_SPARSELUN ((__force __u32 __bitwise)(1 << 6))
+#define BLIST_SPARSELUN ((__force blist_flags_t)(1 << 6))
/* Avoid LUNS >= 5 */
-#define BLIST_MAX5LUN ((__force __u32 __bitwise)(1 << 7))
+#define BLIST_MAX5LUN ((__force blist_flags_t)(1 << 7))
/* Treat as (removable) CD-ROM */
-#define BLIST_ISROM ((__force __u32 __bitwise)(1 << 8))
+#define BLIST_ISROM ((__force blist_flags_t)(1 << 8))
/* LUNs past 7 on a SCSI-2 device */
-#define BLIST_LARGELUN ((__force __u32 __bitwise)(1 << 9))
+#define BLIST_LARGELUN ((__force blist_flags_t)(1 << 9))
/* override additional length field */
-#define BLIST_INQUIRY_36 ((__force __u32 __bitwise)(1 << 10))
+#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1 << 10))
/* do not do automatic start on add */
-#define BLIST_NOSTARTONADD ((__force __u32 __bitwise)(1 << 12))
+#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1 << 12))
/* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
-#define BLIST_REPORTLUN2 ((__force __u32 __bitwise)(1 << 17))
+#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1 << 17))
/* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOREPORTLUN ((__force __u32 __bitwise)(1 << 18))
+#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1 << 18))
/* don't use PREVENT-ALLOW commands */
-#define BLIST_NOT_LOCKABLE ((__force __u32 __bitwise)(1 << 19))
+#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1 << 19))
/* device is actually for RAID config */
-#define BLIST_NO_ULD_ATTACH ((__force __u32 __bitwise)(1 << 20))
+#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1 << 20))
/* select without ATN */
-#define BLIST_SELECT_NO_ATN ((__force __u32 __bitwise)(1 << 21))
+#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1 << 21))
/* retry HARDWARE_ERROR */
-#define BLIST_RETRY_HWERROR ((__force __u32 __bitwise)(1 << 22))
+#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1 << 22))
/* maximum 512 sector cdb length */
-#define BLIST_MAX_512 ((__force __u32 __bitwise)(1 << 23))
+#define BLIST_MAX_512 ((__force blist_flags_t)(1 << 23))
/* Disable T10 PI (DIF) */
-#define BLIST_NO_DIF ((__force __u32 __bitwise)(1 << 25))
+#define BLIST_NO_DIF ((__force blist_flags_t)(1 << 25))
/* Ignore SBC-3 VPD pages */
-#define BLIST_SKIP_VPD_PAGES ((__force __u32 __bitwise)(1 << 26))
+#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1 << 26))
/* Attempt to read VPD pages */
-#define BLIST_TRY_VPD_PAGES ((__force __u32 __bitwise)(1 << 28))
+#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1 << 28))
/* don't try to issue RSOC */
-#define BLIST_NO_RSOC ((__force __u32 __bitwise)(1 << 29))
+#define BLIST_NO_RSOC ((__force blist_flags_t)(1 << 29))
/* maximum 1024 sector cdb length */
-#define BLIST_MAX_1024 ((__force __u32 __bitwise)(1 << 30))
+#define BLIST_MAX_1024 ((__force blist_flags_t)(1 << 30))
/* Use UNMAP limit for WRITE SAME */
-#define BLIST_UNMAP_LIMIT_WS ((__force __u32 __bitwise)(1 << 31))
+#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1 << 31))
#endif
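
Because blist_flags_t is declared __bitwise, sparse now warns when a plain integer is mixed with the BLIST_* constants. An illustrative entry only (the vendor and model strings are made up):

static const struct {
	const char *vendor;
	const char *model;
	blist_flags_t flags;
} example_devinfo = {
	.vendor = "ACME",
	.model  = "ROADRUNNER",
	.flags  = BLIST_SPARSELUN | BLIST_NOREPORTLUN,
};
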
diff --git a/include/sound/control.h b/include/sound/control.h
index a1f1152bc687..ca13a44ae9d4 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -249,7 +249,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
- int (*func)(struct snd_kcontrol *, void *),
+ int (*func)(struct snd_kcontrol *vslave,
+ struct snd_kcontrol *slave,
+ void *arg),
void *arg);
/*
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index f5db145e68ec..2c8d8115469d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -182,6 +182,7 @@ enum tcm_sense_reason_table {
TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a),
TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
+ TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
#undef R
};
@@ -490,6 +491,7 @@ struct se_cmd {
#define CMD_T_STOP (1 << 5)
#define CMD_T_TAS (1 << 10)
#define CMD_T_FABRIC_STOP (1 << 11)
+#define CMD_T_PRE_EXECUTE (1 << 12)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index ebe96796027a..36cb50c111a6 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
rxrpc_conn_put_client,
rxrpc_conn_put_service,
rxrpc_conn_queued,
+ rxrpc_conn_reap_service,
rxrpc_conn_seen,
};
@@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace {
enum rxrpc_timer_trace {
rxrpc_timer_begin,
+ rxrpc_timer_exp_ack,
+ rxrpc_timer_exp_hard,
+ rxrpc_timer_exp_idle,
+ rxrpc_timer_exp_keepalive,
+ rxrpc_timer_exp_lost_ack,
+ rxrpc_timer_exp_normal,
+ rxrpc_timer_exp_ping,
+ rxrpc_timer_exp_resend,
rxrpc_timer_expired,
rxrpc_timer_init_for_reply,
rxrpc_timer_init_for_send_reply,
+ rxrpc_timer_restart,
rxrpc_timer_set_for_ack,
+ rxrpc_timer_set_for_hard,
+ rxrpc_timer_set_for_idle,
+ rxrpc_timer_set_for_keepalive,
+ rxrpc_timer_set_for_lost_ack,
+ rxrpc_timer_set_for_normal,
rxrpc_timer_set_for_ping,
rxrpc_timer_set_for_resend,
rxrpc_timer_set_for_send,
@@ -150,6 +165,7 @@ enum rxrpc_timer_trace {
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
rxrpc_propose_ack_ping_for_params,
@@ -206,6 +222,7 @@ enum rxrpc_congest_change {
EM(rxrpc_conn_put_client, "PTc") \
EM(rxrpc_conn_put_service, "PTs") \
EM(rxrpc_conn_queued, "QUE") \
+ EM(rxrpc_conn_reap_service, "RPs") \
E_(rxrpc_conn_seen, "SEE")
#define rxrpc_client_traces \
@@ -296,16 +313,31 @@ enum rxrpc_congest_change {
#define rxrpc_timer_traces \
EM(rxrpc_timer_begin, "Begin ") \
EM(rxrpc_timer_expired, "*EXPR*") \
+ EM(rxrpc_timer_exp_ack, "ExpAck") \
+ EM(rxrpc_timer_exp_hard, "ExpHrd") \
+ EM(rxrpc_timer_exp_idle, "ExpIdl") \
+ EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
+ EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
+ EM(rxrpc_timer_exp_normal, "ExpNml") \
+ EM(rxrpc_timer_exp_ping, "ExpPng") \
+ EM(rxrpc_timer_exp_resend, "ExpRsn") \
EM(rxrpc_timer_init_for_reply, "IniRpl") \
EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
+ EM(rxrpc_timer_restart, "Restrt") \
EM(rxrpc_timer_set_for_ack, "SetAck") \
+ EM(rxrpc_timer_set_for_hard, "SetHrd") \
+ EM(rxrpc_timer_set_for_idle, "SetIdl") \
+ EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
+ EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
+ EM(rxrpc_timer_set_for_normal, "SetNml") \
EM(rxrpc_timer_set_for_ping, "SetPng") \
EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetTx ")
+ E_(rxrpc_timer_set_for_send, "SetSnd")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
@@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx,
TRACE_EVENT(rxrpc_timer,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now, unsigned long now_j),
+ unsigned long now),
- TP_ARGS(call, why, now, now_j),
+ TP_ARGS(call, why, now),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(enum rxrpc_timer_trace, why )
- __field_struct(ktime_t, now )
- __field_struct(ktime_t, expire_at )
- __field_struct(ktime_t, ack_at )
- __field_struct(ktime_t, resend_at )
- __field(unsigned long, now_j )
- __field(unsigned long, timer )
+ __field(long, now )
+ __field(long, ack_at )
+ __field(long, ack_lost_at )
+ __field(long, resend_at )
+ __field(long, ping_at )
+ __field(long, expect_rx_by )
+ __field(long, expect_req_by )
+ __field(long, expect_term_by )
+ __field(long, timer )
),
TP_fast_assign(
- __entry->call = call;
- __entry->why = why;
- __entry->now = now;
- __entry->expire_at = call->expire_at;
- __entry->ack_at = call->ack_at;
- __entry->resend_at = call->resend_at;
- __entry->now_j = now_j;
- __entry->timer = call->timer.expires;
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->ack_at = call->ack_at;
+ __entry->ack_lost_at = call->ack_lost_at;
+ __entry->resend_at = call->resend_at;
+ __entry->expect_rx_by = call->expect_rx_by;
+ __entry->expect_req_by = call->expect_req_by;
+ __entry->expect_term_by = call->expect_term_by;
+ __entry->timer = call->timer.expires;
),
- TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
- ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
- ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
- __entry->timer - __entry->now_j)
+ __entry->ack_at - __entry->now,
+ __entry->ack_lost_at - __entry->now,
+ __entry->resend_at - __entry->now,
+ __entry->expect_rx_by - __entry->now,
+ __entry->expect_req_by - __entry->now,
+ __entry->expect_term_by - __entry->now,
+ __entry->timer - __entry->now)
);
TRACE_EVENT(rxrpc_rx_lose,
@@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 306b31de5194..bc01e06bc716 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -116,9 +116,9 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
* RUNNING (we will not have dequeued if state != RUNNING).
*/
if (preempt)
- return TASK_STATE_MAX;
+ return TASK_REPORT_MAX;
- return task_state_index(p);
+ return 1 << task_state_index(p);
}
#endif /* CREATE_TRACE_POINTS */
@@ -164,7 +164,7 @@ TRACE_EVENT(sched_switch,
{ 0x40, "P" }, { 0x80, "I" }) :
"R",
- __entry->prev_state & TASK_STATE_MAX ? "+" : "",
+ __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
diff --git a/include/uapi/linux/bfs_fs.h b/include/uapi/linux/bfs_fs.h
index 73445ef07dda..940b04772af8 100644
--- a/include/uapi/linux/bfs_fs.h
+++ b/include/uapi/linux/bfs_fs.h
@@ -76,7 +76,7 @@ struct bfs_super_block {
#define BFS_FILEBLOCKS(ip) \
((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock))
#define BFS_UNCLEAN(bfs_sb, sb) \
- ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY))
+ ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & SB_RDONLY))
#endif /* _LINUX_BFS_FS_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e880ae6434ee..4c223ab30293 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -262,7 +262,7 @@ union bpf_attr {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
- __u32 prog_target_ifindex; /* ifindex of netdev to prep for */
+ __u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -897,10 +897,6 @@ enum sk_action {
#define BPF_TAG_SIZE 8
-enum bpf_prog_status {
- BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
-};
-
struct bpf_prog_info {
__u32 type;
__u32 id;
@@ -914,8 +910,6 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
- __u32 ifindex;
- __u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 9d4afea308a4..9335d92c14a4 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
+ RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
RXRPC__SUPPORTED
};
diff --git a/include/uapi/linux/vm_sockets_diag.h b/include/uapi/linux/vm_sockets_diag.h
index 14cd7dc5a187..0b4dd54f3d1e 100644
--- a/include/uapi/linux/vm_sockets_diag.h
+++ b/include/uapi/linux/vm_sockets_diag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* AF_VSOCK sock_diag(7) interface for querying open sockets */
#ifndef _UAPI__VM_SOCKETS_DIAG_H__
diff --git a/include/video/iga.h b/include/video/iga.h
deleted file mode 100644
index 83ca18492e00..000000000000
--- a/include/video/iga.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: iga.h,v 1.2 1999/09/11 22:56:31 zaitcev Exp $
- * iga1682.h: Sparc/PCI iga1682 driver constants etc.
- *
- * Copyleft 1998 V. Roganov and G. Raiko
- */
-
-#ifndef _IGA1682_H
-#define _IGA1682_H 1
-
-#define IGA_ATTR_CTL 0x3C0
-#define IGA_IDX_VGA_OVERSCAN 0x11
-#define DAC_W_INDEX 0x03C8
-#define DAC_DATA 0x03C9
-#define IGA_EXT_CNTRL 0x3CE
-#define IGA_IDX_EXT_BUS_CNTL 0x30
-#define MEM_SIZE_ALIAS 0x3
-#define MEM_SIZE_1M 0x0
-#define MEM_SIZE_2M 0x1
-#define MEM_SIZE_4M 0x2
-#define MEM_SIZE_RESERVED 0x3
-#define IGA_IDX_OVERSCAN_COLOR 0x58
-#define IGA_IDX_EXT_MEM_2 0x72
-
-#endif /* !(_IGA1682_H) */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index d24025626310..9649ecd8a73a 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -331,7 +331,7 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
void *data)
{
struct ipc_namespace *ns;
- if (flags & MS_KERNMOUNT) {
+ if (flags & SB_KERNMOUNT) {
ns = data;
data = NULL;
} else {
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 2816feb38be1..68ec884440b7 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -14,8 +14,9 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
struct net *net = current->nsproxy->net_ns;
struct bpf_dev_offload *offload;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
+ attr->prog_type != BPF_PROG_TYPE_XDP)
+ return -EINVAL;
if (attr->prog_flags)
return -EINVAL;
@@ -28,7 +29,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
init_waitqueue_head(&offload->verifier_done);
rtnl_lock();
- offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex);
+ offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
if (!offload->netdev) {
rtnl_unlock();
kfree(offload);
@@ -85,6 +86,10 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
+ /* Caution - if netdev is destroyed before the program, this function
+ * will be called twice.
+ */
+
data.offload.prog = prog;
if (offload->verifier_running)
@@ -144,18 +149,6 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
return bpf_prog_offload_translate(prog);
}
-u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
-{
- struct bpf_dev_offload *offload = prog->aux->offload;
- u32 ifindex;
-
- rtnl_lock();
- ifindex = offload->netdev ? offload->netdev->ifindex : 0;
- rtnl_unlock();
-
- return ifindex;
-}
-
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
@@ -169,6 +162,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
switch (event) {
case NETDEV_UNREGISTER:
+ /* ignore namespace changes */
+ if (netdev->reg_state != NETREG_UNREGISTERING)
+ break;
+
list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
offloads) {
if (offload->netdev == netdev)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 09badc37e864..2c4cfeaa8d5e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1057,22 +1057,23 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
-static bool bpf_prog_can_attach(struct bpf_prog *prog,
- enum bpf_prog_type *attach_type,
- struct net_device *netdev)
+static bool bpf_prog_get_ok(struct bpf_prog *prog,
+ enum bpf_prog_type *attach_type, bool attach_drv)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
+ /* not an attachment, just a refcount inc, always allow */
+ if (!attach_type)
+ return true;
if (prog->type != *attach_type)
return false;
- if (offload && offload->netdev != netdev)
+ if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
return false;
return true;
}
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
- struct net_device *netdev)
+ bool attach_drv)
{
struct fd f = fdget(ufd);
struct bpf_prog *prog;
@@ -1080,7 +1081,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
prog = ____bpf_prog_get(f);
if (IS_ERR(prog))
return prog;
- if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) {
+ if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
prog = ERR_PTR(-EINVAL);
goto out;
}
@@ -1093,23 +1094,13 @@ out:
struct bpf_prog *bpf_prog_get(u32 ufd)
{
- return __bpf_prog_get(ufd, NULL, NULL);
+ return __bpf_prog_get(ufd, NULL, false);
}
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
-{
- struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
-
- if (!IS_ERR(prog))
- trace_bpf_prog_get_type(prog);
- return prog;
-}
-EXPORT_SYMBOL_GPL(bpf_prog_get_type);
-
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
- struct net_device *netdev)
+ bool attach_drv)
{
- struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev);
+ struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
if (!IS_ERR(prog))
trace_bpf_prog_get_type(prog);
@@ -1118,7 +1109,7 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
+#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
static int bpf_prog_load(union bpf_attr *attr)
{
@@ -1181,7 +1172,7 @@ static int bpf_prog_load(union bpf_attr *attr)
atomic_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
- if (attr->prog_target_ifindex) {
+ if (attr->prog_ifindex) {
err = bpf_prog_offload_init(prog, attr);
if (err)
goto free_prog;
@@ -1625,11 +1616,6 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
return -EFAULT;
}
- if (bpf_prog_is_dev_bound(prog->aux)) {
- info.status |= BPF_PROG_STATUS_DEV_BOUND;
- info.ifindex = bpf_prog_offload_ifindex(prog);
- }
-
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
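
The netdev argument of bpf_prog_get_type_dev() is replaced by an attach_drv flag: a device-bound program is only handed out when the caller is attaching through the driver itself. A hypothetical call site, shown as a sketch (real callers keep the program reference instead of dropping it):

static int foo_set_xdp(u32 ufd, bool attach_drv)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type_dev(ufd, BPF_PROG_TYPE_XDP, attach_drv);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	/* placeholder: install prog here and retain the reference */
	bpf_prog_put(prog);
	return 0;
}
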
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dd54d20ace2f..d4593571c404 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1384,13 +1384,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_MEM ||
+ arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
- if (register_is_null(*reg))
+ if (register_is_null(*reg) &&
+ arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
@@ -3825,6 +3827,7 @@ static int do_check(struct bpf_verifier_env *env)
return err;
regs = cur_regs(env);
+ env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -4020,6 +4023,7 @@ process_bpf_exit:
return err;
insn_idx++;
+ env->insn_aux_data[insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@@ -4202,6 +4206,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ int i;
if (cnt == 1)
return 0;
@@ -4211,6 +4216,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ for (i = off; i < off + cnt - 1; i++)
+ new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@@ -4229,6 +4236,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (aux_data[i].seen)
+ continue;
+ memcpy(insn + i, &nop, sizeof(nop));
+ }
+}
+
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
@@ -4556,6 +4582,9 @@ skip_full_check:
free_states(env);
if (ret == 0)
+ sanitize_dead_code(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9404c631bd3f..16beab4767e1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6676,6 +6676,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
ns_inode = ns_path.dentry->d_inode;
ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
ns_link_info->ino = ns_inode->i_ino;
+ path_put(&ns_path);
}
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2ff1c0c82fc9..0f922729bab9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1246,7 +1246,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* set the trigger type must match. Also all must
* agree on ONESHOT.
*/
- unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
+ unsigned int oldtype;
+
+ /*
+ * If no trigger type was set before, inherit the one
+ * provided by the requester.
+ */
+ if (irqd_trigger_type_was_set(&desc->irq_data)) {
+ oldtype = irqd_get_trigger_type(&desc->irq_data);
+ } else {
+ oldtype = new->flags & IRQF_TRIGGER_MASK;
+ irqd_set_trigger_type(&desc->irq_data, oldtype);
+ }
if (!((old->flags & new->flags) & IRQF_SHARED) ||
(oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index a3cbbc8191c5..7df2480005f8 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -384,7 +384,7 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
- return m->global_available - cpudown ? cm->available : 0;
+ return (m->global_available - cpudown) ? cm->available : 0;
}
/**
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 1215229d1c12..ef2a47e0eab6 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -20,7 +20,7 @@
static int irqfixup __read_mostly;
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
-static void poll_spurious_irqs(unsigned long dummy);
+static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
@@ -143,7 +143,7 @@ out:
return ok;
}
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_spurious_irqs(struct timer_list *unused)
{
struct irq_desc *desc;
int i;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 8ff4ca4665ff..8594d24e4adc 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -769,7 +769,7 @@ static __init int jump_label_test(void)
return 0;
}
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
#endif /* HAVE_JUMP_LABEL */
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 531ffa984bc2..d5fa4116688a 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -614,14 +614,14 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
- unsigned long value;
+ void *value;
struct kallsym_iter *iter = m->private;
/* Some debugging symbols have no name. Ignore them. */
if (!iter->name[0])
return 0;
- value = iter->show_value ? iter->value : 0;
+ value = iter->show_value ? (void *)iter->value : NULL;
if (iter->module_name[0]) {
char type;
@@ -632,10 +632,10 @@ static int s_show(struct seq_file *m, void *p)
*/
type = iter->exported ? toupper(iter->type) :
tolower(iter->type);
- seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
+ seq_printf(m, "%px %c %s\t[%s]\n", value,
type, iter->name, iter->module_name);
} else
- seq_printf(m, KALLSYM_FMT " %c %s\n", value,
+ seq_printf(m, "%px %c %s\n", value,
iter->type, iter->name);
return 0;
}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 8af313081b0d..cd50e99202b0 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -843,7 +843,7 @@ void __kthread_queue_delayed_work(struct kthread_worker *worker,
struct timer_list *timer = &dwork->timer;
struct kthread_work *work = &dwork->work;
- WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn);
+ WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
/*
* If @delay is 0, queue @dwork->work immediately. This is for
diff --git a/kernel/module.c b/kernel/module.c
index f0411a271765..dea01ac9cb74 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4157,7 +4157,7 @@ static int m_show(struct seq_file *m, void *p)
{
struct module *mod = list_entry(p, struct module, list);
char buf[MODULE_FLAGS_BUF_SIZE];
- unsigned long value;
+ void *value;
/* We always ignore unformed modules. */
if (mod->state == MODULE_STATE_UNFORMED)
@@ -4173,8 +4173,8 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
- value = m->private ? 0 : (unsigned long)mod->core_layout.base;
- seq_printf(m, " 0x" KALLSYM_FMT, value);
+ value = m->private ? NULL : mod->core_layout.base;
+ seq_printf(m, " 0x%px", value);
/* Taints info */
if (mod->taints)
diff --git a/kernel/padata.c b/kernel/padata.c
index f262c9a4e70a..57c0074d50cc 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -288,9 +288,9 @@ static void invoke_padata_reorder(struct work_struct *work)
local_bh_enable();
}
-static void padata_reorder_timer(unsigned long arg)
+static void padata_reorder_timer(struct timer_list *t)
{
- struct parallel_data *pd = (struct parallel_data *)arg;
+ struct parallel_data *pd = from_timer(pd, t, timer);
unsigned int weight;
int target_cpu, cpu;
@@ -485,7 +485,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
padata_init_pqueues(pd);
padata_init_squeues(pd);
- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+ timer_setup(&pd->timer, padata_reorder_timer, 0);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 0);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 512f7c2baedd..5d81206a572d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2190,7 +2190,7 @@ again:
}
if (console_seq < log_first_seq) {
- len = sprintf(text, "** %u printk messages dropped ** ",
+ len = sprintf(text, "** %u printk messages dropped **\n",
(unsigned)(log_first_seq - console_seq));
/* messages are gone, move to first one */
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 724d9292d4b9..3e3c2004bb23 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -72,7 +72,7 @@ static void queue_flush_work(struct printk_safe_seq_buf *s)
* have dedicated buffers, because otherwise printk-safe preempted by
* NMI-printk would have overwritten the NMI messages.
*
- * The messages are fushed from irq work (or from panic()), possibly,
+ * The messages are flushed from irq work (or from panic()), possibly,
* from other CPU, concurrently with printk_safe_log_store(). Should this
* happen, printk_safe_log_store() will notice the buffer->len mismatch
* and repeat the write.
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index d689a9557e17..e776fc8cc1df 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -21,10 +21,6 @@ config CLOCKSOURCE_VALIDATE_LAST_CYCLE
config GENERIC_TIME_VSYSCALL
bool
-# Timekeeping vsyscall support
-config GENERIC_TIME_VSYSCALL_OLD
- bool
-
# Old style timekeeping
config ARCH_USES_GETTIMEOFFSET
bool
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 03918a19cf2d..65f9e3f24dde 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -171,7 +171,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
spin_unlock_irqrestore(&watchdog_lock, flags);
}
-static void clocksource_watchdog(unsigned long data)
+static void clocksource_watchdog(struct timer_list *unused)
{
struct clocksource *cs;
u64 csnow, wdnow, cslast, wdlast, delta;
@@ -290,8 +290,7 @@ static inline void clocksource_start_watchdog(void)
{
if (watchdog_running || !watchdog || list_empty(&watchdog_list))
return;
- init_timer(&watchdog_timer);
- watchdog_timer.function = clocksource_watchdog;
+ timer_setup(&watchdog_timer, clocksource_watchdog, 0);
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
watchdog_running = 1;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 198afa78bf69..cd03317e7b57 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -557,45 +557,6 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibity will disappear soon.
-
-static inline void update_vsyscall(struct timekeeper *tk)
-{
- struct timespec xt, wm;
-
- xt = timespec64_to_timespec(tk_xtime(tk));
- wm = timespec64_to_timespec(tk->wall_to_monotonic);
- update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
- tk->tkr_mono.cycle_last);
-}
-
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
- s64 remainder;
-
- /*
- * Store only full nanoseconds into xtime_nsec after rounding
- * it up and add the remainder to the error difference.
- * XXX - This is necessary to avoid small 1ns inconsistnecies caused
- * by truncating the remainder in vsyscalls. However, it causes
- * additional work to be done in timekeeping_adjust(). Once
- * the vsyscall implementations are converted to use xtime_nsec
- * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
- * users are removed, this can be killed.
- */
- remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
- if (remainder != 0) {
- tk->tkr_mono.xtime_nsec -= remainder;
- tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
- tk->ntp_error += remainder << tk->ntp_error_shift;
- tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
- }
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -2164,12 +2125,6 @@ void update_wall_time(void)
timekeeping_adjust(tk, offset);
/*
- * XXX This can be killed once everyone converts
- * to the new update_vsyscall.
- */
- old_vsyscall_fixup(tk);
-
- /*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index af0b8bae4502..ffebcf878fba 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -707,14 +707,18 @@ static inline void debug_timer_assert_init(struct timer_list *timer)
debug_object_assert_init(timer, &timer_debug_descr);
}
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
const char *name, struct lock_class_key *key);
-void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
+void init_timer_on_stack_key(struct timer_list *timer,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
const char *name, struct lock_class_key *key)
{
debug_object_init_on_stack(timer, &timer_debug_descr);
- do_init_timer(timer, flags, name, key);
+ do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
@@ -755,10 +759,13 @@ static inline void debug_assert_init(struct timer_list *timer)
debug_timer_assert_init(timer);
}
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
const char *name, struct lock_class_key *key)
{
timer->entry.pprev = NULL;
+ timer->function = func;
timer->flags = flags | raw_smp_processor_id();
lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
@@ -766,6 +773,7 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
/**
* init_timer_key - initialize a timer
* @timer: the timer to be initialized
+ * @func: timer callback function
* @flags: timer flags
* @name: name of the timer
* @key: lockdep class key of the fake lock used for tracking timer
@@ -774,11 +782,12 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
* init_timer_key() must be done to a timer prior calling *any* of the
* other timer functions.
*/
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+void init_timer_key(struct timer_list *timer,
+ void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key)
{
debug_init(timer);
- do_init_timer(timer, flags, name, key);
+ do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
@@ -1107,12 +1116,12 @@ EXPORT_SYMBOL(timer_reduce);
* add_timer - start a timer
* @timer: the timer to be added
*
- * The kernel will do a ->function(->data) callback from the
+ * The kernel will do a ->function(@timer) callback from the
* timer interrupt at the ->expires point in the future. The
* current time is 'jiffies'.
*
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
+ * The timer's ->expires and ->function fields must be set prior to
+ * calling this function.
*
* Timers with an ->expires field in the past will be executed in the next
* timer tick.
@@ -1284,8 +1293,7 @@ int del_timer_sync(struct timer_list *timer)
EXPORT_SYMBOL(del_timer_sync);
#endif
-static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
- unsigned long data)
+static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *))
{
int count = preempt_count();
@@ -1309,7 +1317,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
lock_map_acquire(&lockdep_map);
trace_timer_expire_entry(timer);
- fn(data);
+ fn(timer);
trace_timer_expire_exit(timer);
lock_map_release(&lockdep_map);
@@ -1331,8 +1339,7 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
while (!hlist_empty(head)) {
struct timer_list *timer;
- void (*fn)(unsigned long);
- unsigned long data;
+ void (*fn)(struct timer_list *);
timer = hlist_entry(head->first, struct timer_list, entry);
@@ -1340,15 +1347,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
detach_timer(timer, true);
fn = timer->function;
- data = timer->data;
if (timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
- call_timer_fn(timer, fn, data);
+ call_timer_fn(timer, fn);
raw_spin_lock(&base->lock);
} else {
raw_spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn, data);
+ call_timer_fn(timer, fn);
raw_spin_lock_irq(&base->lock);
}
}
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 0e7f5428a148..0ed768b56c60 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -389,7 +389,7 @@ static int __init init_timer_list_procfs(void)
{
struct proc_dir_entry *pe;
- pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
if (!pe)
return -ENOMEM;
return 0;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a5580c670866..27d1f4ffa3de 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -78,16 +78,12 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
- int ret = 0;
-
- if (unlikely(size == 0))
- goto out;
+ int ret;
ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
- out:
return ret;
}
@@ -407,7 +403,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
- .arg5_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
@@ -498,7 +494,7 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
};
@@ -609,7 +605,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
- .arg5_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dde6298f6b22..8fdb710bfdd7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1509,7 +1509,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work = &dwork->work;
WARN_ON_ONCE(!wq);
- WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn);
+ WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
WARN_ON_ONCE(timer_pending(timer));
WARN_ON_ONCE(!list_empty(&work->entry));
diff --git a/lib/Kconfig b/lib/Kconfig
index 368972f0db78..c5e84fbcb30b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -46,10 +46,6 @@ config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
-config GENERIC_IO
- bool
- default n
-
config STMP_DEVICE
bool
diff --git a/lib/random32.c b/lib/random32.c
index 65cc018fef40..4aaa76404d56 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -213,11 +213,11 @@ static int __init prandom_init(void)
}
core_initcall(prandom_init);
-static void __prandom_timer(unsigned long dontcare);
+static void __prandom_timer(struct timer_list *unused);
static DEFINE_TIMER(seed_timer, __prandom_timer);
-static void __prandom_timer(unsigned long dontcare)
+static void __prandom_timer(struct timer_list *unused)
{
u32 entropy;
unsigned long expires;
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 563f10e6876a..71ebfa43ad05 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -24,24 +24,6 @@
#define PAD_SIZE 16
#define FILL_CHAR '$'
-#define PTR1 ((void*)0x01234567)
-#define PTR2 ((void*)(long)(int)0xfedcba98)
-
-#if BITS_PER_LONG == 64
-#define PTR1_ZEROES "000000000"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fffffffffedcba98"
-#define PTR_WIDTH 16
-#else
-#define PTR1_ZEROES "0"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fedcba98"
-#define PTR_WIDTH 8
-#endif
-#define PTR_WIDTH_STR stringify(PTR_WIDTH)
-
static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
static char *test_buffer __initdata;
@@ -217,30 +199,79 @@ test_string(void)
test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
}
+#define PLAIN_BUF_SIZE 64 /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789ab)
+#define PTR_STR "ffff0123456789ab"
+#define ZEROS "00000000" /* hex 32 zero bits */
+
+static int __init
+plain_format(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int nchars;
+
+ nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+ if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ return -1;
+
+ return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+
+static int __init
+plain_format(void)
+{
+ /* Format is implicitly tested for 32 bit machines by plain_hash() */
+ return 0;
+}
+
+#endif /* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int nchars;
+
+ nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+ if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
static void __init
plain(void)
{
- test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2);
- /*
- * The field width is overloaded for some %p extensions to
- * pass another piece of information. For plain pointers, the
- * behaviour is slightly odd: One cannot pass either the 0
- * flag nor a precision to %p without gcc complaining, and if
- * one explicitly gives a field width, the number is no longer
- * zero-padded.
- */
- test("|" PTR1_STR PTR1_SPACES " | " PTR1_SPACES PTR1_STR "|",
- "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1);
- test("|" PTR2_STR " | " PTR2_STR "|",
- "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2);
+ int err;
- /*
- * Unrecognized %p extensions are treated as plain %p, but the
- * alphanumeric suffix is ignored (that is, does not occur in
- * the output.)
- */
- test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1);
- test("|"PTR2_STR"|", "|%p0y|", PTR2);
+ err = plain_hash();
+ if (err) {
+ pr_warn("plain 'p' does not appear to be hashed\n");
+ failed_tests++;
+ return;
+ }
+
+ err = plain_format();
+ if (err) {
+ pr_warn("hashing plain 'p' has unexpected format\n");
+ failed_tests++;
+ }
}
static void __init
@@ -251,6 +282,7 @@ symbol_ptr(void)
static void __init
kernel_ptr(void)
{
+ /* We can't test this without access to kptr_restrict. */
}
static void __init
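
Since a plain %p now prints a hashed identifier, the tests above can only check width and hashing behaviour. When the raw value is genuinely needed, %px prints it unmodified, and %pK stays subject to kptr_restrict. Illustrative only:

static void foo_dump_object(const void *obj)
{
	pr_debug("obj=%p raw=%px restricted=%pK\n", obj, obj, obj);
}
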
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1746bae94d41..01c3957b2de6 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -33,6 +33,8 @@
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
@@ -1343,6 +1345,59 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
return string(buf, end, uuid, spec);
}
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ switch (kptr_restrict) {
+ case 0:
+ /* Always print %pK values */
+ break;
+ case 1: {
+ const struct cred *cred;
+
+ /*
+ * kptr_restrict==1 cannot be used in IRQ context
+ * because its test for CAP_SYSLOG would be meaningless.
+ */
+ if (in_irq() || in_serving_softirq() || in_nmi())
+ return string(buf, end, "pK-error", spec);
+
+ /*
+ * Only print the real pointer value if the current
+ * process has CAP_SYSLOG and is running with the
+ * same credentials it started with. This is because
+ * access to files is checked at open() time, but %pK
+ * checks permission at read() time. We don't want to
+ * leak pointer values if a binary opens a file using
+ * %pK and then elevates privileges before reading it.
+ */
+ cred = current_cred();
+ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+ !uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid))
+ ptr = NULL;
+ break;
+ }
+ case 2:
+ default:
+ /* Always print 0's for %pK */
+ ptr = NULL;
+ break;
+ }
+
+ return number(buf, end, (unsigned long)ptr, spec);
+}
+
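[Editor's note: a hedged usage sketch, not part of the patch. Combined with the %pK handling change later in this file, the factored-out restricted_pointer() gives the following sysctl semantics; "obj" is a hypothetical pointer.]

	/*
	 *   kernel.kptr_restrict = 0 -> %pK behaves like plain %p (hashed value)
	 *   kernel.kptr_restrict = 1 -> real address, but only if the caller has
	 *                               CAP_SYSLOG and unchanged credentials
	 *                               ("pK-error" from IRQ context)
	 *   kernel.kptr_restrict = 2 -> always prints zeros
	 */
	pr_info("object at %pK\n", obj);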
static noinline_for_stack
char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
{
@@ -1591,7 +1646,86 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-int kptr_restrict __read_mostly;
+static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+static bool have_filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ /*
+ * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+ * ptr_to_id() needs to see have_filled_random_ptr_key==true
+ * after get_random_bytes() returns.
+ */
+ smp_mb();
+ WRITE_ONCE(have_filled_random_ptr_key, true);
+}
+
+static struct random_ready_callback random_ready = {
+ .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+ int ret = add_random_ready_callback(&random_ready);
+
+ if (!ret) {
+ return 0;
+ } else if (ret == -EALREADY) {
+ fill_random_ptr_key(&random_ready);
+ return 0;
+ }
+
+ return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+{
+ unsigned long hashval;
+ const int default_width = 2 * sizeof(ptr);
+
+ if (unlikely(!have_filled_random_ptr_key)) {
+ spec.field_width = default_width;
+ /* string length must be less than default_width */
+ return string(buf, end, "(ptrval)", spec);
+ }
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits, this makes explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = default_width;
+ spec.flags |= ZEROPAD;
+ }
+ spec.base = 16;
+
+ return number(buf, end, hashval, spec);
+}
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
@@ -1698,11 +1832,16 @@ int kptr_restrict __read_mostly;
* c major compatible string
* C full compatible string
*
+ * - 'x' For printing the address. Equivalent to "%lx".
+ *
* ** Please update also Documentation/printk-formats.txt when making changes **
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
* pointer to the real address.
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -1792,47 +1931,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return buf;
}
case 'K':
- switch (kptr_restrict) {
- case 0:
- /* Always print %pK values */
- break;
- case 1: {
- const struct cred *cred;
-
- /*
- * kptr_restrict==1 cannot be used in IRQ context
- * because its test for CAP_SYSLOG would be meaningless.
- */
- if (in_irq() || in_serving_softirq() || in_nmi()) {
- if (spec.field_width == -1)
- spec.field_width = default_width;
- return string(buf, end, "pK-error", spec);
- }
-
- /*
- * Only print the real pointer value if the current
- * process has CAP_SYSLOG and is running with the
- * same credentials it started with. This is because
- * access to files is checked at open() time, but %pK
- * checks permission at read() time. We don't want to
- * leak pointer values if a binary opens a file using
- * %pK and then elevates privileges before reading it.
- */
- cred = current_cred();
- if (!has_capability_noaudit(current, CAP_SYSLOG) ||
- !uid_eq(cred->euid, cred->uid) ||
- !gid_eq(cred->egid, cred->gid))
- ptr = NULL;
- break;
- }
- case 2:
- default:
- /* Always print 0's for %pK */
- ptr = NULL;
+ if (!kptr_restrict)
break;
- }
- break;
-
+ return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, fmt);
case 'a':
@@ -1857,15 +1958,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
+ case 'x':
+ return pointer_string(buf, end, ptr, spec);
}
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = default_width;
- spec.flags |= ZEROPAD;
- }
- spec.base = 16;
- return number(buf, end, (unsigned long) ptr, spec);
+ /* default is to _not_ leak addresses, hash before printing */
+ return ptr_to_id(buf, end, ptr, spec);
}
/*
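[Editor's note: to illustrate the behavioural change, a sketch not taken from the patch; "skb" is a hypothetical pointer. After this rework an unadorned %p yields a hashed identifier that is stable within a boot, and callers that genuinely need the raw value must opt in with the new %px, as the KASAN report code converted later in this series does.]

	/* Hashed: safe for logs, usable as a unique ID, not the real address. */
	pr_info("skb at %p\n", skb);

	/* Raw address: only where leaking the value is acceptable. */
	pr_info("skb at %px\n", skb);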
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 2f98df0d460e..297c7238f7d4 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -53,6 +53,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
ret = -EFAULT;
goto out;
}
+
+ /*
+ * While get_vaddr_frames() could be used for transient (kernel
+	 * controlled lifetime) pinning of memory pages, all current
+	 * users establish long term (userspace controlled lifetime)
+ * page pinning. Treat get_vaddr_frames() like
+ * get_user_pages_longterm() and disallow it for filesystem-dax
+ * mappings.
+ */
+ if (vma_is_fsdax(vma))
+ return -EOPNOTSUPP;
+
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vec->got_ref = true;
vec->is_pfns = false;
diff --git a/mm/gup.c b/mm/gup.c
index dfcde13f289a..d3fb60e5bfac 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_write(pte) ||
+ return pte_access_permitted(pte, WRITE) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
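[Editor's note: the pte_access_permitted()/pmd_access_permitted()/pud_access_permitted() conversions recur throughout the mm changes below. For reference, the generic fallback behind the helper is roughly the following sketch of the asm-generic definition; architectures with extra access controls (e.g. protection keys) override it.]

	#ifndef pte_access_permitted
	#define pte_access_permitted(pte, write) \
		(pte_present(pte) && (!(write) || pte_write(pte)))
	#endif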
@@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages);
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas_arg)
+{
+ struct vm_area_struct **vmas = vmas_arg;
+ struct vm_area_struct *vma_prev = NULL;
+ long rc, i;
+
+ if (!pages)
+ return -EINVAL;
+
+ if (!vmas) {
+ vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+ GFP_KERNEL);
+ if (!vmas)
+ return -ENOMEM;
+ }
+
+ rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+ for (i = 0; i < rc; i++) {
+ struct vm_area_struct *vma = vmas[i];
+
+ if (vma == vma_prev)
+ continue;
+
+ vma_prev = vma;
+
+ if (vma_is_fsdax(vma))
+ break;
+ }
+
+ /*
+ * Either get_user_pages() failed, or the vma validation
+ * succeeded, in either case we don't need to put_page() before
+ * returning.
+ */
+ if (i >= rc)
+ goto out;
+
+ for (i = 0; i < rc; i++)
+ put_page(pages[i]);
+ rc = -EOPNOTSUPP;
+out:
+ if (vmas != vmas_arg)
+ kfree(vmas);
+ return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
+
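[Editor's note: a hedged sketch of how a longterm-pinning caller such as an RDMA or V4L2 driver would be expected to use the new helper; the function and variable names here are illustrative, not from the patch.]

	/* Caller holds current->mm->mmap_sem for read. */
	static long pin_user_buffer(unsigned long uaddr, unsigned long npages,
				    struct page **pages)
	{
		long pinned;

		pinned = get_user_pages_longterm(uaddr, npages, FOLL_WRITE,
						 pages, NULL);
		if (pinned < 0)
			return pinned;	/* -EOPNOTSUPP for filesystem-DAX VMAs */

		/* ... use the pages; put_page() each one when done ... */
		return pinned;
	}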
/**
* populate_vma_page_range() - populate a range of pages in the vma.
* @vma: target vma
diff --git a/mm/hmm.c b/mm/hmm.c
index ea19742a5d60..3a5c172af560 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -391,11 +391,11 @@ again:
if (pmd_protnone(pmd))
return hmm_vma_walk_clear(start, end, walk);
- if (write_fault && !pmd_write(pmd))
+ if (!pmd_access_permitted(pmd, write_fault))
return hmm_vma_walk_clear(start, end, walk);
pfn = pmd_pfn(pmd) + pte_index(addr);
- flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
+ flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
return 0;
@@ -456,11 +456,11 @@ again:
continue;
}
- if (write_fault && !pte_write(pte))
+ if (!pte_access_permitted(pte, write_fault))
goto fault;
pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
- pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
+ pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0;
continue;
fault:
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86fe697e8bfb..2f2f5e774902 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd)
+ pmd_t *pmd, int flags)
{
pmd_t _pmd;
- /*
- * We should set the dirty bit only for FOLL_WRITE but for now
- * the dirty bit in the pmd is meaningless. And if the dirty
- * bit will become meaningful and we'll only set it with
- * FOLL_WRITE, an atomic set_bit will be required on the pmd to
- * set the young bit, instead of the current set_pmd_at.
- */
- _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+ _pmd = pmd_mkyoung(*pmd);
+ if (flags & FOLL_WRITE)
+ _pmd = pmd_mkdirty(_pmd);
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
- pmd, _pmd, 1))
+ pmd, _pmd, flags & FOLL_WRITE))
update_mmu_cache_pmd(vma, addr, pmd);
}
@@ -875,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
*/
WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
+ if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
return NULL;
if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
return NULL;
if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd);
+ touch_pmd(vma, addr, pmd, flags);
/*
* device mapped pages can only be returned if the
@@ -995,20 +990,15 @@ out:
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud)
+ pud_t *pud, int flags)
{
pud_t _pud;
- /*
- * We should set the dirty bit only for FOLL_WRITE but for now
- * the dirty bit in the pud is meaningless. And if the dirty
- * bit will become meaningful and we'll only set it with
- * FOLL_WRITE, an atomic set_bit will be required on the pud to
- * set the young bit, instead of the current set_pud_at.
- */
- _pud = pud_mkyoung(pud_mkdirty(*pud));
+ _pud = pud_mkyoung(*pud);
+ if (flags & FOLL_WRITE)
+ _pud = pud_mkdirty(_pud);
if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
- pud, _pud, 1))
+ pud, _pud, flags & FOLL_WRITE))
update_mmu_cache_pud(vma, addr, pud);
}
@@ -1022,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
assert_spin_locked(pud_lockptr(mm, pud));
- if (flags & FOLL_WRITE && !pud_write(*pud))
+ if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
return NULL;
if (pud_present(*pud) && pud_devmap(*pud))
@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
return NULL;
if (flags & FOLL_TOUCH)
- touch_pud(vma, addr, pud);
+ touch_pud(vma, addr, pud, flags);
/*
* device mapped pages can only be returned if the
@@ -1396,7 +1386,7 @@ out_unlock:
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
- return pmd_write(pmd) ||
+ return pmd_access_permitted(pmd, WRITE) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
page = pmd_page(*pmd);
VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd);
+ touch_pmd(vma, addr, pmd, flags);
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/*
* We don't mlock() pte-mapped THPs. This way we can avoid
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 681b300185c0..9a334f5fb730 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
}
}
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ if (addr & ~(huge_page_mask(hstate_vma(vma))))
+ return -EINVAL;
+ return 0;
+}
+
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
+ .split = hugetlb_vm_op_split,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -4627,7 +4635,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- p4d = p4d_offset(pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
pud = pud_alloc(mm, p4d, addr);
if (pud) {
if (sz == PUD_SIZE) {
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 6bcfb01ba038..410c8235e671 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -134,7 +134,7 @@ static void print_error_description(struct kasan_access_info *info)
pr_err("BUG: KASAN: %s in %pS\n",
bug_type, (void *)info->ip);
- pr_err("%s of size %zu at addr %p by task %s/%d\n",
+ pr_err("%s of size %zu at addr %px by task %s/%d\n",
info->is_write ? "Write" : "Read", info->access_size,
info->access_addr, current->comm, task_pid_nr(current));
}
@@ -206,7 +206,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
const char *rel_type;
int rel_bytes;
- pr_err("The buggy address belongs to the object at %p\n"
+ pr_err("The buggy address belongs to the object at %px\n"
" which belongs to the cache %s of size %d\n",
object, cache->name, cache->object_size);
@@ -225,7 +225,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
}
pr_err("The buggy address is located %d bytes %s of\n"
- " %d-byte region [%p, %p)\n",
+ " %d-byte region [%px, %px)\n",
rel_bytes, rel_type, cache->object_size, (void *)object_addr,
(void *)(object_addr + cache->object_size));
}
@@ -302,7 +302,7 @@ static void print_shadow_for_address(const void *addr)
char shadow_buf[SHADOW_BYTES_PER_ROW];
snprintf(buffer, sizeof(buffer),
- (i == 0) ? ">%p: " : " %p: ", kaddr);
+ (i == 0) ? ">%px: " : " %px: ", kaddr);
/*
* We should not pass a shadow pointer to generic
* function, because generic functions may try to
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e4738d5e9b8c..3d4781756d50 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1523,6 +1523,8 @@ static void kmemleak_scan(void)
if (page_count(page) == 0)
continue;
scan_block(page, page + 1, NULL);
+ if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+ cond_resched();
}
}
put_online_mems();
diff --git a/mm/madvise.c b/mm/madvise.c
index 375cf32087e4..751e97aa2210 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
{
struct file *file = vma->vm_file;
+ *prev = vma;
#ifdef CONFIG_SWAP
if (!file) {
- *prev = vma;
force_swapin_readahead(vma, start, end);
return 0;
}
if (shmem_mapping(file->f_mapping)) {
- *prev = vma;
force_shm_swapin_readahead(vma, start, end,
file->f_mapping);
return 0;
@@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
return 0;
}
- *prev = vma;
start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
if (end > vma->vm_end)
end = vma->vm_end;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 50e6906314f8..ac2ffd5e02b9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
memcg_check_events(memcg, page);
if (!mem_cgroup_is_root(memcg))
- css_put(&memcg->css);
+ css_put_many(&memcg->css, nr_entries);
}
/**
diff --git a/mm/memory.c b/mm/memory.c
index 85e7a87da79f..5eb3d2524bdc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3948,7 +3948,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
if (unlikely(!pte_same(*vmf->pte, entry)))
goto unlock;
if (vmf->flags & FAULT_FLAG_WRITE) {
- if (!pte_write(entry))
+ if (!pte_access_permitted(entry, WRITE))
return do_wp_page(vmf);
entry = pte_mkdirty(entry);
}
@@ -4013,7 +4013,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
/* NUMA case for anonymous PUDs would go here */
- if (dirty && !pud_write(orig_pud)) {
+ if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
ret = wp_huge_pud(&vmf, orig_pud);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -4046,7 +4046,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
return do_huge_pmd_numa_page(&vmf, orig_pmd);
- if (dirty && !pmd_write(orig_pmd)) {
+ if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
ret = wp_huge_pmd(&vmf, orig_pmd);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -4336,7 +4336,7 @@ int follow_phys(struct vm_area_struct *vma,
goto out;
pte = *ptep;
- if ((flags & FOLL_WRITE) && !pte_write(pte))
+ if (!pte_access_permitted(pte, flags & FOLL_WRITE))
goto unlock;
*prot = pgprot_val(pte_pgprot(pte));
diff --git a/mm/mmap.c b/mm/mmap.c
index 924839fac0e6..a4d546821214 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2555,9 +2555,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *new;
int err;
- if (is_vm_hugetlb_page(vma) && (addr &
- ~(huge_page_mask(hstate_vma(vma)))))
- return -EINVAL;
+ if (vma->vm_ops && vma->vm_ops->split) {
+ err = vma->vm_ops->split(vma, addr);
+ if (err)
+ return err;
+ }
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new)
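[Editor's note: the new ->split() hook generalises what used to be a hugetlbfs special case in __split_vma(). A driver whose mappings must not be split at arbitrary boundaries could express that roughly as follows; the names are illustrative.]

	/* Illustrative only: reject splits that are not PMD aligned. */
	static int my_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
	{
		if (addr & ~PMD_MASK)
			return -EINVAL;
		return 0;
	}

	static const struct vm_operations_struct my_vm_ops = {
		/* ... .fault, .open, .close ... */
		.split = my_vm_op_split,
	};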
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c86fbd1b590e..c957be32b27a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -550,7 +550,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
*/
set_bit(MMF_UNSTABLE, &mm->flags);
- tlb_gather_mmu(&tlb, mm, 0, -1);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
if (!can_madv_dontneed_vma(vma))
continue;
@@ -565,11 +564,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
* we do not want to block exit_mmap by keeping mm ref
* count elevated without a good reason.
*/
- if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+ tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
NULL);
+ tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+ }
}
- tlb_finish_mmu(&tlb, 0, -1);
pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(tsk), tsk->comm,
K(get_mm_counter(mm, MM_ANONPAGES)),
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8a1551154285..586f31261c83 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -433,11 +433,8 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
else
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
- if (unlikely(bg_thresh >= thresh)) {
- pr_warn("vm direct limit must be set greater than background limit.\n");
+ if (bg_thresh >= thresh)
bg_thresh = thresh / 2;
- }
-
tsk = current;
if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
@@ -1993,11 +1990,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
}
#ifdef CONFIG_BLOCK
-void laptop_mode_timer_fn(unsigned long data)
+void laptop_mode_timer_fn(struct timer_list *t)
{
- struct request_queue *q = (struct request_queue *)data;
+ struct backing_dev_info *backing_dev_info =
+ from_timer(backing_dev_info, t, laptop_mode_wb_timer);
- wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
+ wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
}
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d4096f4a5c1f..73f5d4556b3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
if (WARN_ON_ONCE(!mm_percpu_wq))
return;
- /* Workqueues cannot recurse */
- if (current->flags & PF_WQ_WORKER)
- return;
-
/*
* Do not drain if one is already in progress unless it's specific to
* a zone. Such callers are primarily CMA and memory hotplug and need
@@ -7656,11 +7652,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/*
* In case of -EBUSY, we'd like to know which page causes problem.
- * So, just fall through. We will check it in test_pages_isolated().
+ * So, just fall through. test_pages_isolated() has a tracepoint
+ * which will report the busy page.
+ *
+ * It is possible that busy pages could become available before
+ * the call to test_pages_isolated, and the range will actually be
+	 * allocated. So, if we fall through, be sure to clear ret so that
+ * -EBUSY is not accidentally used or returned to caller.
*/
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret && ret != -EBUSY)
goto done;
+	ret = 0;
/*
* Pages from [start, end) are within a MAX_ORDER_NR_PAGES
diff --git a/mm/shmem.c b/mm/shmem.c
index 4aa9307feab0..7fbe67be86fa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3776,7 +3776,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
* tmpfs instance, limiting inodes to one per page of lowmem;
* but the internal instance is left unlimited.
*/
- if (!(sb->s_flags & MS_KERNMOUNT)) {
+ if (!(sb->s_flags & SB_KERNMOUNT)) {
sbinfo->max_blocks = shmem_default_max_blocks();
sbinfo->max_inodes = shmem_default_max_inodes();
if (shmem_parse_options(data, sbinfo, false)) {
@@ -3784,12 +3784,12 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
goto failed;
}
} else {
- sb->s_flags |= MS_NOUSER;
+ sb->s_flags |= SB_NOUSER;
}
sb->s_export_op = &shmem_export_ops;
- sb->s_flags |= MS_NOSEC;
+ sb->s_flags |= SB_NOSEC;
#else
- sb->s_flags |= MS_NOUSER;
+ sb->s_flags |= SB_NOUSER;
#endif
spin_lock_init(&sbinfo->stat_lock);
@@ -3809,7 +3809,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
uuid_gen(&sb->s_uuid);
diff --git a/net/802/garp.c b/net/802/garp.c
index 2dac647ff420..7f50d47470bd 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -401,9 +401,9 @@ static void garp_join_timer_arm(struct garp_applicant *app)
mod_timer(&app->join_timer, jiffies + delay);
}
-static void garp_join_timer(unsigned long data)
+static void garp_join_timer(struct timer_list *t)
{
- struct garp_applicant *app = (struct garp_applicant *)data;
+ struct garp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
@@ -584,7 +584,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
- setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
+ timer_setup(&app->join_timer, garp_join_timer, 0);
garp_join_timer_arm(app);
return 0;
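[Editor's note: the same mechanical conversion recurs throughout the networking changes below. The timer callback now receives the timer_list itself and recovers its container with from_timer(), so the cast through unsigned long disappears. A hedged outline with made-up names:]

	struct foo {
		struct timer_list timer;
		/* ... */
	};

	static void foo_timeout(struct timer_list *t)
	{
		struct foo *obj = from_timer(obj, t, timer);	/* container_of() */
		/* ... use obj ... */
	}

	static void foo_init(struct foo *obj)
	{
		/* Previously: setup_timer(&obj->timer, foo_timeout, (unsigned long)obj); */
		timer_setup(&obj->timer, foo_timeout, 0);
	}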
diff --git a/net/802/mrp.c b/net/802/mrp.c
index be4dd3165347..a808dd5bbb27 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -586,9 +586,9 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
mod_timer(&app->join_timer, jiffies + delay);
}
-static void mrp_join_timer(unsigned long data)
+static void mrp_join_timer(struct timer_list *t)
{
- struct mrp_applicant *app = (struct mrp_applicant *)data;
+ struct mrp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
@@ -605,9 +605,9 @@ static void mrp_periodic_timer_arm(struct mrp_applicant *app)
jiffies + msecs_to_jiffies(mrp_periodic_time));
}
-static void mrp_periodic_timer(unsigned long data)
+static void mrp_periodic_timer(struct timer_list *t)
{
- struct mrp_applicant *app = (struct mrp_applicant *)data;
+ struct mrp_applicant *app = from_timer(app, t, periodic_timer);
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_PERIODIC);
@@ -865,10 +865,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
- setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+ timer_setup(&app->join_timer, mrp_join_timer, 0);
mrp_join_timer_arm(app);
- setup_timer(&app->periodic_timer, mrp_periodic_timer,
- (unsigned long)app);
+ timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
mrp_periodic_timer_arm(app);
return 0;
diff --git a/net/9p/client.c b/net/9p/client.c
index 4674235b0d9b..b433aff5ff13 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -82,7 +82,7 @@ int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
{
if (clnt->msize != 8192)
seq_printf(m, ",msize=%u", clnt->msize);
- seq_printf(m, "trans=%s", clnt->trans_mod->name);
+ seq_printf(m, ",trans=%s", clnt->trans_mod->name);
switch (clnt->proto_version) {
case p9_proto_legacy:
@@ -773,8 +773,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
}
again:
/* Wait for the response */
- err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
/*
* Make sure our req is coherent with regard to updates in other
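[Editor's note: a brief contrast, reusing the names from the hunk above, of why the 9p code moves to killable waits. Ordinary signals no longer abandon an in-flight transaction; only a fatal signal ends the wait early, and in that case the helper still returns -ERESTARTSYS as before.]

	/* Interruptible: any pending signal wakes the task with -ERESTARTSYS,
	 * leaving the 9p request in flight. */
	err = wait_event_interruptible(*req->wq, req->status >= REQ_STATUS_RCVD);

	/* Killable: only a fatal signal (SIGKILL) ends the wait early. */
	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);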
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 903a190319b9..985046ae4231 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -724,12 +724,12 @@ static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
{
if (clnt->trans_mod == &p9_tcp_trans) {
if (clnt->trans_opts.tcp.port != P9_PORT)
- seq_printf(m, "port=%u", clnt->trans_opts.tcp.port);
+ seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
} else if (clnt->trans_mod == &p9_fd_trans) {
if (clnt->trans_opts.fd.rfd != ~0)
- seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd);
+ seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
if (clnt->trans_opts.fd.wfd != ~0)
- seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd);
+ seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
}
return 0;
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f24b25c25106..f3a4efcf1456 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -286,8 +286,8 @@ req_retry:
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
- err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ err = wait_event_killable(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
@@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
* Other zc request to finish here
*/
if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
- err = wait_event_interruptible(vp_wq,
+ err = wait_event_killable(vp_wq,
(atomic_read(&vp_pinned) < chan->p9_max_pages));
if (err == -ERESTARTSYS)
return err;
@@ -471,8 +471,8 @@ req_retry_pinned:
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
- err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ err = wait_event_killable(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
goto err_out;
@@ -489,8 +489,7 @@ req_retry_pinned:
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
- err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
/*
* Non kernel buffers are pinned, unpin them
*/
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 6ad3e043c617..325c56043007 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -156,8 +156,8 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
ring = &priv->rings[num];
again:
- while (wait_event_interruptible(ring->wq,
- p9_xen_write_todo(ring, size)) != 0)
+ while (wait_event_killable(ring->wq,
+ p9_xen_write_todo(ring, size)) != 0)
;
spin_lock_irqsave(&ring->lock, flags);
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 8ad3ec2610b6..309d7dbb36e8 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -310,7 +310,7 @@ static void __aarp_expire_device(struct aarp_entry **n, struct net_device *dev)
}
/* Handle the timer event */
-static void aarp_expire_timeout(unsigned long unused)
+static void aarp_expire_timeout(struct timer_list *unused)
{
int ct;
@@ -884,7 +884,7 @@ void __init aarp_proto_init(void)
aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
if (!aarp_dl)
printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
- setup_timer(&aarp_timer, aarp_expire_timeout, 0);
+ timer_setup(&aarp_timer, aarp_expire_timeout, 0);
aarp_timer.expires = jiffies + sysctl_aarp_expiry_time;
add_timer(&aarp_timer);
register_netdevice_notifier(&aarp_notifier);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5d035c1f1156..03a9fc0771c0 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -158,9 +158,9 @@ found:
return s;
}
-static void atalk_destroy_timer(unsigned long data)
+static void atalk_destroy_timer(struct timer_list *t)
{
- struct sock *sk = (struct sock *)data;
+ struct sock *sk = from_timer(sk, t, sk_timer);
if (sk_has_allocations(sk)) {
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
@@ -175,8 +175,7 @@ static inline void atalk_destroy_socket(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
if (sk_has_allocations(sk)) {
- setup_timer(&sk->sk_timer, atalk_destroy_timer,
- (unsigned long)sk);
+ timer_setup(&sk->sk_timer, atalk_destroy_timer, 0);
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
add_timer(&sk->sk_timer);
} else
diff --git a/net/atm/lec.c b/net/atm/lec.c
index c976196da3ea..6676e3433261 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1798,7 +1798,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
else
send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
entry->timer.expires = jiffies + (1 * HZ);
- entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_arp;
+ entry->timer.function = lec_arp_expire_arp;
add_timer(&entry->timer);
found = priv->mcast_vcc;
}
@@ -1998,7 +1998,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
entry->old_recv_push = old_push;
entry->status = ESI_UNKNOWN;
entry->timer.expires = jiffies + priv->vcc_timeout_period;
- entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+ entry->timer.function = lec_arp_expire_vcc;
hlist_add_head(&entry->next, &priv->lec_no_forward);
add_timer(&entry->timer);
dump_arp_table(priv);
@@ -2082,7 +2082,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
entry->status = ESI_UNKNOWN;
hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
entry->timer.expires = jiffies + priv->vcc_timeout_period;
- entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+ entry->timer.function = lec_arp_expire_vcc;
add_timer(&entry->timer);
pr_debug("After vcc was added\n");
dump_arp_table(priv);
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index e882d8b5db05..7c6a1cc760a2 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -121,7 +121,7 @@ static struct notifier_block mpoa_notifier = {
struct mpoa_client *mpcs = NULL; /* FIXME */
static struct atm_mpoa_qos *qos_head = NULL;
-static DEFINE_TIMER(mpc_timer, NULL);
+static DEFINE_TIMER(mpc_timer, mpc_cache_check);
static struct mpoa_client *find_mpc_by_itfnum(int itf)
@@ -1413,7 +1413,6 @@ static void mpc_timer_refresh(void)
{
mpc_timer.expires = jiffies + (MPC_P2 * HZ);
checking_time = mpc_timer.expires;
- mpc_timer.function = (TIMER_FUNC_TYPE)mpc_cache_check;
add_timer(&mpc_timer);
}
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 4b90033f35a8..15cd2139381e 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -488,9 +488,9 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
* Switch to Slow Start, set the ss_threshold to half of the current cwnd and
* reset the cwnd to 3*MSS
*/
-static void batadv_tp_sender_timeout(unsigned long arg)
+static void batadv_tp_sender_timeout(struct timer_list *t)
{
- struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+ struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
struct batadv_priv *bat_priv = tp_vars->bat_priv;
if (atomic_read(&tp_vars->sending) == 0)
@@ -1020,8 +1020,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
atomic64_set(&tp_vars->tot_sent, 0);
kref_get(&tp_vars->refcount);
- setup_timer(&tp_vars->timer, batadv_tp_sender_timeout,
- (unsigned long)tp_vars);
+ timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);
tp_vars->bat_priv = bat_priv;
tp_vars->start_time = jiffies;
@@ -1109,9 +1108,9 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
* reached without received ack
* @arg: address of the related tp_vars
*/
-static void batadv_tp_receiver_shutdown(unsigned long arg)
+static void batadv_tp_receiver_shutdown(struct timer_list *t)
{
- struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+ struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
struct batadv_tp_unacked *un, *safe;
struct batadv_priv *bat_priv;
@@ -1373,8 +1372,7 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv,
hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
kref_get(&tp_vars->refcount);
- setup_timer(&tp_vars->timer, batadv_tp_receiver_shutdown,
- (unsigned long)tp_vars);
+ timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
batadv_tp_reset_receiver_timer(tp_vars);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 8112893037bd..f2cec70d520c 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -398,9 +398,9 @@ static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum,
}
}
-static void hidp_idle_timeout(unsigned long arg)
+static void hidp_idle_timeout(struct timer_list *t)
{
- struct hidp_session *session = (struct hidp_session *) arg;
+ struct hidp_session *session = from_timer(session, t, timer);
/* The HIDP user-space API only contains calls to add and remove
* devices. There is no way to forward events of any kind. Therefore,
@@ -944,8 +944,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
/* device management */
INIT_WORK(&session->dev_init, hidp_session_dev_work);
- setup_timer(&session->timer, hidp_idle_timeout,
- (unsigned long)session);
+ timer_setup(&session->timer, hidp_idle_timeout, 0);
/* session data */
mutex_init(&session->report_mutex);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4a0b41d75c84..b98225d65e87 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -233,9 +233,9 @@ static int rfcomm_check_security(struct rfcomm_dlc *d)
d->out);
}
-static void rfcomm_session_timeout(unsigned long arg)
+static void rfcomm_session_timeout(struct timer_list *t)
{
- struct rfcomm_session *s = (void *) arg;
+ struct rfcomm_session *s = from_timer(s, t, timer);
BT_DBG("session %p state %ld", s, s->state);
@@ -258,9 +258,9 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s)
}
/* ---- RFCOMM DLCs ---- */
-static void rfcomm_dlc_timeout(unsigned long arg)
+static void rfcomm_dlc_timeout(struct timer_list *t)
{
- struct rfcomm_dlc *d = (void *) arg;
+ struct rfcomm_dlc *d = from_timer(d, t, timer);
BT_DBG("dlc %p state %ld", d, d->state);
@@ -307,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
if (!d)
return NULL;
- setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
+ timer_setup(&d->timer, rfcomm_dlc_timeout, 0);
skb_queue_head_init(&d->tx_queue);
mutex_init(&d->lock);
@@ -650,7 +650,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
BT_DBG("session %p sock %p", s, sock);
- setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
+ timer_setup(&s->timer, rfcomm_session_timeout, 0);
INIT_LIST_HEAD(&s->dlcs);
s->state = state;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 795e920a3281..08df57665e1f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -73,9 +73,9 @@ struct sco_pinfo {
#define SCO_CONN_TIMEOUT (HZ * 40)
#define SCO_DISCONN_TIMEOUT (HZ * 2)
-static void sco_sock_timeout(unsigned long arg)
+static void sco_sock_timeout(struct timer_list *t)
{
- struct sock *sk = (struct sock *)arg;
+ struct sock *sk = from_timer(sk, t, sk_timer);
BT_DBG("sock %p state %d", sk, sk->sk_state);
@@ -487,7 +487,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
- setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
+ timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
bt_sock_link(&sco_sk_list, sk);
return sk;
diff --git a/net/can/proc.c b/net/can/proc.c
index d979b3dc49a6..0c59f876fe6f 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -221,7 +221,7 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
seq_putc(m, '\n');
- if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+ if (net->can.can_stattimer.function == can_stat_update) {
seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
can_stats->total_rx_match_ratio);
@@ -291,7 +291,7 @@ static int can_reset_stats_proc_show(struct seq_file *m, void *v)
user_reset = 1;
- if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+ if (net->can.can_stattimer.function == can_stat_update) {
seq_printf(m, "Scheduled statistic reset #%ld.\n",
can_pstats->stats_reset + 1);
} else {
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 67bb1f11e613..9a5850f264ed 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -47,28 +47,38 @@ unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
/* handle the last 11 bytes */
c = c + length;
- switch (len) { /* all the case statements fall through */
+ switch (len) {
case 11:
c = c + ((__u32)k[10] << 24);
+ /* fall through */
case 10:
c = c + ((__u32)k[9] << 16);
+ /* fall through */
case 9:
c = c + ((__u32)k[8] << 8);
/* the first byte of c is reserved for the length */
+ /* fall through */
case 8:
b = b + ((__u32)k[7] << 24);
+ /* fall through */
case 7:
b = b + ((__u32)k[6] << 16);
+ /* fall through */
case 6:
b = b + ((__u32)k[5] << 8);
+ /* fall through */
case 5:
b = b + k[4];
+ /* fall through */
case 4:
a = a + ((__u32)k[3] << 24);
+ /* fall through */
case 3:
a = a + ((__u32)k[2] << 16);
+ /* fall through */
case 2:
a = a + ((__u32)k[1] << 8);
+ /* fall through */
case 1:
a = a + k[0];
/* case 0: nothing left to add */
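[Editor's note: these annotations matter once the kernel is built with -Wimplicit-fallthrough, where GCC accepts a "fall through" comment immediately before the next case label as an explicit statement of intent. A self-contained sketch with hypothetical names:]

	/* Illustrative only: how the annotation satisfies -Wimplicit-fallthrough. */
	static __u32 mix_tail(const __u8 *k, int len, __u32 a)
	{
		switch (len) {
		case 2:
			a += (__u32)k[1] << 8;
			/* fall through */
		case 1:
			a += k[0];
		}
		return a;
	}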
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 489610ac1cdd..bf9d079cbafd 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -37,7 +37,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
return -ENOTSUPP;
}
- WARN_ON(!key->len);
+ if (!key->len)
+ return -EINVAL;
+
key->key = kmemdup(buf, key->len, GFP_NOIO);
if (!key->key) {
ret = -ENOMEM;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index ad93342c90d7..8a4d3758030b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -430,6 +430,7 @@ static void ceph_sock_state_change(struct sock *sk)
switch (sk->sk_state) {
case TCP_CLOSE:
dout("%s TCP_CLOSE\n", __func__);
+ /* fall through */
case TCP_CLOSE_WAIT:
dout("%s TCP_CLOSE_WAIT\n", __func__);
con_sock_state_closing(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 9ae1bab8c05d..1547107f4854 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1279,9 +1279,10 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
/*
	 * Older OSDs don't set reply tid even if the original
- * request had a non-zero tid. Workaround this weirdness
- * by falling through to the allocate case.
+ * request had a non-zero tid. Work around this weirdness
+ * by allocating a new message.
*/
+ /* fall through */
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ee29f4f5fa9..07ed21d64f92 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2746,7 +2746,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
- return skb->ip_summed != CHECKSUM_PARTIAL;
+ return skb->ip_summed != CHECKSUM_PARTIAL &&
+ skb->ip_summed != CHECKSUM_UNNECESSARY;
return skb->ip_summed == CHECKSUM_NONE;
}
@@ -7139,13 +7140,17 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
__dev_xdp_attached(dev, bpf_op, NULL))
return -EBUSY;
- if (bpf_op == ops->ndo_bpf)
- prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
- dev);
- else
- prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
+ prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
+ bpf_op == ops->ndo_bpf);
if (IS_ERR(prog))
return PTR_ERR(prog);
+
+ if (!(flags & XDP_FLAGS_HW_MODE) &&
+ bpf_prog_is_dev_bound(prog->aux)) {
+ NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
}
err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 70ccda233bd1..c7785efeea57 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -144,9 +144,9 @@ static void send_dm_alert(struct work_struct *work)
* in the event that more drops will arrive during the
* hysteresis period.
*/
-static void sched_send_work(unsigned long _data)
+static void sched_send_work(struct timer_list *t)
{
- struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+ struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
schedule_work(&data->dm_alert_work);
}
@@ -412,8 +412,7 @@ static int __init init_net_drop_monitor(void)
for_each_possible_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
INIT_WORK(&data->dm_alert_work, send_dm_alert);
- setup_timer(&data->send_timer, sched_send_work,
- (unsigned long)data);
+ timer_setup(&data->send_timer, sched_send_work, 0);
spin_lock_init(&data->lock);
reset_per_cpu_data(data);
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 1afa17935954..6a85e67fafce 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1646,9 +1646,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
.gpl_only = false,
.pkt_access = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM,
+ .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
.arg5_type = ARG_ANYTHING,
};
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 7c1ffd6f9501..9834cfa21b21 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -76,9 +76,9 @@ static void est_fetch_counters(struct net_rate_estimator *e,
}
-static void est_timer(unsigned long arg)
+static void est_timer(struct timer_list *t)
{
- struct net_rate_estimator *est = (struct net_rate_estimator *)arg;
+ struct net_rate_estimator *est = from_timer(est, t, timer);
struct gnet_stats_basic_packed b;
u64 rate, brate;
@@ -170,7 +170,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
}
est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
- setup_timer(&est->timer, est_timer, (unsigned long)est);
+ timer_setup(&est->timer, est_timer, 0);
mod_timer(&est->timer, est->next_jiffies);
rcu_assign_pointer(*rate_est, est);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6ea3a1a7f36a..d1f5fe986edd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -51,7 +51,7 @@ do { \
#define PNEIGH_HASHMASK 0xF
-static void neigh_timer_handler(unsigned long arg);
+static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
@@ -331,7 +331,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
n->output = neigh_blackhole;
seqlock_init(&n->hh.hh_lock);
n->parms = neigh_parms_clone(&tbl->parms);
- setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
+ timer_setup(&n->timer, neigh_timer_handler, 0);
NEIGH_CACHE_STAT_INC(tbl, allocs);
n->tbl = tbl;
@@ -903,10 +903,10 @@ static void neigh_probe(struct neighbour *neigh)
/* Called when a timer expires for a neighbour entry. */
-static void neigh_timer_handler(unsigned long arg)
+static void neigh_timer_handler(struct timer_list *t)
{
unsigned long now, next;
- struct neighbour *neigh = (struct neighbour *)arg;
+ struct neighbour *neigh = from_timer(neigh, t, timer);
unsigned int state;
int notify = 0;
@@ -1391,9 +1391,9 @@ int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
}
EXPORT_SYMBOL(neigh_direct_output);
-static void neigh_proxy_process(unsigned long arg)
+static void neigh_proxy_process(struct timer_list *t)
{
- struct neigh_table *tbl = (struct neigh_table *)arg;
+ struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
long sched_next = 0;
unsigned long now = jiffies;
struct sk_buff *skb, *n;
@@ -1573,7 +1573,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
tbl->parms.reachable_time);
- setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
+ timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
skb_queue_head_init_class(&tbl->proxy_queue,
&neigh_table_proxy_queue_class);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b36dceab0dc1..324cb9f2f551 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -125,7 +125,7 @@ static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr);
static int dn_route_input(struct sk_buff *);
-static void dn_run_flush(unsigned long dummy);
+static void dn_run_flush(struct timer_list *unused);
static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned int dn_rt_hash_mask;
@@ -183,7 +183,7 @@ static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
return dn_rt_hash_mask & (unsigned int)tmp;
}
-static void dn_dst_check_expire(unsigned long dummy)
+static void dn_dst_check_expire(struct timer_list *unused)
{
int i;
struct dn_route *rt;
@@ -357,7 +357,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
return 0;
}
-static void dn_run_flush(unsigned long dummy)
+static void dn_run_flush(struct timer_list *unused)
{
int i;
struct dn_route *rt, *next;
@@ -1875,7 +1875,7 @@ void __init dn_route_init(void)
kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
dst_entries_init(&dn_dst_ops);
- setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
+ timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
add_timer(&dn_route_timer);
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index f430daed24a0..aa4155875ca8 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -34,11 +34,11 @@
#define SLOW_INTERVAL (HZ/2)
-static void dn_slow_timer(unsigned long arg);
+static void dn_slow_timer(struct timer_list *t);
void dn_start_slow_timer(struct sock *sk)
{
- setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ timer_setup(&sk->sk_timer, dn_slow_timer, 0);
sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
@@ -47,9 +47,9 @@ void dn_stop_slow_timer(struct sock *sk)
sk_stop_timer(sk, &sk->sk_timer);
}
-static void dn_slow_timer(unsigned long arg)
+static void dn_slow_timer(struct timer_list *t)
{
- struct sock *sk = (struct sock *)arg;
+ struct sock *sk = from_timer(sk, t, sk_timer);
struct dn_scp *scp = DN_SK(sk);
bh_lock_sock(sk);
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 44e3fb7dec8c..1e287420ff49 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -51,9 +51,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
INIT_LIST_HEAD(&dst->list);
list_add_tail(&dsa_tree_list, &dst->list);
- /* Initialize the reference counter to the number of switches, not 1 */
kref_init(&dst->refcount);
- refcount_set(&dst->refcount.refcount, 0);
return dst;
}
@@ -64,20 +62,23 @@ static void dsa_tree_free(struct dsa_switch_tree *dst)
kfree(dst);
}
-static struct dsa_switch_tree *dsa_tree_touch(int index)
+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
- struct dsa_switch_tree *dst;
-
- dst = dsa_tree_find(index);
- if (!dst)
- dst = dsa_tree_alloc(index);
+ if (dst)
+ kref_get(&dst->refcount);
return dst;
}
-static void dsa_tree_get(struct dsa_switch_tree *dst)
+static struct dsa_switch_tree *dsa_tree_touch(int index)
{
- kref_get(&dst->refcount);
+ struct dsa_switch_tree *dst;
+
+ dst = dsa_tree_find(index);
+ if (dst)
+ return dsa_tree_get(dst);
+ else
+ return dsa_tree_alloc(index);
}
static void dsa_tree_release(struct kref *ref)
@@ -91,7 +92,8 @@ static void dsa_tree_release(struct kref *ref)
static void dsa_tree_put(struct dsa_switch_tree *dst)
{
- kref_put(&dst->refcount, dsa_tree_release);
+ if (dst)
+ kref_put(&dst->refcount, dsa_tree_release);
}
static bool dsa_port_is_dsa(struct dsa_port *port)
@@ -765,6 +767,7 @@ int dsa_register_switch(struct dsa_switch *ds)
mutex_lock(&dsa2_mutex);
err = dsa_switch_probe(ds);
+ dsa_tree_put(ds->dst);
mutex_unlock(&dsa2_mutex);
return err;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ce4aa827be05..f00499a46927 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1223,9 +1223,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- bool fixedid = false, gso_partial, encap;
+ bool udpfrag = false, fixedid = false, gso_partial, encap;
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
+ unsigned int offset = 0;
struct iphdr *iph;
int proto, tot_len;
int nhoff;
@@ -1260,6 +1261,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
segs = ERR_PTR(-EPROTONOSUPPORT);
if (!skb->encapsulation || encap) {
+ udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
/* fixed ID is invalid if DF bit is not set */
@@ -1279,7 +1281,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
skb = segs;
do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
- if (skb_is_gso(skb)) {
+ if (udpfrag) {
+ iph->frag_off = htons(offset >> 3);
+ if (skb->next)
+ iph->frag_off |= htons(IP_MF);
+ offset += skb->len - nhoff - ihl;
+ tot_len = skb->len - nhoff;
+ } else if (skb_is_gso(skb)) {
if (!fixedid) {
iph->id = htons(id);
id += skb_shinfo(skb)->gso_segs;
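[Editor's note: the arithmetic here mirrors ordinary IPv4 fragmentation. frag_off carries the payload offset in 8-byte units, and every fragment except the last sets IP_MF. A worked example, assuming 1480 bytes of payload per fragment:]

	/*
	 *   fragment 0: offset = 0    -> frag_off = htons(0 >> 3)    | htons(IP_MF)
	 *   fragment 1: offset = 1480 -> frag_off = htons(1480 >> 3) | htons(IP_MF)  (185)
	 *   fragment 2: offset = 2960 -> frag_off = htons(2960 >> 3)                 (370, MF clear)
	 */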
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index ab183af0b5b6..d1f8f302dbf3 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -752,18 +752,18 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
return ip_local_out(net, skb->sk, skb);
}
-static void igmp_gq_timer_expire(unsigned long data)
+static void igmp_gq_timer_expire(struct timer_list *t)
{
- struct in_device *in_dev = (struct in_device *)data;
+ struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);
in_dev->mr_gq_running = 0;
igmpv3_send_report(in_dev, NULL);
in_dev_put(in_dev);
}
-static void igmp_ifc_timer_expire(unsigned long data)
+static void igmp_ifc_timer_expire(struct timer_list *t)
{
- struct in_device *in_dev = (struct in_device *)data;
+ struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
@@ -784,9 +784,9 @@ static void igmp_ifc_event(struct in_device *in_dev)
}
-static void igmp_timer_expire(unsigned long data)
+static void igmp_timer_expire(struct timer_list *t)
{
- struct ip_mc_list *im = (struct ip_mc_list *)data;
+ struct ip_mc_list *im = from_timer(im, t, timer);
struct in_device *in_dev = im->interface;
spin_lock(&im->lock);
@@ -1385,7 +1385,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
refcount_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
- setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
+ timer_setup(&im->timer, igmp_timer_expire, 0);
im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
#endif
@@ -1695,10 +1695,8 @@ void ip_mc_init_dev(struct in_device *in_dev)
ASSERT_RTNL();
#ifdef CONFIG_IP_MULTICAST
- setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
- (unsigned long)in_dev);
- setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
- (unsigned long)in_dev);
+ timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
+ timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
#endif
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 40a43ad294cb..fd5f19c988e4 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -112,7 +112,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
@@ -375,8 +375,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
INIT_LIST_HEAD(&mrt->mfc_cache_list);
INIT_LIST_HEAD(&mrt->mfc_unres_queue);
- setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
- (unsigned long)mrt);
+ timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -804,9 +803,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
}
/* Timer process for the unresolved queue. */
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
{
- struct mr_table *mrt = (struct mr_table *)arg;
+ struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
unsigned long now;
unsigned long expires;
struct mfc_cache *c, *next;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f844c06c0676..734cfc8ff76e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2964,7 +2964,7 @@ void tcp_rearm_rto(struct sock *sk)
/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
static void tcp_set_xmit_timer(struct sock *sk)
{
- if (!tcp_schedule_loss_probe(sk))
+ if (!tcp_schedule_loss_probe(sk, true))
tcp_rearm_rto(sk);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 540b7d92cc70..a4d214c7b506 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2391,7 +2391,7 @@ repair:
/* Send one loss probe per tail loss episode. */
if (push_one != 2)
- tcp_schedule_loss_probe(sk);
+ tcp_schedule_loss_probe(sk, false);
is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
tcp_cwnd_validate(sk, is_cwnd_limited);
return false;
@@ -2399,7 +2399,7 @@ repair:
return !tp->packets_out && !tcp_write_queue_empty(sk);
}
-bool tcp_schedule_loss_probe(struct sock *sk)
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -2440,7 +2440,9 @@ bool tcp_schedule_loss_probe(struct sock *sk)
}
/* If the RTO formula yields an earlier time, then use that time. */
- rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */
+ rto_delta_us = advancing_rto ?
+ jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
+ tcp_rto_delta_us(sk); /* How far in future is RTO? */
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
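
The advancing_rto flag above selects which bound caps the tail-loss-probe timeout: the full icsk_rto when the RTO is about to be re-armed anyway, or the time remaining until the already-pending RTO otherwise. A purely illustrative sketch of that clamp, with invented helper names and abstracted units:

    #include <stdio.h>

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    static unsigned int tlp_timeout(unsigned int pto, long rto_delta,
                                    int advancing_rto, unsigned int full_rto)
    {
        /* advancing_rto: cap by the full RTO window; otherwise by the
         * time left until the already-scheduled RTO fires.
         */
        long limit = advancing_rto ? (long)full_rto : rto_delta;

        if (limit > 0)
            pto = min_u32(pto, (unsigned int)limit);
        return pto;
    }

    int main(void)
    {
        printf("%u\n", tlp_timeout(300, 120, 0, 200));  /* clamped to 120 */
        printf("%u\n", tlp_timeout(300, 120, 1, 200));  /* clamped to 200 */
        return 0;
    }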
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index e360d55be555..01801b77bd0d 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -187,16 +187,57 @@ out_unlock:
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
-static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
+ __wsum csum;
+ struct udphdr *uh;
+ struct iphdr *iph;
if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type &
- (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)))
+ (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
segs = skb_udp_tunnel_segment(skb, features, false);
+ goto out;
+ }
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
+ /* Do software UFO. Complete and fill in the UDP checksum as
+ * HW cannot do checksum of UDP packets sent as multiple
+ * IP fragments.
+ */
+ uh = udp_hdr(skb);
+ iph = ip_hdr(skb);
+
+ uh->check = 0;
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* If there is no outer header we can fake a checksum offload
+ * due to the fact that we have already done the checksum in
+ * software prior to segmenting the frame.
+ */
+ if (!skb->encap_hdr_csum)
+ features |= NETIF_F_HW_CSUM;
+
+ /* Fragment the skb. IP headers of the fragments are updated in
+ * inet_gso_segment()
+ */
+ segs = skb_segment(skb, features);
+out:
return segs;
}
@@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload udpv4_offload = {
.callbacks = {
- .gso_segment = udp4_tunnel_segment,
+ .gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},
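
The restored udp4_ufo_fragment() computes the entire UDP checksum in software because hardware cannot checksum a datagram that will leave as multiple IP fragments. One detail worth calling out is the zero-checksum rule applied after the fold: for UDP over IPv4, a transmitted checksum of 0 means "no checksum", so a computed 0 must be sent as 0xFFFF. A tiny illustrative example:

    #include <stdint.h>
    #include <stdio.h>

    #define CSUM_MANGLED_0 0xFFFFu

    static uint16_t finalize_udp_check(uint16_t computed)
    {
        /* 0 on the wire means "no checksum", so substitute all-ones */
        return computed == 0 ? CSUM_MANGLED_0 : computed;
    }

    int main(void)
    {
        printf("0x%04x\n", finalize_udp_check(0));      /* 0xffff */
        printf("0x%04x\n", finalize_udp_check(0x1234)); /* unchanged */
        return 0;
    }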
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a0ae1c9d37df..f49bd7897e95 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -188,7 +188,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
static void addrconf_dad_run(struct inet6_dev *idev);
-static void addrconf_rs_timer(unsigned long data);
+static void addrconf_rs_timer(struct timer_list *t);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -388,8 +388,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
rwlock_init(&ndev->lock);
ndev->dev = dev;
INIT_LIST_HEAD(&ndev->addr_list);
- setup_timer(&ndev->rs_timer, addrconf_rs_timer,
- (unsigned long)ndev);
+ timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
if (ndev->cnf.stable_secret.initialized)
@@ -3741,9 +3740,9 @@ restart:
return 0;
}
-static void addrconf_rs_timer(unsigned long data)
+static void addrconf_rs_timer(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, rs_timer);
struct net_device *dev = idev->dev;
struct in6_addr lladdr;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 2e2804f5823e..f5285f4e1d08 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -70,7 +70,7 @@ static int fib6_walk_continue(struct fib6_walker *w);
* result of redirects, path MTU changes, etc.
*/
-static void fib6_gc_timer_cb(unsigned long arg);
+static void fib6_gc_timer_cb(struct timer_list *t);
#define FOR_WALKERS(net, w) \
list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh)
@@ -2026,9 +2026,11 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
spin_unlock_bh(&net->ipv6.fib6_gc_lock);
}
-static void fib6_gc_timer_cb(unsigned long arg)
+static void fib6_gc_timer_cb(struct timer_list *t)
{
- fib6_run_gc(0, (struct net *)arg, true);
+ struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer);
+
+ fib6_run_gc(0, arg, true);
}
static int __net_init fib6_net_init(struct net *net)
@@ -2043,7 +2045,7 @@ static int __net_init fib6_net_init(struct net *net)
spin_lock_init(&net->ipv6.fib6_gc_lock);
rwlock_init(&net->ipv6.fib6_walker_lock);
INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
- setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
+ timer_setup(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, 0);
net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
if (!net->ipv6.rt6_stats)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 9f2e73c71768..7f59c8fabeeb 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -46,7 +46,7 @@
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
-static void ip6_fl_gc(unsigned long dummy);
+static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
/* FL hash table lock: it protects only of GC */
@@ -127,7 +127,7 @@ static void fl_release(struct ip6_flowlabel *fl)
spin_unlock_bh(&ip6_fl_lock);
}
-static void ip6_fl_gc(unsigned long dummy)
+static void ip6_fl_gc(struct timer_list *unused)
{
int i;
unsigned long now = jiffies;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b90bad7a4e56..4cfd8e0696fe 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -460,7 +460,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
tpi->proto);
if (tunnel) {
- ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
+ ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
return PACKET_RCVD;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9c24b85949c1..a2e1a864eb46 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
@@ -320,8 +320,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
- setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
- (unsigned long)mrt);
+ timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
#ifdef CONFIG_IPV6_PIMSM_V2
mrt->mroute_reg_vif_num = -1;
@@ -888,9 +887,9 @@ static void ipmr_do_expire_process(struct mr6_table *mrt)
mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
{
- struct mr6_table *mrt = (struct mr6_table *)arg;
+ struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
if (!spin_trylock(&mfc_unres_lock)) {
mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 12b7c27ce5ce..fc6d7d143f2c 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -75,10 +75,10 @@ static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
-static void igmp6_timer_handler(unsigned long data);
+static void igmp6_timer_handler(struct timer_list *t);
-static void mld_gq_timer_expire(unsigned long data);
-static void mld_ifc_timer_expire(unsigned long data);
+static void mld_gq_timer_expire(struct timer_list *t);
+static void mld_ifc_timer_expire(struct timer_list *t);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
@@ -839,7 +839,7 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
if (!mc)
return NULL;
- setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
+ timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
mc->mca_addr = *addr;
mc->idev = idev; /* reference taken by caller */
@@ -2083,9 +2083,9 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
}
}
-static void mld_dad_timer_expire(unsigned long data)
+static void mld_dad_timer_expire(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
mld_send_initial_cr(idev);
if (idev->mc_dad_count) {
@@ -2432,18 +2432,18 @@ static void igmp6_leave_group(struct ifmcaddr6 *ma)
}
}
-static void mld_gq_timer_expire(unsigned long data)
+static void mld_gq_timer_expire(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
idev->mc_gq_running = 0;
mld_send_report(idev, NULL);
in6_dev_put(idev);
}
-static void mld_ifc_timer_expire(unsigned long data)
+static void mld_ifc_timer_expire(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
mld_send_cr(idev);
if (idev->mc_ifc_count) {
@@ -2462,9 +2462,9 @@ static void mld_ifc_event(struct inet6_dev *idev)
mld_ifc_start_timer(idev, 1);
}
-static void igmp6_timer_handler(unsigned long data)
+static void igmp6_timer_handler(struct timer_list *t)
{
- struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
+ struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
if (mld_in_v1_mode(ma->idev))
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
@@ -2552,14 +2552,11 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
write_lock_bh(&idev->lock);
spin_lock_init(&idev->mc_lock);
idev->mc_gq_running = 0;
- setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
- (unsigned long)idev);
+ timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
idev->mc_tomb = NULL;
idev->mc_ifc_count = 0;
- setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
- (unsigned long)idev);
- setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
- (unsigned long)idev);
+ timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
+ timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
ipv6_mc_reset(idev);
write_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 4a7e5ffa5108..4fe7c90962dd 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -31,6 +31,37 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
return id;
}
+/* This function exists only for tap drivers that must support broken
+ * clients requesting UFO without specifying an IPv6 fragment ID.
+ *
+ * This is similar to ipv6_select_ident() but we use an independent hash
+ * seed to limit information leakage.
+ *
+ * The network header must be set before calling this.
+ */
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+{
+ static u32 ip6_proxy_idents_hashrnd __read_mostly;
+ struct in6_addr buf[2];
+ struct in6_addr *addrs;
+ u32 id;
+
+ addrs = skb_header_pointer(skb,
+ skb_network_offset(skb) +
+ offsetof(struct ipv6hdr, saddr),
+ sizeof(buf), buf);
+ if (!addrs)
+ return 0;
+
+ net_get_random_once(&ip6_proxy_idents_hashrnd,
+ sizeof(ip6_proxy_idents_hashrnd));
+
+ id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
+ &addrs[1], &addrs[0]);
+ return htonl(id);
+}
+EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
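
ipv6_proxy_select_ident() above reads the source and destination addresses through skb_header_pointer() with a stack bounce buffer, so it works whether or not the IPv6 header sits in the linear part of the skb. A userspace analogue of that access pattern (the types are stand-ins and the gather step is simplified):

    #include <stdio.h>
    #include <string.h>

    struct fake_pkt {
        const unsigned char *data;
        size_t linear_len;  /* bytes directly addressable */
        size_t total_len;
    };

    static const void *header_pointer(const struct fake_pkt *p, size_t off,
                                      size_t len, void *buf)
    {
        if (off + len > p->total_len)
            return NULL;                 /* packet is not that long */
        if (off + len <= p->linear_len)
            return p->data + off;        /* contiguous: no copy needed */
        memcpy(buf, p->data + off, len); /* simplified stand-in for gathering */
        return buf;
    }

    int main(void)
    {
        unsigned char payload[64] = { 0x60, 0x00 };
        struct fake_pkt p = { payload, 16, sizeof(payload) };
        unsigned char buf[32];
        const void *hdr = header_pointer(&p, 8, 32, buf);

        printf("copied into bounce buffer: %s\n", hdr == buf ? "yes" : "no");
        return 0;
    }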
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 05eb7bc36156..7a8d1500d374 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -472,6 +472,11 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
&match->rt6i_siblings, rt6i_siblings) {
route_choosen--;
if (route_choosen == 0) {
+ struct inet6_dev *idev = sibling->rt6i_idev;
+
+ if (!netif_carrier_ok(sibling->dst.dev) &&
+ idev->cnf.ignore_routes_with_linkdown)
+ break;
if (rt6_score_route(sibling, oif, strict) < 0)
break;
match = sibling;
@@ -1019,7 +1024,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
struct net_device *dev = rt->dst.dev;
- if (rt->rt6i_flags & RTF_LOCAL) {
+ if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
/* for copies of local routes, dst->dev needs to be the
* device if it is a master device, the master device if
* device is enslaved, and the loopback as the default
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 455fd4e39333..a0f89ad76f9d 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -17,15 +17,94 @@
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
+ unsigned int unfrag_ip6hlen, unfrag_len;
+ struct frag_hdr *fptr;
+ u8 *packet_start, *prevhdr;
+ u8 nexthdr;
+ u8 frag_hdr_sz = sizeof(struct frag_hdr);
+ __wsum csum;
+ int tnl_hlen;
+ int err;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
if (skb->encapsulation && skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
segs = skb_udp_tunnel_segment(skb, features, true);
+ else {
+ const struct ipv6hdr *ipv6h;
+ struct udphdr *uh;
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+
+ /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+ * do checksum of UDP packets sent as multiple IP fragments.
+ */
+
+ uh = udp_hdr(skb);
+ ipv6h = ipv6_hdr(skb);
+
+ uh->check = 0;
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
+ &ipv6h->daddr, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* If there is no outer header we can fake a checksum offload
+ * due to the fact that we have already done the checksum in
+ * software prior to segmenting the frame.
+ */
+ if (!skb->encap_hdr_csum)
+ features |= NETIF_F_HW_CSUM;
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+ if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
+
+ /* Find the unfragmentable header and shift it left by frag_hdr_sz
+ * bytes to insert fragment header.
+ */
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
+ nexthdr = *prevhdr;
+ *prevhdr = NEXTHDR_FRAGMENT;
+ unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+ unfrag_ip6hlen + tnl_hlen;
+ packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+ memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+
+ SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
+ skb->mac_header -= frag_hdr_sz;
+ skb->network_header -= frag_hdr_sz;
+
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+ fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
+ */
+ segs = skb_segment(skb, features);
+ }
+out:
return segs;
}
@@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload udpv6_offload = {
.callbacks = {
- .gso_segment = udp6_tunnel_segment,
+ .gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},
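
For the non-tunnel IPv6 UFO path above, room is made for a fixed-size fragment header by shifting the unfragmentable part of the packet left by frag_hdr_sz bytes and then writing the new header into the freed gap. The memmove arithmetic, reduced to a toy buffer with invented layout and sizes:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /*            headroom   unfrag hdr  payload   */
        unsigned char pkt[32] = "........UNFRAGHDRPAYLOAD";
        size_t frag_hdr_sz = 4;
        size_t unfrag_len = 9;          /* strlen("UNFRAGHDR") */
        unsigned char *start = pkt + 8; /* first byte of the unfrag header */

        /* shift the unfragmentable header left into the headroom ... */
        memmove(start - frag_hdr_sz, start, unfrag_len);
        /* ... and write the new fragment header into the gap behind it */
        memcpy(start - frag_hdr_sz + unfrag_len, "FRAG", frag_hdr_sz);

        /* prints UNFRAGHDRFRAG; the payload still follows unchanged */
        printf("%.*s\n", (int)(unfrag_len + frag_hdr_sz), start - frag_hdr_sz);
        return 0;
    }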
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 8bb469cb3abe..5d4ae01951b5 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -42,7 +42,7 @@ void lapb_start_t1timer(struct lapb_cb *lapb)
{
del_timer(&lapb->t1timer);
- lapb->t1timer.function = (TIMER_FUNC_TYPE)lapb_t1timer_expiry;
+ lapb->t1timer.function = lapb_t1timer_expiry;
lapb->t1timer.expires = jiffies + lapb->t1;
add_timer(&lapb->t1timer);
@@ -52,7 +52,7 @@ void lapb_start_t2timer(struct lapb_cb *lapb)
{
del_timer(&lapb->t2timer);
- lapb->t2timer.function = (TIMER_FUNC_TYPE)lapb_t2timer_expiry;
+ lapb->t2timer.function = lapb_t2timer_expiry;
lapb->t2timer.expires = jiffies + lapb->t2;
add_timer(&lapb->t2timer);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 88cc1ae935ea..d444752dbf40 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -151,21 +151,17 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
* After accepting the AddBA Request we activated a timer,
* resetting it after each frame that arrives from the originator.
*/
-static void sta_rx_agg_session_timer_expired(unsigned long data)
+static void sta_rx_agg_session_timer_expired(struct timer_list *t)
{
- /* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and various sta_info are needed here, so init
- * flow in sta_info_create gives the TID as data, while the timer_to_id
- * array gives the sta through container_of */
- u8 *ptid = (u8 *)data;
- u8 *timer_to_id = ptid - *ptid;
- struct sta_info *sta = container_of(timer_to_id, struct sta_info,
- timer_to_tid[0]);
+ struct tid_ampdu_rx *tid_rx_timer =
+ from_timer(tid_rx_timer, t, session_timer);
+ struct sta_info *sta = tid_rx_timer->sta;
+ u8 tid = tid_rx_timer->tid;
struct tid_ampdu_rx *tid_rx;
unsigned long timeout;
rcu_read_lock();
- tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
+ tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
if (!tid_rx) {
rcu_read_unlock();
return;
@@ -180,21 +176,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
rcu_read_unlock();
ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
- sta->sta.addr, (u16)*ptid);
+ sta->sta.addr, tid);
- set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
+ set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
-static void sta_rx_agg_reorder_timer_expired(unsigned long data)
+static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
{
- u8 *ptid = (u8 *)data;
- u8 *timer_to_id = ptid - *ptid;
- struct sta_info *sta = container_of(timer_to_id, struct sta_info,
- timer_to_tid[0]);
+ struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer);
rcu_read_lock();
- ieee80211_release_reorder_timeout(sta, *ptid);
+ ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid);
rcu_read_unlock();
}
@@ -356,14 +349,12 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
spin_lock_init(&tid_agg_rx->reorder_lock);
/* rx timer */
- setup_deferrable_timer(&tid_agg_rx->session_timer,
- sta_rx_agg_session_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_agg_rx->session_timer,
+ sta_rx_agg_session_timer_expired, TIMER_DEFERRABLE);
/* rx reorder timer */
- setup_timer(&tid_agg_rx->reorder_timer,
- sta_rx_agg_reorder_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_agg_rx->reorder_timer,
+ sta_rx_agg_reorder_timer_expired, 0);
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
@@ -399,6 +390,8 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
tid_agg_rx->auto_seq = auto_seq;
tid_agg_rx->started = false;
tid_agg_rx->reorder_buf_filtered = 0;
+ tid_agg_rx->tid = tid;
+ tid_agg_rx->sta = sta;
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
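
The tid/sta fields set just above replace the old timer_to_tid[] trick: instead of deriving the station by pointer arithmetic on a per-TID byte array, the per-TID aggregation state now carries explicit back-pointers that the from_timer() callbacks can use directly. A schematic illustration with invented names:

    #include <stdio.h>

    struct station;

    struct tid_state {
        struct station *sta;    /* back-pointer to the owner */
        unsigned char tid;
    };

    struct station {
        const char *addr;
        struct tid_state rx[8];
    };

    static void session_expired(struct tid_state *ts)
    {
        /* both pieces of context are reachable from the per-TID object */
        printf("session timer for %s tid %u\n", ts->sta->addr,
               (unsigned int)ts->tid);
    }

    int main(void)
    {
        static struct station sta = { .addr = "aa:bb:cc:dd:ee:ff" };
        int tid = 5;

        sta.rx[tid].sta = &sta;
        sta.rx[tid].tid = (unsigned char)tid;
        session_expired(&sta.rx[tid]);
        return 0;
    }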
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index bef516ec47f9..5f8ab5be369f 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -330,6 +330,11 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
spin_lock_bh(&sta->lock);
+ /* free struct pending for start, if present */
+ tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+ kfree(tid_tx);
+ sta->ampdu_mlme.tid_start_tx[tid] = NULL;
+
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx) {
spin_unlock_bh(&sta->lock);
@@ -422,15 +427,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
* add Block Ack response will arrive from the recipient.
* If this timer expires sta_addba_resp_timer_expired will be executed.
*/
-static void sta_addba_resp_timer_expired(unsigned long data)
+static void sta_addba_resp_timer_expired(struct timer_list *t)
{
- /* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and both sta_info and TID are needed, so init
- * flow in sta_info_create gives the TID as data, while the timer_to_id
- * array gives the sta through container_of */
- u16 tid = *(u8 *)data;
- struct sta_info *sta = container_of((void *)data,
- struct sta_info, timer_to_tid[tid]);
+ struct tid_ampdu_tx *tid_tx_timer =
+ from_timer(tid_tx_timer, t, addba_resp_timer);
+ struct sta_info *sta = tid_tx_timer->sta;
+ u8 tid = tid_tx_timer->tid;
struct tid_ampdu_tx *tid_tx;
/* check if the TID waits for addBA response */
@@ -525,21 +527,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
* After accepting the AddBA Response we activated a timer,
* resetting it after each frame that we send.
*/
-static void sta_tx_agg_session_timer_expired(unsigned long data)
+static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
- /* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and various sta_info are needed here, so init
- * flow in sta_info_create gives the TID as data, while the timer_to_id
- * array gives the sta through container_of */
- u8 *ptid = (u8 *)data;
- u8 *timer_to_id = ptid - *ptid;
- struct sta_info *sta = container_of(timer_to_id, struct sta_info,
- timer_to_tid[0]);
+ struct tid_ampdu_tx *tid_tx_timer =
+ from_timer(tid_tx_timer, t, session_timer);
+ struct sta_info *sta = tid_tx_timer->sta;
+ u8 tid = tid_tx_timer->tid;
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;
rcu_read_lock();
- tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
rcu_read_unlock();
return;
@@ -555,9 +553,9 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
rcu_read_unlock();
ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
- sta->sta.addr, (u16)*ptid);
+ sta->sta.addr, tid);
- ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+ ieee80211_stop_tx_ba_session(&sta->sta, tid);
}
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
@@ -670,16 +668,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
tid_tx->timeout = timeout;
+ tid_tx->sta = sta;
+ tid_tx->tid = tid;
/* response timer */
- setup_timer(&tid_tx->addba_resp_timer,
- sta_addba_resp_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);
/* tx timer */
- setup_deferrable_timer(&tid_tx->session_timer,
- sta_tx_agg_session_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_tx->session_timer,
+ sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 41f5e48f8021..167f83b853e6 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -292,7 +292,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
mutex_lock(&sta->ampdu_mlme.mtx);
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
- ___ieee80211_stop_tx_ba_session(sta, i, reason);
___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
WLAN_REASON_QSTA_LEAVE_QBSS,
reason != AGG_STOP_DESTROY_STA &&
@@ -300,6 +299,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
}
mutex_unlock(&sta->ampdu_mlme.mtx);
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+ ___ieee80211_stop_tx_ba_session(sta, i, reason);
+
/* stopping might queue the work again - so cancel only afterwards */
cancel_work_sync(&sta->ampdu_mlme.work);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index e9c6aa3ed05b..db07e0de9a03 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1711,10 +1711,10 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
sdata_unlock(sdata);
}
-static void ieee80211_ibss_timer(unsigned long data)
+static void ieee80211_ibss_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.ibss.timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
@@ -1723,8 +1723,7 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- setup_timer(&ifibss->timer, ieee80211_ibss_timer,
- (unsigned long) sdata);
+ timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0);
INIT_LIST_HEAD(&ifibss->incomplete_stations);
spin_lock_init(&ifibss->incomplete_lock);
INIT_WORK(&ifibss->csa_connection_drop_work,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 68f874e73561..885d00b41911 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1057,6 +1057,7 @@ struct tpt_led_trigger {
const struct ieee80211_tpt_blink *blink_table;
unsigned int blink_table_len;
struct timer_list timer;
+ struct ieee80211_local *local;
unsigned long prev_traffic;
unsigned long tx_bytes, rx_bytes;
unsigned int active, want;
@@ -1932,7 +1933,7 @@ static inline int ieee80211_ac_from_tid(int tid)
void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
-void ieee80211_dynamic_ps_timer(unsigned long data);
+void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave);
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 0505845b7ab8..ba0b507ea691 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -248,10 +248,10 @@ static unsigned long tpt_trig_traffic(struct ieee80211_local *local,
return DIV_ROUND_UP(delta, 1024 / 8);
}
-static void tpt_trig_timer(unsigned long data)
+static void tpt_trig_timer(struct timer_list *t)
{
- struct ieee80211_local *local = (void *)data;
- struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+ struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer);
+ struct ieee80211_local *local = tpt_trig->local;
struct led_classdev *led_cdev;
unsigned long on, off, tpt;
int i;
@@ -306,8 +306,9 @@ __ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
tpt_trig->blink_table = blink_table;
tpt_trig->blink_table_len = blink_table_len;
tpt_trig->want = flags;
+ tpt_trig->local = local;
- setup_timer(&tpt_trig->timer, tpt_trig_timer, (unsigned long)local);
+ timer_setup(&tpt_trig->timer, tpt_trig_timer, 0);
local->tpt_led_trigger = tpt_trig;
@@ -326,7 +327,7 @@ static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local)
tpt_trig_traffic(local, tpt_trig);
tpt_trig->running = true;
- tpt_trig_timer((unsigned long)local);
+ tpt_trig_timer(&tpt_trig->timer);
mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ));
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 8aa1f5b6a051..e054a2fd8d38 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -633,8 +633,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_dynamic_ps_enable_work);
INIT_WORK(&local->dynamic_ps_disable_work,
ieee80211_dynamic_ps_disable_work);
- setup_timer(&local->dynamic_ps_timer,
- ieee80211_dynamic_ps_timer, (unsigned long) local);
+ timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 7a76c4a6df30..5e27364e10ac 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -37,9 +37,10 @@ void ieee80211s_stop(void)
kmem_cache_destroy(rm_cache);
}
-static void ieee80211_mesh_housekeeping_timer(unsigned long data)
+static void ieee80211_mesh_housekeeping_timer(struct timer_list *t)
{
- struct ieee80211_sub_if_data *sdata = (void *) data;
+ struct ieee80211_sub_if_data *sdata =
+ from_timer(sdata, t, u.mesh.housekeeping_timer);
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
@@ -528,18 +529,18 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static void ieee80211_mesh_path_timer(unsigned long data)
+static void ieee80211_mesh_path_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mesh.mesh_path_timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
-static void ieee80211_mesh_path_root_timer(unsigned long data)
+static void ieee80211_mesh_path_root_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mesh.mesh_path_root_timer);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
@@ -1442,9 +1443,8 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
static u8 zero_addr[ETH_ALEN] = {};
- setup_timer(&ifmsh->housekeeping_timer,
- ieee80211_mesh_housekeeping_timer,
- (unsigned long) sdata);
+ timer_setup(&ifmsh->housekeeping_timer,
+ ieee80211_mesh_housekeeping_timer, 0);
ifmsh->accepting_plinks = true;
atomic_set(&ifmsh->mpaths, 0);
@@ -1458,12 +1458,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
mesh_pathtbl_init(sdata);
- setup_timer(&ifmsh->mesh_path_timer,
- ieee80211_mesh_path_timer,
- (unsigned long) sdata);
- setup_timer(&ifmsh->mesh_path_root_timer,
- ieee80211_mesh_path_root_timer,
- (unsigned long) sdata);
+ timer_setup(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, 0);
+ timer_setup(&ifmsh->mesh_path_root_timer,
+ ieee80211_mesh_path_root_timer, 0);
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
skb_queue_head_init(&ifmsh->ps.bc_buf);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 465b7853edc0..ee56f18cad3f 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -296,7 +296,7 @@ void mesh_path_tx_pending(struct mesh_path *mpath);
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
-void mesh_path_timer(unsigned long data);
+void mesh_path_timer(struct timer_list *t);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 146ec6c0f12f..4394463a0c2e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mpath;
u8 ttl, flags, hopcount;
const u8 *orig_addr;
- u32 orig_sn, metric, metric_txsta, interval;
+ u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
bool root_is_gate;
ttl = rann->rann_ttl;
@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
interval = le32_to_cpu(rann->rann_interval);
hopcount = rann->rann_hopcount;
hopcount++;
- metric = le32_to_cpu(rann->rann_metric);
+ orig_metric = le32_to_cpu(rann->rann_metric);
/* Ignore our own RANNs */
if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
return;
}
- metric_txsta = airtime_link_metric_get(local, sta);
+ last_hop_metric = airtime_link_metric_get(local, sta);
+ new_metric = orig_metric + last_hop_metric;
+ if (new_metric < orig_metric)
+ new_metric = MAX_METRIC;
mpath = mesh_path_lookup(sdata, orig_addr);
if (!mpath) {
@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
if (!(SN_LT(mpath->sn, orig_sn)) &&
- !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+ !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
rcu_read_unlock();
return;
}
@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
mpath->sn = orig_sn;
- mpath->rann_metric = metric + metric_txsta;
+ mpath->rann_metric = new_metric;
mpath->is_root = true;
/* Recording RANNs sender address to send individually
* addressed PREQs destined for root mesh STA */
@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
orig_sn, 0, NULL, 0, broadcast_addr,
hopcount, ttl, interval,
- metric + metric_txsta, 0, sdata);
+ new_metric, 0, sdata);
}
rcu_read_unlock();
@@ -1194,9 +1197,9 @@ endlookup:
return err;
}
-void mesh_path_timer(unsigned long data)
+void mesh_path_timer(struct timer_list *t)
{
- struct mesh_path *mpath = (void *) data;
+ struct mesh_path *mpath = from_timer(mpath, t, timer);
struct ieee80211_sub_if_data *sdata = mpath->sdata;
int ret;
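
The RANN handling above now accumulates the path metric with an overflow check: if adding the last-hop metric wraps the 32-bit sum, the result saturates at the maximum metric instead of wrapping around to a misleadingly small value. The saturating add in isolation (the all-ones maximum is used here for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_METRIC 0xffffffffu

    static uint32_t metric_add(uint32_t orig, uint32_t last_hop)
    {
        uint32_t sum = orig + last_hop;

        /* unsigned wrap-around makes the sum smaller than an addend */
        return sum < orig ? MAX_METRIC : sum;
    }

    int main(void)
    {
        printf("%u\n", metric_add(100, 50));              /* 150 */
        printf("%u\n", metric_add(0xfffffff0u, 0x20u));   /* clamped to max */
        return 0;
    }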
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 97269caafecd..86c8dfef56a4 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -399,8 +399,7 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&new_mpath->frame_queue);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
- setup_timer(&new_mpath->timer, mesh_path_timer,
- (unsigned long) new_mpath);
+ timer_setup(&new_mpath->timer, mesh_path_timer, 0);
return new_mpath;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e4ededa1909d..c244691deab9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -895,7 +895,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
if (!skb)
return;
@@ -1066,10 +1066,10 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
}
EXPORT_SYMBOL(ieee80211_chswitch_done);
-static void ieee80211_chswitch_timer(unsigned long data)
+static void ieee80211_chswitch_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.chswitch_timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
}
@@ -1577,9 +1577,9 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
}
}
-void ieee80211_dynamic_ps_timer(unsigned long data)
+void ieee80211_dynamic_ps_timer(struct timer_list *t)
{
- struct ieee80211_local *local = (void *) data;
+ struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer);
ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
}
@@ -3711,10 +3711,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
sdata_unlock(sdata);
}
-static void ieee80211_sta_timer(unsigned long data)
+static void ieee80211_sta_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
@@ -3991,10 +3991,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
sdata_unlock(sdata);
}
-static void ieee80211_sta_bcn_mon_timer(unsigned long data)
+static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.bcn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
@@ -4005,10 +4005,10 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
&sdata->u.mgd.beacon_connection_loss_work);
}
-static void ieee80211_sta_conn_mon_timer(unsigned long data)
+static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
@@ -4139,14 +4139,10 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work);
INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
ieee80211_tdls_peer_del_work);
- setup_timer(&ifmgd->timer, ieee80211_sta_timer,
- (unsigned long) sdata);
- setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
- (unsigned long) sdata);
- setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer,
- (unsigned long) sdata);
- setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
- (unsigned long) sdata);
+ timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
+ timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
+ timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
+ timer_setup(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 0);
INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk,
ieee80211_sta_handle_tspec_ac_params_wk);
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 88e6ebbbe24f..d351dc1162be 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -150,9 +150,10 @@ void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata)
sdata_unlock(sdata);
}
-static void ieee80211_ocb_housekeeping_timer(unsigned long data)
+static void ieee80211_ocb_housekeeping_timer(struct timer_list *t)
{
- struct ieee80211_sub_if_data *sdata = (void *)data;
+ struct ieee80211_sub_if_data *sdata =
+ from_timer(sdata, t, u.ocb.housekeeping_timer);
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
@@ -165,9 +166,8 @@ void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
- setup_timer(&ifocb->housekeeping_timer,
- ieee80211_ocb_housekeeping_timer,
- (unsigned long)sdata);
+ timer_setup(&ifocb->housekeeping_timer,
+ ieee80211_ocb_housekeeping_timer, 0);
INIT_LIST_HEAD(&ifocb->incomplete_stations);
spin_lock_init(&ifocb->incomplete_lock);
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a3060e55122c..0c5627f8a104 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -379,14 +379,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (sta_prepare_rate_control(local, sta, gfp))
goto free_txq;
- for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
- /*
- * timer_to_tid must be initialized with identity mapping
- * to enable session_timer's data differentiation. See
- * sta_rx_agg_session_timer_expired for usage.
- */
- sta->timer_to_tid[i] = i;
- }
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
skb_queue_head_init(&sta->ps_tx_buf[i]);
skb_queue_head_init(&sta->tx_filtered[i]);
@@ -1064,9 +1056,9 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
return ret;
}
-static void sta_info_cleanup(unsigned long data)
+static void sta_info_cleanup(struct timer_list *t)
{
- struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
struct sta_info *sta;
bool timer_needed = false;
@@ -1098,8 +1090,7 @@ int sta_info_init(struct ieee80211_local *local)
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
- setup_timer(&local->sta_cleanup, sta_info_cleanup,
- (unsigned long)local);
+ timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
return 0;
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5c54acd10562..cd53619435b6 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -126,6 +126,8 @@ enum ieee80211_agg_stop_reason {
AGG_STOP_DESTROY_STA,
};
+struct sta_info;
+
/**
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
@@ -133,8 +135,10 @@ enum ieee80211_agg_stop_reason {
* @session_timer: check if we keep Tx-ing on the TID (by timeout value)
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
+ * @sta: station we are attached to
* @dialog_token: dialog token for aggregation session
* @timeout: session timeout value to be filled in ADDBA requests
+ * @tid: TID number
* @state: session state (see above)
* @last_tx: jiffies of last tx activity
* @stop_initiator: initiator of a session stop
@@ -158,6 +162,7 @@ struct tid_ampdu_tx {
struct timer_list session_timer;
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
+ struct sta_info *sta;
unsigned long state;
unsigned long last_tx;
u16 timeout;
@@ -169,6 +174,7 @@ struct tid_ampdu_tx {
u16 failed_bar_ssn;
bool bar_pending;
bool amsdu;
+ u8 tid;
};
/**
@@ -181,12 +187,14 @@ struct tid_ampdu_tx {
* @reorder_time: jiffies when skb was added
* @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
* @reorder_timer: releases expired frames from the reorder buffer.
+ * @sta: station we are attached to
* @last_rx: jiffies of last rx activity
* @head_seq_num: head sequence number in reordering buffer.
* @stored_mpdu_num: number of MPDUs in reordering buffer
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
+ * @tid: TID number
* @rcu_head: RCU head used for freeing this struct
* @reorder_lock: serializes access to reorder buffer, see below.
* @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
@@ -208,6 +216,7 @@ struct tid_ampdu_rx {
u64 reorder_buf_filtered;
struct sk_buff_head *reorder_buf;
unsigned long *reorder_time;
+ struct sta_info *sta;
struct timer_list session_timer;
struct timer_list reorder_timer;
unsigned long last_rx;
@@ -216,6 +225,7 @@ struct tid_ampdu_rx {
u16 ssn;
u16 buf_size;
u16 timeout;
+ u8 tid;
u8 auto_seq:1,
removed:1,
started:1;
@@ -447,7 +457,6 @@ struct ieee80211_sta_rx_stats {
* plus one for non-QoS frames)
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
- * @timer_to_tid: identity mapping to ID timers
* @mesh: mesh STA information
* @debugfs_dir: debug filesystem directory dentry
* @dead: set to true when sta is unlinked
@@ -554,7 +563,6 @@ struct sta_info {
* Aggregation information, locked with lock.
*/
struct sta_ampdu_mlme ampdu_mlme;
- u8 timer_to_tid[IEEE80211_NUM_TIDS];
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 7b8154474b9e..3160954fc406 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4438,13 +4438,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
EXPORT_SYMBOL(ieee80211_pspoll_get);
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ bool qos_ok)
{
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_if_managed *ifmgd;
struct ieee80211_local *local;
struct sk_buff *skb;
+ bool qos = false;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return NULL;
@@ -4453,7 +4455,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
ifmgd = &sdata->u.mgd;
local = sdata->local;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+ if (qos_ok) {
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ qos = sta && sta->sta.wme;
+ rcu_read_unlock();
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ sizeof(*nullfunc) + 2);
if (!skb)
return NULL;
@@ -4463,6 +4475,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
+ if (qos) {
+ __le16 qos = cpu_to_le16(7);
+
+ BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_STYPE_NULLFUNC) !=
+ IEEE80211_STYPE_QOS_NULLFUNC);
+ nullfunc->frame_control |=
+ cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
+ skb->priority = 7;
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ skb_put_data(skb, &qos, sizeof(qos));
+ }
+
memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
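
When the peer supports WMM, the nullfunc built above is upgraded to a QoS Null frame: the QoS subtype bit is OR-ed into frame_control, a two-byte QoS control field is appended, and the frame is queued on the voice AC. The BUILD_BUG_ON relies on the QoS variant of a data subtype differing only by one subtype bit; schematically, with the subtype encodings written out by hand for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Data subtypes shifted into the frame-control subtype field:
     * Null (no data) is subtype 4, QoS Null is subtype 12 = 4 | 8,
     * so OR-ing the QoS variant in upgrades the subtype in place.
     */
    #define STYPE_NULLFUNC      ((uint16_t)(4u << 4))
    #define STYPE_QOS_NULLFUNC  ((uint16_t)(12u << 4))

    int main(void)
    {
        uint16_t fc = STYPE_NULLFUNC;

        fc |= STYPE_QOS_NULLFUNC;   /* upgrade to QoS Null */
        printf("0x%04x (is QoS Null: %s)\n", fc,
               fc == STYPE_QOS_NULLFUNC ? "yes" : "no");
        return 0;
    }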
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index a2b904a718c6..c989211bbabc 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -184,9 +184,9 @@ report:
nd->handler(nd);
}
-static void ncsi_channel_monitor(unsigned long data)
+static void ncsi_channel_monitor(struct timer_list *t)
{
- struct ncsi_channel *nc = (struct ncsi_channel *)data;
+ struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
struct ncsi_package *np = nc->package;
struct ncsi_dev_priv *ndp = np->ndp;
struct ncsi_channel_mode *ncm;
@@ -313,8 +313,7 @@ struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
nc->package = np;
nc->state = NCSI_CHANNEL_INACTIVE;
nc->monitor.enabled = false;
- setup_timer(&nc->monitor.timer,
- ncsi_channel_monitor, (unsigned long)nc);
+ timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
spin_lock_init(&nc->lock);
INIT_LIST_HEAD(&nc->link);
for (index = 0; index < NCSI_CAP_MAX; index++)
@@ -529,9 +528,9 @@ struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
return NULL;
}
-static void ncsi_request_timeout(unsigned long data)
+static void ncsi_request_timeout(struct timer_list *t)
{
- struct ncsi_request *nr = (struct ncsi_request *)data;
+ struct ncsi_request *nr = from_timer(nr, t, timer);
struct ncsi_dev_priv *ndp = nr->ndp;
unsigned long flags;
@@ -1577,9 +1576,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
ndp->requests[i].id = i;
ndp->requests[i].ndp = ndp;
- setup_timer(&ndp->requests[i].timer,
- ncsi_request_timeout,
- (unsigned long)&ndp->requests[i]);
+ timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
spin_lock_irqsave(&ncsi_dev_lock, flags);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 64778f9a8548..d6748a8a79c5 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -67,9 +67,9 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
-static void nf_ct_expectation_timed_out(unsigned long ul_expect)
+static void nf_ct_expectation_timed_out(struct timer_list *t)
{
- struct nf_conntrack_expect *exp = (void *)ul_expect;
+ struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);
spin_lock_bh(&nf_conntrack_expect_lock);
nf_ct_unlink_expect(exp);
@@ -368,8 +368,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
/* two references : one for hash insert, one for the timer */
refcount_add(2, &exp->use);
- setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
- (unsigned long)exp);
+ timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
helper = rcu_dereference_protected(master_help->helper,
lockdep_is_held(&nf_conntrack_expect_lock));
if (helper) {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index cad6498f10b0..e5afab86381c 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -151,7 +151,7 @@ instance_put(struct nfulnl_instance *inst)
call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
-static void nfulnl_timer(unsigned long data);
+static void nfulnl_timer(struct timer_list *t);
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
@@ -184,7 +184,7 @@ instance_create(struct net *net, u_int16_t group_num,
/* needs to be two, since we _put() after creation */
refcount_set(&inst->use, 2);
- setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
+ timer_setup(&inst->timer, nfulnl_timer, 0);
inst->net = get_net(net);
inst->peer_user_ns = user_ns;
@@ -377,9 +377,9 @@ __nfulnl_flush(struct nfulnl_instance *inst)
}
static void
-nfulnl_timer(unsigned long data)
+nfulnl_timer(struct timer_list *t)
{
- struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
+ struct nfulnl_instance *inst = from_timer(inst, t, timer);
spin_lock_bh(&inst->lock);
if (inst->skb)
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index daf45da448fa..ee3421ad108d 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -107,9 +107,9 @@ static void idletimer_tg_work(struct work_struct *work)
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
}
-static void idletimer_tg_expired(unsigned long data)
+static void idletimer_tg_expired(struct timer_list *t)
{
- struct idletimer_tg *timer = (struct idletimer_tg *) data;
+ struct idletimer_tg *timer = from_timer(timer, t, timer);
pr_debug("timer %s expired\n", timer->attr.attr.name);
@@ -143,8 +143,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
list_add(&info->timer->entry, &idletimer_tg_list);
- setup_timer(&info->timer->timer, idletimer_tg_expired,
- (unsigned long) info->timer);
+ timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
info->timer->refcnt = 1;
mod_timer(&info->timer->timer,
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 3ba31c194cce..0971634e5444 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -85,9 +85,10 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static void led_timeout_callback(unsigned long data)
+static void led_timeout_callback(struct timer_list *t)
{
- struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
+ struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t,
+ timer);
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
}
@@ -143,8 +144,7 @@ static int led_tg_check(const struct xt_tgchk_param *par)
/* See if we need to set up a timer */
if (ledinfo->delay > 0)
- setup_timer(&ledinternal->timer, led_timeout_callback,
- (unsigned long)ledinternal);
+ timer_setup(&ledinternal->timer, led_timeout_callback, 0);
list_add_tail(&ledinternal->list, &xt_led_triggers);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 2dec3583c97d..7ed9d4422a73 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -284,7 +284,7 @@ void nr_destroy_socket(struct sock *sk)
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
- sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_destroy_timer;
+ sk->sk_timer.function = nr_destroy_timer;
sk->sk_timer.expires = jiffies + 2 * HZ;
add_timer(&sk->sk_timer);
} else
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 989ae647825e..215ad22a9647 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -15,7 +15,7 @@
#include <net/netrom.h>
#include <linux/init.h>
-static void nr_loopback_timer(unsigned long);
+static void nr_loopback_timer(struct timer_list *);
static struct sk_buff_head loopback_queue;
static DEFINE_TIMER(loopback_timer, nr_loopback_timer);
@@ -48,7 +48,7 @@ int nr_loopback_queue(struct sk_buff *skb)
return 1;
}
-static void nr_loopback_timer(unsigned long param)
+static void nr_loopback_timer(struct timer_list *unused)
{
struct sk_buff *skb;
ax25_address *nr_dest;
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 43569aea0f5e..cbd51ed5a2d7 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -45,7 +45,7 @@ void nr_init_timers(struct sock *sk)
timer_setup(&nr->idletimer, nr_idletimer_expiry, 0);
/* initialized by sock_init_data */
- sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_heartbeat_expiry;
+ sk->sk_timer.function = nr_heartbeat_expiry;
}
void nr_start_t1timer(struct sock *sk)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index c25e9b4179c3..074960154993 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -591,18 +591,18 @@ static int nci_close_device(struct nci_dev *ndev)
}
/* NCI command timer function */
-static void nci_cmd_timer(unsigned long arg)
+static void nci_cmd_timer(struct timer_list *t)
{
- struct nci_dev *ndev = (void *) arg;
+ struct nci_dev *ndev = from_timer(ndev, t, cmd_timer);
atomic_set(&ndev->cmd_cnt, 1);
queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
/* NCI data exchange timer function */
-static void nci_data_timer(unsigned long arg)
+static void nci_data_timer(struct timer_list *t)
{
- struct nci_dev *ndev = (void *) arg;
+ struct nci_dev *ndev = from_timer(ndev, t, data_timer);
set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
queue_work(ndev->rx_wq, &ndev->rx_work);
@@ -1232,10 +1232,8 @@ int nci_register_device(struct nci_dev *ndev)
skb_queue_head_init(&ndev->rx_q);
skb_queue_head_init(&ndev->tx_q);
- setup_timer(&ndev->cmd_timer, nci_cmd_timer,
- (unsigned long) ndev);
- setup_timer(&ndev->data_timer, nci_data_timer,
- (unsigned long) ndev);
+ timer_setup(&ndev->cmd_timer, nci_cmd_timer, 0);
+ timer_setup(&ndev->data_timer, nci_data_timer, 0);
mutex_init(&ndev->req_lock);
INIT_LIST_HEAD(&ndev->conn_info_list);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0dab33fb9844..ef38e5aecd28 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -308,6 +308,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
+ unsigned int gso_type = skb_shinfo(skb)->gso_type;
+ struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
@@ -318,9 +320,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
if (segs == NULL)
return -EINVAL;
+ if (gso_type & SKB_GSO_UDP) {
+ /* The initial flow key extracted by ovs_flow_key_extract()
+ * in this case is for a first fragment, so we need to
+ * properly mark later fragments.
+ */
+ later_key = *key;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+ }
+
/* Queue all of the segments. */
skb = segs;
do {
+ if (gso_type & SKB_GSO_UDP && skb != segs)
+ key = &later_key;
+
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
if (err)
break;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 864ddb1e3642..dbe2379329c5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -631,7 +631,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
key->ip.frag = OVS_FRAG_TYPE_LATER;
return 0;
}
- if (nh->frag_off & htons(IP_MF))
+ if (nh->frag_off & htons(IP_MF) ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
else
key->ip.frag = OVS_FRAG_TYPE_NONE;
@@ -747,6 +748,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
if (key->ip.frag == OVS_FRAG_TYPE_LATER)
return 0;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
if (tcphdr_ok(skb)) {
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index dc424798ba6f..624ea74353dd 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2241,14 +2241,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
{
struct sw_flow_actions *sfa;
- if (size > MAX_ACTIONS_BUFSIZE) {
- OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
- return ERR_PTR(-EINVAL);
- }
+ WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
if (!sfa)
@@ -2321,12 +2318,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
new_acts_size = ksize(*sfa) * 2;
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+ OVS_NLERR(log, "Flow action size exceeds max %u",
+ MAX_ACTIONS_BUFSIZE);
return ERR_PTR(-EMSGSIZE);
+ }
new_acts_size = MAX_ACTIONS_BUFSIZE;
}
- acts = nla_alloc_flow_actions(new_acts_size, log);
+ acts = nla_alloc_flow_actions(new_acts_size);
if (IS_ERR(acts))
return (void *)acts;
@@ -3059,7 +3059,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
{
int err;
- *sfa = nla_alloc_flow_actions(nla_len(attr), log);
+ *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
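
The flow_netlink.c change moves the size check out of nla_alloc_flow_actions(): callers clamp the request to MAX_ACTIONS_BUFSIZE (or report -EMSGSIZE themselves) and the allocator keeps only a WARN_ON_ONCE as a safety net. A generic sketch of that split, with placeholder names and sizes:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define MY_MAX_BUFSIZE	(32 * 1024)

struct my_actions {
	int len;
	unsigned char data[];
};

/* The allocator trusts its callers; the WARN is only a safety net. */
static struct my_actions *my_alloc_actions(int size)
{
	struct my_actions *acts;

	WARN_ON_ONCE(size > MY_MAX_BUFSIZE);

	acts = kmalloc(sizeof(*acts) + size, GFP_KERNEL);
	if (!acts)
		return ERR_PTR(-ENOMEM);
	acts->len = size;
	return acts;
}

/* Caller-side clamp keeps the request inside the documented limit. */
static struct my_actions *my_copy_actions(int requested)
{
	return my_alloc_actions(min(requested, MY_MAX_BUFSIZE));
}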
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 737092ca9b4e..da215e5c1399 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1687,7 +1687,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
atomic_long_set(&rollover->num, 0);
atomic_long_set(&rollover->num_huge, 0);
atomic_long_set(&rollover->num_failed, 0);
- po->rollover = rollover;
}
if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
@@ -1745,6 +1744,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
+ po->rollover = rollover;
+ rollover = NULL;
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
__fanout_link(sk, po);
err = 0;
@@ -1758,10 +1759,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
}
out:
- if (err && rollover) {
- kfree_rcu(rollover, rcu);
- po->rollover = NULL;
- }
+ kfree(rollover);
mutex_unlock(&fanout_mutex);
return err;
}
@@ -1785,11 +1783,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
list_del(&f->list);
else
f = NULL;
-
- if (po->rollover) {
- kfree_rcu(po->rollover, rcu);
- po->rollover = NULL;
- }
}
mutex_unlock(&fanout_mutex);
@@ -3029,6 +3022,7 @@ static int packet_release(struct socket *sock)
synchronize_net();
if (f) {
+ kfree(po->rollover);
fanout_release_data(f);
kfree(f);
}
@@ -3097,6 +3091,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
if (need_rehook) {
if (po->running) {
rcu_read_unlock();
+ /* prevents packet_notifier() from calling
+ * register_prot_hook()
+ */
+ po->num = 0;
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
@@ -3105,6 +3103,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
dev->ifindex);
}
+ BUG_ON(po->running);
po->num = proto;
po->prot_hook.type = proto;
@@ -3843,7 +3842,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
void *data = &val;
union tpacket_stats_u st;
struct tpacket_rollover_stats rstats;
- struct packet_rollover *rollover;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -3922,18 +3920,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
0);
break;
case PACKET_ROLLOVER_STATS:
- rcu_read_lock();
- rollover = rcu_dereference(po->rollover);
- if (rollover) {
- rstats.tp_all = atomic_long_read(&rollover->num);
- rstats.tp_huge = atomic_long_read(&rollover->num_huge);
- rstats.tp_failed = atomic_long_read(&rollover->num_failed);
- data = &rstats;
- lv = sizeof(rstats);
- }
- rcu_read_unlock();
- if (!rollover)
+ if (!po->rollover)
return -EINVAL;
+ rstats.tp_all = atomic_long_read(&po->rollover->num);
+ rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+ rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+ data = &rstats;
+ lv = sizeof(rstats);
break;
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 562fbc155006..a1d2b2319ae9 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -95,7 +95,6 @@ struct packet_fanout {
struct packet_rollover {
int sock;
- struct rcu_head rcu;
atomic_long_t num;
atomic_long_t num_huge;
atomic_long_t num_failed;
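
The af_packet and internal.h hunks above switch struct packet_rollover from RCU-deferred freeing to plain ownership transfer: the candidate object is attached to the socket only once the fanout join is known to succeed, and whatever was not handed over is freed unconditionally on the way out. A hedged sketch of that idiom with invented names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct rollover_stats {
	atomic_long_t num;
};

struct my_sock {
	struct rollover_stats *rollover;	/* owned by the socket once attached */
};

static int my_join_group(struct my_sock *sk, bool want_rollover, bool group_ok)
{
	struct rollover_stats *rollover = NULL;
	int err = 0;

	if (want_rollover) {
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			return -ENOMEM;
	}

	if (group_ok) {
		/* Success: hand ownership to the socket and forget our copy. */
		sk->rollover = rollover;
		rollover = NULL;
	} else {
		err = -EINVAL;
	}

	kfree(rollover);	/* frees only what was never transferred */
	return err;
}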
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index cda4c6678ef1..62055d3069d2 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -37,7 +37,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
{
del_timer(&neigh->ftimer);
- neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
+ neigh->ftimer.function = rose_ftimer_expiry;
neigh->ftimer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
@@ -48,7 +48,7 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
{
del_timer(&neigh->t0timer);
- neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
+ neigh->t0timer.function = rose_t0timer_expiry;
neigh->t0timer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index ea613b2a9735..74555fb95615 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -36,7 +36,7 @@ void rose_start_heartbeat(struct sock *sk)
{
del_timer(&sk->sk_timer);
- sk->sk_timer.function = (TIMER_FUNC_TYPE)rose_heartbeat_expiry;
+ sk->sk_timer.function = rose_heartbeat_expiry;
sk->sk_timer.expires = jiffies + 5 * HZ;
add_timer(&sk->sk_timer);
@@ -48,7 +48,7 @@ void rose_start_t1timer(struct sock *sk)
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t1;
add_timer(&rose->timer);
@@ -60,7 +60,7 @@ void rose_start_t2timer(struct sock *sk)
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t2;
add_timer(&rose->timer);
@@ -72,7 +72,7 @@ void rose_start_t3timer(struct sock *sk)
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t3;
add_timer(&rose->timer);
@@ -84,7 +84,7 @@ void rose_start_hbtimer(struct sock *sk)
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->hb;
add_timer(&rose->timer);
@@ -97,7 +97,7 @@ void rose_start_idletimer(struct sock *sk)
del_timer(&rose->idletimer);
if (rose->idle > 0) {
- rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
+ rose->idletimer.function = rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
add_timer(&rose->idletimer);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 9b5c46b052fd..8f7cf4c042be 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
bool upgrade)
{
struct rxrpc_conn_parameters cp;
+ struct rxrpc_call_params p;
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
int ret;
@@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
if (key && !key->payload.data[0])
key = NULL; /* a no-security key */
+ memset(&p, 0, sizeof(p));
+ p.user_call_ID = user_call_ID;
+ p.tx_total_len = tx_total_len;
+
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
cp.key = key;
@@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
cp.exclusive = false;
cp.upgrade = upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
- gfp);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
/* The socket has been unlocked. */
if (!IS_ERR(call)) {
call->notify_rx = notify_rx;
@@ -863,6 +867,19 @@ static int rxrpc_release_sock(struct sock *sk)
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
+ /* We want to kill off all connections from a service socket
+ * as fast as possible because we can't share these; client
+ * sockets, on the other hand, can share an endpoint.
+ */
+ switch (sk->sk_state) {
+ case RXRPC_SERVER_BOUND:
+ case RXRPC_SERVER_BOUND2:
+ case RXRPC_SERVER_LISTENING:
+ case RXRPC_SERVER_LISTEN_DISABLED:
+ rx->local->service_closed = true;
+ break;
+ }
+
spin_lock_bh(&sk->sk_receive_queue.lock);
sk->sk_state = RXRPC_CLOSE;
spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -878,6 +895,8 @@ static int rxrpc_release_sock(struct sock *sk)
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
+ rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
+ rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
rxrpc_put_local(rx->local);
rx->local = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index b2151993d384..416688381eb7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -79,17 +79,20 @@ struct rxrpc_net {
struct list_head conn_proc_list; /* List of conns in this namespace for proc */
struct list_head service_conns; /* Service conns in this namespace */
rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
- struct delayed_work service_conn_reaper;
+ struct work_struct service_conn_reaper;
+ struct timer_list service_conn_reap_timer;
unsigned int nr_client_conns;
unsigned int nr_active_client_conns;
bool kill_all_client_conns;
+ bool live;
spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
struct list_head waiting_client_conns;
struct list_head active_client_conns;
struct list_head idle_client_conns;
- struct delayed_work client_conn_reaper;
+ struct work_struct client_conn_reaper;
+ struct timer_list client_conn_reap_timer;
struct list_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */
@@ -265,6 +268,7 @@ struct rxrpc_local {
rwlock_t services_lock; /* lock for services list */
int debug_id; /* debug ID for printks */
bool dead;
+ bool service_closed; /* Service socket closed */
struct sockaddr_rxrpc srx; /* local address */
};
@@ -338,8 +342,17 @@ enum rxrpc_conn_flag {
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
+ RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */
+ RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */
+ RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */
+ RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */
};
+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \
+ (1UL << RXRPC_CONN_FINAL_ACK_1) | \
+ (1UL << RXRPC_CONN_FINAL_ACK_2) | \
+ (1UL << RXRPC_CONN_FINAL_ACK_3))
+
/*
* Events that can be raised upon a connection.
*/
@@ -393,6 +406,7 @@ struct rxrpc_connection {
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
struct list_head waiting_calls; /* Calls waiting for channels */
struct rxrpc_channel {
+ unsigned long final_ack_at; /* Time at which to issue final ACK */
struct rxrpc_call __rcu *call; /* Active call */
u32 call_id; /* ID of current call */
u32 call_counter; /* Call ID counter */
@@ -404,6 +418,7 @@ struct rxrpc_connection {
};
} channels[RXRPC_MAXCALLS];
+ struct timer_list timer; /* Conn event timer */
struct work_struct processor; /* connection event processor */
union {
struct rb_node client_node; /* Node in local->client_conns */
@@ -457,9 +472,10 @@ enum rxrpc_call_flag {
enum rxrpc_call_event {
RXRPC_CALL_EV_ACK, /* need to generate ACK */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
- RXRPC_CALL_EV_TIMER, /* Timer expired */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
RXRPC_CALL_EV_PING, /* Ping send required */
+ RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
+ RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
};
/*
@@ -503,10 +519,16 @@ struct rxrpc_call {
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct mutex user_mutex; /* User access mutex */
- ktime_t ack_at; /* When deferred ACK needs to happen */
- ktime_t resend_at; /* When next resend needs to happen */
- ktime_t ping_at; /* When next to send a ping */
- ktime_t expire_at; /* When the call times out */
+ unsigned long ack_at; /* When deferred ACK needs to happen */
+ unsigned long ack_lost_at; /* When ACK is figured as lost */
+ unsigned long resend_at; /* When next resend needs to happen */
+ unsigned long ping_at; /* When next to send a ping */
+ unsigned long keepalive_at; /* When next to send a keepalive ping */
+ unsigned long expect_rx_by; /* When we expect to get a packet by */
+ unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
+ unsigned long expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -609,6 +631,8 @@ struct rxrpc_call {
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+ rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
+ rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
};
/*
@@ -632,6 +656,35 @@ struct rxrpc_ack_summary {
u8 cumulative_acks;
};
+/*
+ * sendmsg() cmsg-specified parameters.
+ */
+enum rxrpc_command {
+ RXRPC_CMD_SEND_DATA, /* send data message */
+ RXRPC_CMD_SEND_ABORT, /* request abort generation */
+ RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
+ RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
+};
+
+struct rxrpc_call_params {
+ s64 tx_total_len; /* Total Tx data length (if send data) */
+ unsigned long user_call_ID; /* User's call ID */
+ struct {
+ u32 hard; /* Maximum lifetime (sec) */
+ u32 idle; /* Max time since last data packet (msec) */
+ u32 normal; /* Max time since last call packet (msec) */
+ } timeouts;
+ u8 nr_timeouts; /* Number of timeouts specified */
+};
+
+struct rxrpc_send_params {
+ struct rxrpc_call_params call;
+ u32 abort_code; /* Abort code to Tx (if abort) */
+ enum rxrpc_command command : 8; /* The command to implement */
+ bool exclusive; /* Shared or exclusive call */
+ bool upgrade; /* If the connection is upgradeable */
+};
+
#include <trace/events/rxrpc.h>
/*
@@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+ unsigned long expire_at,
+ unsigned long now,
+ enum rxrpc_timer_trace why)
+{
+ trace_rxrpc_timer(call, why, now);
+ timer_reduce(&call->timer, expire_at);
+}
+
/*
* call_object.c
*/
@@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
- unsigned long, s64, gfp_t);
+ struct rxrpc_call_params *, gfp_t);
int rxrpc_retry_client_call(struct rxrpc_sock *,
struct rxrpc_call *,
struct rxrpc_conn_parameters *,
@@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
*/
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
@@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *);
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
@@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
rxrpc_put_service_conn(conn);
}
+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+ unsigned long expire_at)
+{
+ timer_reduce(&conn->timer, expire_at);
+}
+
/*
* conn_service.c
*/
@@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
* misc.c
*/
extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];
@@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
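
ar-internal.h introduces rxrpc_reduce_call_timer() and rxrpc_reduce_conn_timer() as thin wrappers around timer_reduce(), which only shortens a pending timer and never pushes it further out, so several jiffies-based deadlines can safely share a single struct timer_list. A small sketch of how such a helper might be exercised; the names here are illustrative, not taken from the patch:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_call {
	struct timer_list timer;	/* one timer servicing several deadlines */
	unsigned long ack_at;
	unsigned long resend_at;
};

static void my_call_reduce_timer(struct my_call *call, unsigned long expire_at)
{
	/* Unlike mod_timer(), timer_reduce() never moves the expiry later. */
	timer_reduce(&call->timer, expire_at);
}

static void my_call_set_ack_deadline(struct my_call *call, unsigned long delay)
{
	unsigned long now = jiffies;

	WRITE_ONCE(call->ack_at, now + delay);
	my_call_reduce_timer(call, now + delay);
}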
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index cbd1701e813a..3028298ca561 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
/* Now it gets complicated, because calls get registered with the
* socket here, particularly if a user ID is preassigned by the user.
*/
- call = rxrpc_alloc_call(gfp);
+ call = rxrpc_alloc_call(rx, gfp);
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 3574508baf9a..bda952ffe6a6 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -22,80 +22,6 @@
#include "ar-internal.h"
/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now)
-{
- unsigned long t_j, now_j = jiffies;
- ktime_t t;
- bool queue = false;
-
- if (call->state < RXRPC_CALL_COMPLETE) {
- t = call->expire_at;
- if (!ktime_after(t, now)) {
- trace_rxrpc_timer(call, why, now, now_j);
- queue = true;
- goto out;
- }
-
- if (!ktime_after(call->resend_at, now)) {
- call->resend_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- queue = true;
- } else if (ktime_before(call->resend_at, t)) {
- t = call->resend_at;
- }
-
- if (!ktime_after(call->ack_at, now)) {
- call->ack_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- queue = true;
- } else if (ktime_before(call->ack_at, t)) {
- t = call->ack_at;
- }
-
- if (!ktime_after(call->ping_at, now)) {
- call->ping_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
- queue = true;
- } else if (ktime_before(call->ping_at, t)) {
- t = call->ping_at;
- }
-
- t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
- t_j += jiffies;
-
- /* We have to make sure that the calculated jiffies value falls
- * at or after the nsec value, or we may loop ceaselessly
- * because the timer times out, but we haven't reached the nsec
- * timeout yet.
- */
- t_j++;
-
- if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
- mod_timer(&call->timer, t_j);
- trace_rxrpc_timer(call, why, now, now_j);
- }
- }
-
-out:
- if (queue)
- rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now)
-{
- read_lock_bh(&call->state_lock);
- __rxrpc_set_timer(call, why, now);
- read_unlock_bh(&call->state_lock);
-}
-
-/*
* Propose a PING ACK be sent.
*/
static void rxrpc_propose_ping(struct rxrpc_call *call,
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
rxrpc_queue_call(call);
} else {
- ktime_t now = ktime_get_real();
- ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+ unsigned long now = jiffies;
+ unsigned long ping_at = now + rxrpc_idle_ack_delay;
- if (ktime_before(ping_at, call->ping_at)) {
- call->ping_at = ping_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+ if (time_before(ping_at, call->ping_at)) {
+ WRITE_ONCE(call->ping_at, ping_at);
+ rxrpc_reduce_call_timer(call, ping_at, now,
+ rxrpc_timer_set_for_ping);
}
}
}
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
- unsigned int expiry = rxrpc_soft_ack_delay;
- ktime_t now, ack_at;
+ unsigned long expiry = rxrpc_soft_ack_delay;
s8 prior = rxrpc_ack_priority[ack_reason];
/* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
background)
rxrpc_queue_call(call);
} else {
- now = ktime_get_real();
- ack_at = ktime_add_ms(now, expiry);
- if (ktime_before(ack_at, call->ack_at)) {
- call->ack_at = ack_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+ unsigned long now = jiffies, ack_at;
+
+ if (call->peer->rtt_usage > 0)
+ ack_at = nsecs_to_jiffies(call->peer->rtt);
+ else
+ ack_at = expiry;
+
+ ack_at = jiffies + expiry;
+ if (time_before(ack_at, call->ack_at)) {
+ WRITE_ONCE(call->ack_at, ack_at);
+ rxrpc_reduce_call_timer(call, ack_at, now,
+ rxrpc_timer_set_for_ack);
}
}
@@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
+ unsigned long resend_at;
rxrpc_seq_t cursor, seq, top;
- ktime_t max_age, oldest, ack_ts;
+ ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
int ix;
u8 annotation, anno_type, retrans = 0, unacked = 0;
_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
- max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+ if (call->peer->rtt_usage > 1)
+ timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
+ else
+ timeout = ms_to_ktime(rxrpc_resend_timeout);
+ min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
+ if (ktime_before(timeout, min_timeo))
+ timeout = min_timeo;
+
+ now = ktime_get_real();
+ max_age = ktime_sub(now, timeout);
spin_lock_bh(&call->lock);
@@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
- call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+ resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+ resend_at += jiffies + rxrpc_resend_timeout;
+ WRITE_ONCE(call->resend_at, resend_at);
if (unacked)
rxrpc_congestion_timeout(call);
@@ -292,14 +237,15 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
* retransmitting data.
*/
if (!retrans) {
- rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+ rxrpc_reduce_call_timer(call, resend_at, now,
+ rxrpc_timer_set_for_resend);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
goto out;
rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
- rxrpc_send_ack_packet(call, true);
+ rxrpc_send_ack_packet(call, true, NULL);
goto out;
}
@@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
- ktime_t now;
+ rxrpc_serial_t *send_ack;
+ unsigned long now, next, t;
rxrpc_see_call(call);
@@ -384,22 +331,89 @@ recheck_state:
goto out_put;
}
- now = ktime_get_real();
- if (ktime_before(call->expire_at, now)) {
+ /* Work out if any timeouts tripped */
+ now = jiffies;
+ t = READ_ONCE(call->expect_rx_by);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->expect_req_by);
+ if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+ time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->expect_term_by);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->ack_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+ cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_ACK, &call->events);
+ }
+
+ t = READ_ONCE(call->ack_lost_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
+ cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+ }
+
+ t = READ_ONCE(call->keepalive_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+ cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+ rxrpc_propose_ack_ping_for_keepalive);
+ set_bit(RXRPC_CALL_EV_PING, &call->events);
+ }
+
+ t = READ_ONCE(call->ping_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+ cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_PING, &call->events);
+ }
+
+ t = READ_ONCE(call->resend_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+ cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+ }
+
+ /* Process events */
+ if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
}
- if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+ send_ack = NULL;
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
+ call->acks_lost_top = call->tx_top;
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ack_ping_for_lost_ack);
+ send_ack = &call->acks_lost_ping;
+ }
+
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+ send_ack) {
if (call->ackr_reason) {
- rxrpc_send_ack_packet(call, false);
+ rxrpc_send_ack_packet(call, false, send_ack);
goto recheck_state;
}
}
if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
- rxrpc_send_ack_packet(call, true);
+ rxrpc_send_ack_packet(call, true, NULL);
goto recheck_state;
}
@@ -408,7 +422,24 @@ recheck_state:
goto recheck_state;
}
- rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+ /* Make sure the timer is restarted */
+ next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+
+ set(call->expect_req_by);
+ set(call->expect_term_by);
+ set(call->ack_at);
+ set(call->ack_lost_at);
+ set(call->resend_at);
+ set(call->keepalive_at);
+ set(call->ping_at);
+
+ now = jiffies;
+ if (time_after_eq(now, next))
+ goto recheck_state;
+
+ rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
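
The reworked rxrpc_process_call() above tests each jiffies deadline with time_after_eq() and, when one has fired, uses cmpxchg() to push that field out to now + MAX_JIFFY_OFFSET so a concurrent re-arm is not clobbered and the event bit is raised only once per arming. A reduced sketch of that check-and-disarm step under assumed names:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>

#define MY_EV_ACK	0

struct my_call {
	unsigned long ack_at;		/* jiffies deadline, (re)armed elsewhere */
	unsigned long events;
};

static void my_call_check_ack_deadline(struct my_call *call)
{
	unsigned long now = jiffies;
	unsigned long t = READ_ONCE(call->ack_at);

	if (time_after_eq(now, t)) {
		/* Disarm only if nobody re-armed the deadline meanwhile. */
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(MY_EV_ACK, &call->events);
	}
}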
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 4c7fbc6dcce7..0b2db38dd32d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -45,16 +45,20 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
struct kmem_cache *rxrpc_call_jar;
-static void rxrpc_call_timer_expired(unsigned long _call)
+static void rxrpc_call_timer_expired(struct timer_list *t)
{
- struct rxrpc_call *call = (struct rxrpc_call *)_call;
+ struct rxrpc_call *call = from_timer(call, t, timer);
_enter("%d", call->debug_id);
- if (call->state < RXRPC_CALL_COMPLETE)
- rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+ rxrpc_queue_call(call);
+ }
}
+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
+
/*
* find an extant server call
* - called in process context with IRQs enabled
@@ -95,7 +99,7 @@ found_extant_call:
/*
* allocate a new call
*/
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
{
struct rxrpc_call *call;
@@ -114,8 +118,15 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
goto nomem_2;
mutex_init(&call->user_mutex);
- setup_timer(&call->timer, rxrpc_call_timer_expired,
- (unsigned long)call);
+
+ /* Prevent lockdep reporting a deadlock false positive between the afs
+ * filesystem and sys_sendmsg() via the mmap sem.
+ */
+ if (rx->sk.sk_kern_sock)
+ lockdep_set_class(&call->user_mutex,
+ &rxrpc_call_user_mutex_lock_class_key);
+
+ timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
INIT_WORK(&call->processor, &rxrpc_process_call);
INIT_LIST_HEAD(&call->link);
INIT_LIST_HEAD(&call->chan_wait_link);
@@ -129,6 +140,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call->tx_total_len = -1;
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -151,7 +164,8 @@ nomem:
/*
* Allocate a new client call.
*/
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+ struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
struct rxrpc_call *call;
@@ -159,7 +173,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
_enter("");
- call = rxrpc_alloc_call(gfp);
+ call = rxrpc_alloc_call(rx, gfp);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -178,15 +192,17 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
*/
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- ktime_t now = ktime_get_real(), expire_at;
-
- expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
- call->expire_at = expire_at;
- call->ack_at = expire_at;
- call->ping_at = expire_at;
- call->resend_at = expire_at;
- call->timer.expires = jiffies + LONG_MAX / 2;
- rxrpc_set_timer(call, rxrpc_timer_begin, now);
+ unsigned long now = jiffies;
+ unsigned long j = now + MAX_JIFFY_OFFSET;
+
+ call->ack_at = j;
+ call->ack_lost_at = j;
+ call->resend_at = j;
+ call->ping_at = j;
+ call->expect_rx_by = j;
+ call->expect_req_by = j;
+ call->expect_term_by = j;
+ call->timer.expires = now;
}
/*
@@ -197,8 +213,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
- unsigned long user_call_ID,
- s64 tx_total_len,
+ struct rxrpc_call_params *p,
gfp_t gfp)
__releases(&rx->sk.sk_lock.slock)
{
@@ -208,18 +223,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
const void *here = __builtin_return_address(0);
int ret;
- _enter("%p,%lx", rx, user_call_ID);
+ _enter("%p,%lx", rx, p->user_call_ID);
- call = rxrpc_alloc_client_call(srx, gfp);
+ call = rxrpc_alloc_client_call(rx, srx, gfp);
if (IS_ERR(call)) {
release_sock(&rx->sk);
_leave(" = %ld", PTR_ERR(call));
return call;
}
- call->tx_total_len = tx_total_len;
+ call->tx_total_len = p->tx_total_len;
trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
- here, (const void *)user_call_ID);
+ here, (const void *)p->user_call_ID);
/* We need to protect a partially set up call against the user as we
* will be acting outside the socket lock.
@@ -235,16 +250,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
parent = *pp;
xcall = rb_entry(parent, struct rxrpc_call, sock_node);
- if (user_call_ID < xcall->user_call_ID)
+ if (p->user_call_ID < xcall->user_call_ID)
pp = &(*pp)->rb_left;
- else if (user_call_ID > xcall->user_call_ID)
+ else if (p->user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
goto error_dup_user_ID;
}
rcu_assign_pointer(call->socket, rx);
- call->user_call_ID = user_call_ID;
+ call->user_call_ID = p->user_call_ID;
__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 5f9624bd311c..7f74ca3059f8 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -85,8 +85,8 @@
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
/*
* We use machine-unique IDs for our client connections.
@@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
+ /* Cancel the final ACK on the previous call if it hasn't been sent yet
+ * as the DATA packet will implicitly ACK it.
+ */
+ clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+
write_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
@@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+ rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet);
ret = rxrpc_get_client_conn(call, cp, srx, gfp);
@@ -752,6 +757,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
}
/*
+ * Set the reap timer.
+ */
+static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+{
+ unsigned long now = jiffies;
+ unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+
+ if (rxnet->live)
+ timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+}
+
+/*
* Disconnect a client call.
*/
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
@@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
goto out_2;
}
+ /* Schedule the final ACK to be transmitted in a short while so that it
+ * can be skipped if we find a follow-on call. The first DATA packet
+ * of the follow on call will implicitly ACK this call.
+ */
+ if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+ unsigned long final_ack_at = jiffies + 2;
+
+ WRITE_ONCE(chan->final_ack_at, final_ack_at);
+ smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
+ set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+ rxrpc_reduce_conn_timer(conn, final_ack_at);
+ }
+
/* Things are more complex and we need the cache lock. We might be
* able to simply idle the conn or it might now be lurking on the wait
* list. It might even get moved back to the active list whilst we're
@@ -878,9 +908,7 @@ idle_connection:
list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
if (rxnet->idle_client_conns.next == &conn->cache_link &&
!rxnet->kill_all_client_conns)
- queue_delayed_work(rxrpc_workqueue,
- &rxnet->client_conn_reaper,
- rxrpc_conn_idle_client_expiry);
+ rxrpc_set_client_reap_timer(rxnet);
} else {
trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
@@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
struct rxrpc_connection *conn;
struct rxrpc_net *rxnet =
- container_of(to_delayed_work(work),
- struct rxrpc_net, client_conn_reaper);
+ container_of(work, struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
bool did_discard = false;
@@ -1061,6 +1088,8 @@ next:
expiry = rxrpc_conn_idle_client_expiry;
if (nr_conns > rxrpc_reap_client_connections)
expiry = rxrpc_conn_idle_client_fast_expiry;
+ if (conn->params.local->service_closed)
+ expiry = rxrpc_closed_conn_expiry * HZ;
conn_expires_at = conn->idle_timestamp + expiry;
@@ -1096,9 +1125,8 @@ not_yet_expired:
*/
_debug("not yet");
if (!rxnet->kill_all_client_conns)
- queue_delayed_work(rxrpc_workqueue,
- &rxnet->client_conn_reaper,
- conn_expires_at - now);
+ timer_reduce(&rxnet->client_conn_reap_timer,
+ conn_expires_at);
out:
spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
rxnet->kill_all_client_conns = true;
spin_unlock(&rxnet->client_conn_cache_lock);
- cancel_delayed_work(&rxnet->client_conn_reaper);
+ del_timer_sync(&rxnet->client_conn_reap_timer);
- if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
+ if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
_debug("destroy: queue failed");
_leave("");
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 59a51a56e7c8..9e9a8db1bc9c 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -24,9 +24,10 @@
* Retransmit terminal ACK or ABORT of the previous call.
*/
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int channel)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
struct rxrpc_channel *chan;
struct msghdr msg;
struct kvec iov;
@@ -48,7 +49,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
_enter("%d", conn->debug_id);
- chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+ chan = &conn->channels[channel];
/* If the last call got moved on whilst we were waiting to run, just
* ignore this packet.
@@ -56,7 +57,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
call_id = READ_ONCE(chan->last_call);
/* Sync with __rxrpc_disconnect_call() */
smp_rmb();
- if (call_id != sp->hdr.callNumber)
+ if (skb && call_id != sp->hdr.callNumber)
return;
msg.msg_name = &conn->params.peer->srx.transport;
@@ -65,9 +66,9 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
msg.msg_controllen = 0;
msg.msg_flags = 0;
- pkt.whdr.epoch = htonl(sp->hdr.epoch);
- pkt.whdr.cid = htonl(sp->hdr.cid);
- pkt.whdr.callNumber = htonl(sp->hdr.callNumber);
+ pkt.whdr.epoch = htonl(conn->proto.epoch);
+ pkt.whdr.cid = htonl(conn->proto.cid);
+ pkt.whdr.callNumber = htonl(call_id);
pkt.whdr.seq = 0;
pkt.whdr.type = chan->last_type;
pkt.whdr.flags = conn->out_clientflag;
@@ -87,11 +88,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
mtu = conn->params.peer->if_mtu;
mtu -= conn->params.peer->hdrsize;
pkt.ack.bufferSpace = 0;
- pkt.ack.maxSkew = htons(skb->priority);
- pkt.ack.firstPacket = htonl(chan->last_seq);
- pkt.ack.previousPacket = htonl(chan->last_seq - 1);
- pkt.ack.serial = htonl(sp->hdr.serial);
- pkt.ack.reason = RXRPC_ACK_DUPLICATE;
+ pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
+ pkt.ack.firstPacket = htonl(chan->last_seq + 1);
+ pkt.ack.previousPacket = htonl(chan->last_seq);
+ pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
+ pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
pkt.info.maxMTU = htonl(mtu);
@@ -272,7 +273,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
case RXRPC_PACKET_TYPE_ACK:
- rxrpc_conn_retransmit_call(conn, skb);
+ rxrpc_conn_retransmit_call(conn, skb,
+ sp->hdr.cid & RXRPC_CHANNELMASK);
return 0;
case RXRPC_PACKET_TYPE_BUSY:
@@ -379,6 +381,48 @@ abort:
}
/*
+ * Process delayed final ACKs that we haven't subsumed into a subsequent call.
+ */
+static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+{
+ unsigned long j = jiffies, next_j;
+ unsigned int channel;
+ bool set;
+
+again:
+ next_j = j + LONG_MAX;
+ set = false;
+ for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
+ struct rxrpc_channel *chan = &conn->channels[channel];
+ unsigned long ack_at;
+
+ if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
+ continue;
+
+ smp_rmb(); /* vs rxrpc_disconnect_client_call */
+ ack_at = READ_ONCE(chan->final_ack_at);
+
+ if (time_before(j, ack_at)) {
+ if (time_before(ack_at, next_j)) {
+ next_j = ack_at;
+ set = true;
+ }
+ continue;
+ }
+
+ if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
+ &conn->flags))
+ rxrpc_conn_retransmit_call(conn, NULL, channel);
+ }
+
+ j = jiffies;
+ if (time_before_eq(next_j, j))
+ goto again;
+ if (set)
+ rxrpc_reduce_conn_timer(conn, next_j);
+}
+
+/*
* connection-level event processor
*/
void rxrpc_process_connection(struct work_struct *work)
@@ -394,6 +438,10 @@ void rxrpc_process_connection(struct work_struct *work)
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
+ /* Process delayed ACKs whose time has come. */
+ if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+ rxrpc_process_delayed_final_acks(conn);
+
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index fe575798592f..1aad04a32d5e 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -20,10 +20,19 @@
/*
* Time till a connection expires after last use (in seconds).
*/
-unsigned int rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
static void rxrpc_destroy_connection(struct rcu_head *);
+static void rxrpc_connection_timer(struct timer_list *timer)
+{
+ struct rxrpc_connection *conn =
+ container_of(timer, struct rxrpc_connection, timer);
+
+ rxrpc_queue_conn(conn);
+}
+
/*
* allocate a new connection
*/
@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
INIT_LIST_HEAD(&conn->cache_link);
spin_lock_init(&conn->channel_lock);
INIT_LIST_HEAD(&conn->waiting_calls);
+ timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
INIT_WORK(&conn->processor, &rxrpc_process_connection);
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
@@ -301,21 +311,29 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
}
/*
+ * Set the service connection reap timer.
+ */
+static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
+ unsigned long reap_at)
+{
+ if (rxnet->live)
+ timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
+}
+
+/*
* Release a service connection
*/
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0);
int n;
n = atomic_dec_return(&conn->usage);
trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0);
- if (n == 0) {
- rxnet = conn->params.local->rxnet;
- rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
- }
+ if (n == 1)
+ rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+ jiffies + rxrpc_connection_expiry);
}
/*
@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
_net("DESTROY CONN %d", conn->debug_id);
+ del_timer_sync(&conn->timer);
rxrpc_purge_queue(&conn->rx_queue);
conn->security->clear(conn);
@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
{
struct rxrpc_connection *conn, *_p;
struct rxrpc_net *rxnet =
- container_of(to_delayed_work(work),
- struct rxrpc_net, service_conn_reaper);
- unsigned long reap_older_than, earliest, idle_timestamp, now;
+ container_of(work, struct rxrpc_net, service_conn_reaper);
+ unsigned long expire_at, earliest, idle_timestamp, now;
LIST_HEAD(graveyard);
_enter("");
now = jiffies;
- reap_older_than = now - rxrpc_connection_expiry * HZ;
- earliest = ULONG_MAX;
+ earliest = now + MAX_JIFFY_OFFSET;
write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
continue;
- idle_timestamp = READ_ONCE(conn->idle_timestamp);
- _debug("reap CONN %d { u=%d,t=%ld }",
- conn->debug_id, atomic_read(&conn->usage),
- (long)reap_older_than - (long)idle_timestamp);
-
- if (time_after(idle_timestamp, reap_older_than)) {
- if (time_before(idle_timestamp, earliest))
- earliest = idle_timestamp;
- continue;
+ if (rxnet->live) {
+ idle_timestamp = READ_ONCE(conn->idle_timestamp);
+ expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
+ if (conn->params.local->service_closed)
+ expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
+
+ _debug("reap CONN %d { u=%d,t=%ld }",
+ conn->debug_id, atomic_read(&conn->usage),
+ (long)expire_at - (long)now);
+
+ if (time_before(now, expire_at)) {
+ if (time_before(expire_at, earliest))
+ earliest = expire_at;
+ continue;
+ }
}
/* The usage count sits at 1 whilst the object is unused on the
@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
*/
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue;
+ trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
if (rxrpc_conn_is_client(conn))
BUG();
@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
}
write_unlock(&rxnet->conn_lock);
- if (earliest != ULONG_MAX) {
- _debug("reschedule reaper %ld", (long) earliest - now);
+ if (earliest != now + MAX_JIFFY_OFFSET) {
+ _debug("reschedule reaper %ld", (long)earliest - (long)now);
ASSERT(time_after(earliest, now));
- rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
- earliest - now);
+ rxrpc_set_service_reap_timer(rxnet, earliest);
}
while (!list_empty(&graveyard)) {
@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
rxrpc_destroy_all_client_connections(rxnet);
- rxrpc_connection_expiry = 0;
- cancel_delayed_work(&rxnet->client_conn_reaper);
- rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
+ del_timer_sync(&rxnet->service_conn_reap_timer);
+ rxrpc_queue_work(&rxnet->service_conn_reaper);
flush_workqueue(rxrpc_workqueue);
write_lock(&rxnet->conn_lock);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1b592073ec96..23a5e61d8f79 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -318,16 +318,18 @@ bad_state:
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
+ unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
spin_lock_bh(&call->lock);
call->ackr_reason = 0;
- call->resend_at = call->expire_at;
- call->ack_at = call->expire_at;
spin_unlock_bh(&call->lock);
- rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
- ktime_get_real());
+ now = jiffies;
+ timo = now + MAX_JIFFY_OFFSET;
+ WRITE_ONCE(call->resend_at, timo);
+ WRITE_ONCE(call->ack_at, timo);
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
if (state >= RXRPC_CALL_COMPLETE)
return;
+ if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ unsigned long timo = READ_ONCE(call->next_req_timo);
+ unsigned long now, expect_req_by;
+
+ if (timo) {
+ now = jiffies;
+ expect_req_by = now + timo;
+ WRITE_ONCE(call->expect_req_by, expect_req_by);
+ rxrpc_reduce_call_timer(call, expect_req_by, now,
+ rxrpc_timer_set_for_idle);
+ }
+ }
+
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
@@ -616,6 +631,43 @@ found:
}
/*
+ * Process the response to a ping that we sent to find out if we lost an ACK.
+ *
+ * If we got back a ping response that indicates a lower tx_top than what we
+ * had at the time of the ping transmission, we adjudge all the DATA packets
+ * sent between the response tx_top and the ping-time tx_top to have been lost.
+ */
+static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
+{
+ rxrpc_seq_t top, bottom, seq;
+ bool resend = false;
+
+ spin_lock_bh(&call->lock);
+
+ bottom = call->tx_hard_ack + 1;
+ top = call->acks_lost_top;
+ if (before(bottom, top)) {
+ for (seq = bottom; before_eq(seq, top); seq++) {
+ int ix = seq & RXRPC_RXTX_BUFF_MASK;
+ u8 annotation = call->rxtx_annotations[ix];
+ u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
+
+ if (anno_type != RXRPC_TX_ANNO_UNACK)
+ continue;
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ annotation |= RXRPC_TX_ANNO_RETRANS;
+ call->rxtx_annotations[ix] = annotation;
+ resend = true;
+ }
+ }
+
+ spin_unlock_bh(&call->lock);
+
+ if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ rxrpc_queue_call(call);
+}
+
+/*
* Process a ping response.
*/
static void rxrpc_input_ping_response(struct rxrpc_call *call,
@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
smp_rmb();
ping_serial = call->ping_serial;
+ if (orig_serial == call->acks_lost_ping)
+ rxrpc_input_check_for_lost_ack(call);
+
if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
before(orig_serial, ping_serial))
return;
@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
struct sk_buff *skb, u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned long timo;
_enter("%p,%p", call, skb);
+ timo = READ_ONCE(call->next_rx_timo);
+ if (timo) {
+ unsigned long now = jiffies, expect_rx_by;
+
+ expect_rx_by = jiffies + timo;
+ WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+ rxrpc_reduce_call_timer(call, expect_rx_by, now,
+ rxrpc_timer_set_for_normal);
+ }
+
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
rxrpc_input_data(call, skb, skew);
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 1a2d4b112064..c1d9e7fd7448 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -21,33 +21,28 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
- * Maximum lifetime of a call (in mx).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
-/*
* How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
*/
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
* ACK let the other end know that it can free up its Tx buffer space.
*/
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
/*
* Receive window size in packets. This indicates the maximum number of
@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
/*
* Time till packet resend (in milliseconds).
*/
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
const s8 rxrpc_ack_priority[] = {
[0] = 0,
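
The misc.c hunks re-express the ACK and resend delays directly in jiffies (HZ units) rather than milliseconds, so they add straight onto jiffies deadlines without conversion. For tunables that still arrive in milliseconds, the usual conversion looks like this sketch; the constants and names are illustrative only:

#include <linux/jiffies.h>

/* Delay constants kept in jiffies so they add directly onto `jiffies`. */
static unsigned long my_soft_ack_delay = HZ;		/* roughly 1 second */
static unsigned long my_idle_ack_delay = HZ / 2;	/* roughly 500 ms */

static unsigned long my_deadline_from_msecs(unsigned int msecs)
{
	/* msecs_to_jiffies() rounds up, so short timeouts never collapse to 0. */
	return jiffies + msecs_to_jiffies(msecs);
}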
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 7edceb8522f5..f18c9248e0d4 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -14,6 +14,24 @@
unsigned int rxrpc_net_id;
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+ struct rxrpc_net *rxnet =
+ container_of(timer, struct rxrpc_net, client_conn_reap_timer);
+
+ if (rxnet->live)
+ rxrpc_queue_work(&rxnet->client_conn_reaper);
+}
+
+static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
+{
+ struct rxrpc_net *rxnet =
+ container_of(timer, struct rxrpc_net, service_conn_reap_timer);
+
+ if (rxnet->live)
+ rxrpc_queue_work(&rxnet->service_conn_reaper);
+}
+
/*
* Initialise a per-network namespace record.
*/
@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net)
struct rxrpc_net *rxnet = rxrpc_net(net);
int ret;
+ rxnet->live = true;
get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
rxnet->epoch |= RXRPC_RANDOM_EPOCH;
@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net)
INIT_LIST_HEAD(&rxnet->conn_proc_list);
INIT_LIST_HEAD(&rxnet->service_conns);
rwlock_init(&rxnet->conn_lock);
- INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
- rxrpc_service_connection_reaper);
+ INIT_WORK(&rxnet->service_conn_reaper,
+ rxrpc_service_connection_reaper);
+ timer_setup(&rxnet->service_conn_reap_timer,
+ rxrpc_service_conn_reap_timeout, 0);
rxnet->nr_client_conns = 0;
rxnet->nr_active_client_conns = 0;
@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net)
INIT_LIST_HEAD(&rxnet->waiting_client_conns);
INIT_LIST_HEAD(&rxnet->active_client_conns);
INIT_LIST_HEAD(&rxnet->idle_client_conns);
- INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
- rxrpc_discard_expired_client_conns);
+ INIT_WORK(&rxnet->client_conn_reaper,
+ rxrpc_discard_expired_client_conns);
+ timer_setup(&rxnet->client_conn_reap_timer,
+ rxrpc_client_conn_reap_timeout, 0);
INIT_LIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex);
@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net)
return 0;
err_proc:
+ rxnet->live = false;
return ret;
}
@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
{
struct rxrpc_net *rxnet = rxrpc_net(net);
+ rxnet->live = false;
rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet);
rxrpc_destroy_all_locals(rxnet);
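
net_ns.c replaces the delayed_work reapers with a plain work item plus a timer whose callback only queues the work while the namespace is still live, so nothing gets requeued during teardown. A compact sketch of that timer-drives-workqueue arrangement with invented names; it is a sketch of the pattern, not the rxrpc implementation:

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_net {
	bool live;			/* cleared before teardown starts */
	struct work_struct reaper;	/* does the actual scanning */
	struct timer_list reap_timer;	/* only schedules the work */
};

static void my_reap_work(struct work_struct *work)
{
	/* scan for and release expired objects here */
}

static void my_reap_timeout(struct timer_list *timer)
{
	struct my_net *net = container_of(timer, struct my_net, reap_timer);

	if (net->live)
		schedule_work(&net->reaper);
}

static void my_net_init(struct my_net *net)
{
	net->live = true;
	INIT_WORK(&net->reaper, my_reap_work);
	timer_setup(&net->reap_timer, my_reap_timeout, 0);
}

static void my_net_exit(struct my_net *net)
{
	net->live = false;
	del_timer_sync(&net->reap_timer);	/* no new work after this point */
	flush_work(&net->reaper);
}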
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f47659c7b224..42410e910aff 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -33,6 +33,24 @@ struct rxrpc_abort_buffer {
};
/*
+ * Arrange for a keepalive ping a certain time after we last transmitted. This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+ *
+ * Receiving a response to the ping will prevent the ->expect_rx_by timer from
+ * expiring.
+ */
+static void rxrpc_set_keepalive(struct rxrpc_call *call)
+{
+ unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+
+ keepalive_at += now;
+ WRITE_ONCE(call->keepalive_at, keepalive_at);
+ rxrpc_reduce_call_timer(call, keepalive_at, now,
+ rxrpc_timer_set_for_keepalive);
+}
+
+/*
* Fill out an ACK packet.
*/
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
/*
* Send an ACK call packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
+int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
+ rxrpc_serial_t *_serial)
{
struct rxrpc_connection *conn = NULL;
struct rxrpc_ack_buffer *pkt;
@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
ntohl(pkt->ack.firstPacket),
ntohl(pkt->ack.serial),
pkt->ack.reason, pkt->ack.nAcks);
+ if (_serial)
+ *_serial = serial;
if (ping) {
call->ping_serial = serial;
@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
call->ackr_seen = top;
spin_unlock_bh(&call->lock);
}
+
+ rxrpc_set_keepalive(call);
}
out:
@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* ACKs if a DATA packet appears to have been lost.
*/
if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
- (retrans ||
+ (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
+ retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
(call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
@@ -370,8 +394,23 @@ done:
if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = now;
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+ if (call->peer->rtt_usage > 1) {
+ unsigned long nowj = jiffies, ack_lost_at;
+
+ ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
+ if (ack_lost_at < 1)
+ ack_lost_at = 1;
+
+ ack_lost_at += nowj;
+ WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+ rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+ rxrpc_timer_set_for_lost_ack);
+ }
}
}
+
+ rxrpc_set_keepalive(call);
+
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
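
The keepalive deadline set by rxrpc_set_keepalive() and the lost-ACK deadline added above follow the same shape: convert an RTT- or timeout-derived interval to jiffies, clamp it to at least one tick, and add the current jiffies value before handing it to rxrpc_reduce_call_timer(), which only ever pulls the timer earlier. A minimal standalone sketch of that arithmetic (the helper name is hypothetical):

#include <linux/jiffies.h>

/* Hypothetical helper mirroring the ack_lost_at computation above:
 * rtt_ns is a smoothed RTT in nanoseconds; the result is an absolute
 * jiffies deadline at least one tick in the future. */
static unsigned long ack_lost_deadline(u64 rtt_ns)
{
	unsigned long delay = nsecs_to_jiffies(2 * rtt_ns);

	if (delay < 1)
		delay = 1;	/* never arm a zero-length timer */
	return jiffies + delay;
}
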
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 8510a98b87e1..cc21e8db25b0 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
+#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
rxrpc_propose_ack_terminal_ack);
- rxrpc_send_ack_packet(call, false);
+ rxrpc_send_ack_packet(call, false, NULL);
}
+#endif
write_lock_bh(&call->state_lock);
@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
case RXRPC_CALL_SERVER_RECV_REQUEST:
call->tx_phase = true;
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
- call->ack_at = call->expire_at;
+ call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock);
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
rxrpc_propose_ack_processing_op);
@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
after_eq(top, call->ackr_seen + 2) ||
(hard_ack == top && after(hard_ack, call->ackr_consumed)))
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
- true, false,
+ true, true,
rxrpc_propose_ack_rotate_rx);
- if (call->ackr_reason)
- rxrpc_send_ack_packet(call, false);
+ if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
+ rxrpc_send_ack_packet(call, false, NULL);
}
}
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 7d2595582c09..a1c53ac066a1 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -21,22 +21,6 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-enum rxrpc_command {
- RXRPC_CMD_SEND_DATA, /* send data message */
- RXRPC_CMD_SEND_ABORT, /* request abort generation */
- RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
- RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
-};
-
-struct rxrpc_send_params {
- s64 tx_total_len; /* Total Tx data length (if send data) */
- unsigned long user_call_ID; /* User's call ID */
- u32 abort_code; /* Abort code to Tx (if abort) */
- enum rxrpc_command command : 8; /* The command to implement */
- bool exclusive; /* Shared or exclusive call */
- bool upgrade; /* If the connection is upgradeable */
-};
-
/*
* Wait for space to appear in the Tx queue or a signal to occur.
*/
@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_notify_end_tx_t notify_end_tx)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned long now;
rxrpc_seq_t seq = sp->hdr.seq;
int ret, ix;
u8 annotation = RXRPC_TX_ANNO_UNACK;
@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
break;
case RXRPC_CALL_SERVER_ACK_REQUEST:
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
- call->ack_at = call->expire_at;
+ now = jiffies;
+ WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
- ktime_get_real());
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
if (!last)
break;
/* Fall through */
@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
_debug("need instant resend %d", ret);
rxrpc_instant_resend(call, ix);
} else {
- ktime_t now = ktime_get_real(), resend_at;
-
- resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-
- if (ktime_before(resend_at, call->resend_at)) {
- call->resend_at = resend_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
- }
+ unsigned long now = jiffies, resend_at;
+
+ if (call->peer->rtt_usage > 1)
+ resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
+ else
+ resend_at = rxrpc_resend_timeout;
+ if (resend_at < 1)
+ resend_at = 1;
+
+ resend_at = now + rxrpc_resend_timeout;
+ WRITE_ONCE(call->resend_at, resend_at);
+ rxrpc_reduce_call_timer(call, resend_at, now,
+ rxrpc_timer_set_for_send);
}
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
do {
/* Check to see if there's a ping ACK to reply to. */
if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
- rxrpc_send_ack_packet(call, false);
+ rxrpc_send_ack_packet(call, false, NULL);
if (!skb) {
size_t size, chunk, max, space;
@@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
if (msg->msg_flags & MSG_CMSG_COMPAT) {
if (len != sizeof(u32))
return -EINVAL;
- p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
+ p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
} else {
if (len != sizeof(unsigned long))
return -EINVAL;
- p->user_call_ID = *(unsigned long *)
+ p->call.user_call_ID = *(unsigned long *)
CMSG_DATA(cmsg);
}
got_user_ID = true;
@@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
break;
case RXRPC_TX_LENGTH:
- if (p->tx_total_len != -1 || len != sizeof(__s64))
+ if (p->call.tx_total_len != -1 || len != sizeof(__s64))
+ return -EINVAL;
+ p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+ if (p->call.tx_total_len < 0)
return -EINVAL;
- p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
- if (p->tx_total_len < 0)
+ break;
+
+ case RXRPC_SET_CALL_TIMEOUT:
+ if (len & 3 || len < 4 || len > 12)
return -EINVAL;
+ memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
+ p->call.nr_timeouts = len / 4;
+ if (p->call.timeouts.hard > INT_MAX / HZ)
+ return -ERANGE;
+ if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
+ return -ERANGE;
+ if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
+ return -ERANGE;
break;
default:
@@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
if (!got_user_ID)
return -EINVAL;
- if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+ if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
_leave(" = 0");
return 0;
@@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
cp.exclusive = rx->exclusive | p->exclusive;
cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
- p->tx_total_len, GFP_KERNEL);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
/* The socket is now unlocked */
_leave(" = %p\n", call);
@@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
{
enum rxrpc_call_state state;
struct rxrpc_call *call;
+ unsigned long now, j;
int ret;
struct rxrpc_send_params p = {
- .tx_total_len = -1,
- .user_call_ID = 0,
- .abort_code = 0,
- .command = RXRPC_CMD_SEND_DATA,
- .exclusive = false,
- .upgrade = true,
+ .call.tx_total_len = -1,
+ .call.user_call_ID = 0,
+ .call.nr_timeouts = 0,
+ .abort_code = 0,
+ .command = RXRPC_CMD_SEND_DATA,
+ .exclusive = false,
+ .upgrade = false,
};
_enter("");
@@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock;
- call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
+ call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
/* The socket is now unlocked. */
if (IS_ERR(call))
return PTR_ERR(call);
- rxrpc_put_call(call, rxrpc_call_put);
- return 0;
+ ret = 0;
+ goto out_put_unlock;
}
- call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
+ call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
if (!call) {
ret = -EBADSLT;
if (p.command != RXRPC_CMD_SEND_DATA)
@@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
goto error_put;
}
- if (p.tx_total_len != -1) {
+ if (p.call.tx_total_len != -1) {
ret = -EINVAL;
if (call->tx_total_len != -1 ||
call->tx_pending ||
call->tx_top != 0)
goto error_put;
- call->tx_total_len = p.tx_total_len;
+ call->tx_total_len = p.call.tx_total_len;
+ }
+ }
+
+ switch (p.call.nr_timeouts) {
+ case 3:
+ j = msecs_to_jiffies(p.call.timeouts.normal);
+ if (p.call.timeouts.normal > 0 && j == 0)
+ j = 1;
+ WRITE_ONCE(call->next_rx_timo, j);
+ /* Fall through */
+ case 2:
+ j = msecs_to_jiffies(p.call.timeouts.idle);
+ if (p.call.timeouts.idle > 0 && j == 0)
+ j = 1;
+ WRITE_ONCE(call->next_req_timo, j);
+ /* Fall through */
+ case 1:
+ if (p.call.timeouts.hard > 0) {
+ j = msecs_to_jiffies(p.call.timeouts.hard);
+ now = jiffies;
+ j += now;
+ WRITE_ONCE(call->expect_term_by, j);
+ rxrpc_reduce_call_timer(call, j, now,
+ rxrpc_timer_set_for_hard);
}
+ break;
}
state = READ_ONCE(call->state);
@@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = rxrpc_send_data(rx, call, msg, len, NULL);
}
+out_put_unlock:
mutex_unlock(&call->user_mutex);
error_put:
rxrpc_put_call(call, rxrpc_call_put);
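
The new RXRPC_SET_CALL_TIMEOUT control message parsed above carries one to three 32-bit values in milliseconds, in the order hard, idle, normal. A hedged userspace sketch of attaching it to a sendmsg() call follows; it assumes SOL_RXRPC as the cmsg level (as for the other rxrpc control messages) and that the UAPI header exports the new constant — the helper name and the example timeout values are illustrative only.

#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>	/* assumed to provide SOL_RXRPC and RXRPC_SET_CALL_TIMEOUT */

/* Attach hard/idle/normal call timeouts (in ms) to an outgoing sendmsg(). */
static void set_call_timeouts(struct msghdr *msg, void *ctrl, size_t ctrl_len)
{
	unsigned int timeouts[3] = { 30000, 10000, 5000 };	/* hard, idle, normal */
	struct cmsghdr *cmsg;

	msg->msg_control = ctrl;
	msg->msg_controllen = ctrl_len;	/* must be >= CMSG_SPACE(sizeof(timeouts)) */

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_SET_CALL_TIMEOUT;
	cmsg->cmsg_len = CMSG_LEN(sizeof(timeouts));
	memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));

	msg->msg_controllen = CMSG_SPACE(sizeof(timeouts));
}
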
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 34c706d2f79c..4a7af7aff37d 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -21,6 +21,8 @@ static const unsigned int four = 4;
static const unsigned int thirtytwo = 32;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
/*
* RxRPC operating parameters.
@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds */
+ /* Values measured in milliseconds but used in jiffies */
{
.procname = "req_ack_delay",
.data = &rxrpc_requested_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&zero,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
- },
- {
- .procname = "resend_timeout",
- .data = &rxrpc_resend_timeout,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_expiry",
.data = &rxrpc_conn_idle_client_expiry,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_fast_expiry",
.data = &rxrpc_conn_idle_client_fast_expiry,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
-
- /* Values measured in seconds but used in jiffies */
{
- .procname = "max_call_lifetime",
- .data = &rxrpc_max_call_lifetime,
- .maxlen = sizeof(unsigned int),
+ .procname = "resend_timeout",
+ .data = &rxrpc_resend_timeout,
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
/* Non-time values */
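
All of the entries converted above follow the same template: the tunable becomes an unsigned long holding jiffies, proc_doulongvec_ms_jiffies_minmax() translates between milliseconds in /proc and jiffies in memory, and the extra1/extra2 bounds clamp the stored value to the [1, MAX_JIFFY_OFFSET] jiffies range. A self-contained sketch of one such entry, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/sysctl.h>

static const unsigned long demo_min_jiffies = 1;
static const unsigned long demo_max_jiffies = MAX_JIFFY_OFFSET;
static unsigned long demo_delay = HZ / 10;	/* stored in jiffies, shown in ms */

static struct ctl_table demo_sysctl_table[] = {
	{
		.procname	= "demo_delay",
		.data		= &demo_delay,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
		.extra1		= (void *)&demo_min_jiffies,
		.extra2		= (void *)&demo_max_jiffies,
	},
	{ }
};
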
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 1c40caadcff9..d836f998117b 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
const struct iphdr *iph;
u16 ul;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ return 1;
+
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
@@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
const struct ipv6hdr *ip6h;
u16 ul;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ return 1;
+
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ab255b421781..ddcf04b4ab43 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -205,13 +205,14 @@ static void tcf_chain_head_change(struct tcf_chain *chain,
static void tcf_chain_flush(struct tcf_chain *chain)
{
- struct tcf_proto *tp;
+ struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
tcf_chain_head_change(chain, NULL);
- while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
+ while (tp) {
RCU_INIT_POINTER(chain->filter_chain, tp->next);
- tcf_chain_put(chain);
tcf_proto_destroy(tp);
+ tp = rtnl_dereference(chain->filter_chain);
+ tcf_chain_put(chain);
}
}
@@ -335,7 +336,8 @@ static void tcf_block_put_final(struct work_struct *work)
struct tcf_chain *chain, *tmp;
rtnl_lock();
- /* Only chain 0 should be still here. */
+
+ /* At this point, all the chains should have refcnt == 1. */
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
tcf_chain_put(chain);
rtnl_unlock();
@@ -343,15 +345,21 @@ static void tcf_block_put_final(struct work_struct *work)
}
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are now
- * destroyed in tc filter workqueue with RTNL lock, they can not race here.
+ * actions should be all removed after flushing.
*/
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei)
{
- struct tcf_chain *chain, *tmp;
+ struct tcf_chain *chain;
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ /* Hold a refcnt for all chains, except 0, so that they don't disappear
+ * while we are iterating.
+ */
+ list_for_each_entry(chain, &block->chain_list, list)
+ if (chain->index)
+ tcf_chain_hold(chain);
+
+ list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_flush(chain);
tcf_block_offload_unbind(block, q, ei);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index fb680dafac5a..6fe798c2df1a 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -258,11 +258,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
return 0;
}
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
- tcf_exts_destroy(&prog->exts);
- tcf_exts_put_net(&prog->exts);
-
if (cls_bpf_is_ebpf(prog))
bpf_prog_put(prog->filter);
else
@@ -270,6 +267,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
kfree(prog->bpf_name);
kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+ tcf_exts_destroy(&prog->exts);
+ tcf_exts_put_net(&prog->exts);
+
+ cls_bpf_free_parms(prog);
kfree(prog);
}
@@ -382,15 +387,13 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
{
struct bpf_prog *fp;
char *name = NULL;
+ bool skip_sw;
u32 bpf_fd;
bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
+ skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
- if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
- fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
- qdisc_dev(tp->q));
- else
- fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
+ fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
if (IS_ERR(fp))
return PTR_ERR(fp);
@@ -516,12 +519,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
goto errout_idr;
ret = cls_bpf_offload(tp, prog, oldprog);
- if (ret) {
- if (!oldprog)
- idr_remove_ext(&head->handle_idr, prog->handle);
- __cls_bpf_delete_prog(prog);
- return ret;
- }
+ if (ret)
+ goto errout_parms;
if (!tc_in_hw(prog->gen_flags))
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -539,6 +538,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
*arg = prog;
return 0;
+errout_parms:
+ cls_bpf_free_parms(prog);
errout_idr:
if (!oldprog)
idr_remove_ext(&head->handle_idr, prog->handle);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6361be7881f1..525eb3a6d625 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1158,9 +1158,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
return -EINVAL;
+ err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+ if (err)
+ goto put_rtab;
+
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
- goto put_rtab;
+ goto put_block;
q->link.sibling = &q->link;
q->link.common.classid = sch->handle;
@@ -1194,6 +1198,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
cbq_addprio(q, &q->link);
return 0;
+put_block:
+ tcf_block_put(q->link.block);
+
put_rtab:
qdisc_put_rtab(q->link.R_tab);
return err;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 890f4a4564e7..09c1203c1711 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -724,6 +724,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
int i;
int err;
+ q->sch = sch;
timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
err = tcf_block_get(&q->block, &q->filter_list, sch);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f5172c21349b..6a38c2503649 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1499,6 +1499,7 @@ static __init int sctp_init(void)
INIT_LIST_HEAD(&sctp_address_families);
sctp_v4_pf_init();
sctp_v6_pf_init();
+ sctp_sched_ops_init();
status = register_pernet_subsys(&sctp_defaults_ops);
if (status)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3204a9b29407..014847e25648 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -188,13 +188,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
list_for_each_entry(chunk, &t->transmitted, transmitted_list)
cb(chunk);
- list_for_each_entry(chunk, &q->retransmit, list)
+ list_for_each_entry(chunk, &q->retransmit, transmitted_list)
cb(chunk);
- list_for_each_entry(chunk, &q->sacked, list)
+ list_for_each_entry(chunk, &q->sacked, transmitted_list)
cb(chunk);
- list_for_each_entry(chunk, &q->abandoned, list)
+ list_for_each_entry(chunk, &q->abandoned, transmitted_list)
cb(chunk);
list_for_each_entry(chunk, &q->out_chunk_list, list)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index a11db21dc8a0..76ea66be0bbe 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -64,7 +64,7 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
*/
/* Mark as failed send. */
- sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
+ sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
if (asoc->peer.prsctp_capable &&
SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
asoc->sent_cnt_removable--;
@@ -254,6 +254,30 @@ static int sctp_send_reconf(struct sctp_association *asoc,
return retval;
}
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+ __u16 str_nums, __be16 *str_list)
+{
+ struct sctp_association *asoc;
+ __u16 i;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ if (!asoc->outqueue.out_qlen)
+ return true;
+
+ if (!str_nums)
+ return false;
+
+ for (i = 0; i < str_nums; i++) {
+ __u16 sid = ntohs(str_list[i]);
+
+ if (stream->out[sid].ext &&
+ !list_empty(&stream->out[sid].ext->outq))
+ return false;
+ }
+
+ return true;
+}
+
int sctp_send_reset_streams(struct sctp_association *asoc,
struct sctp_reset_streams *params)
{
@@ -317,6 +341,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
for (i = 0; i < str_nums; i++)
nstr_list[i] = htons(str_list[i]);
+ if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+ retval = -EAGAIN;
+ goto out;
+ }
+
chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
kfree(nstr_list);
@@ -377,6 +406,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
if (asoc->strreset_outstanding)
return -EINPROGRESS;
+ if (!sctp_outq_is_empty(&asoc->outqueue))
+ return -EAGAIN;
+
chunk = sctp_make_strreset_tsnreq(asoc);
if (!chunk)
return -ENOMEM;
@@ -563,7 +595,7 @@ struct sctp_chunk *sctp_process_strreset_outreq(
flags = SCTP_STREAM_RESET_INCOMING_SSN;
}
- nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
+ nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
if (nums) {
str_p = outreq->list_of_streams;
for (i = 0; i < nums; i++) {
@@ -627,7 +659,7 @@ struct sctp_chunk *sctp_process_strreset_inreq(
goto out;
}
- nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2;
+ nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
str_p = inreq->list_of_streams;
for (i = 0; i < nums; i++) {
if (ntohs(str_p[i]) >= stream->outcnt) {
@@ -636,6 +668,12 @@ struct sctp_chunk *sctp_process_strreset_inreq(
}
}
+ if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ asoc->strreset_inseq--;
+ goto err;
+ }
+
chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
if (!chunk)
goto out;
@@ -687,12 +725,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
i = asoc->strreset_inseq - request_seq - 1;
result = asoc->strreset_result[i];
if (result == SCTP_STRRESET_PERFORMED) {
- next_tsn = asoc->next_tsn;
+ next_tsn = asoc->ctsn_ack_point + 1;
init_tsn =
sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
}
goto err;
}
+
+ if (!sctp_outq_is_empty(&asoc->outqueue)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ goto err;
+ }
+
asoc->strreset_inseq++;
if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
@@ -703,9 +747,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
goto out;
}
- /* G3: The same processing as though a SACK chunk with no gap report
- * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
- * received MUST be performed.
+ /* G4: The same processing as though a FWD-TSN chunk (as defined in
+ * [RFC3758]) with all streams affected and a new cumulative TSN
+ * ACK of the Receiver's Next TSN minus 1 were received MUST be
+ * performed.
*/
max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
@@ -720,10 +765,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
init_tsn, GFP_ATOMIC);
- /* G4: The same processing as though a FWD-TSN chunk (as defined in
- * [RFC3758]) with all streams affected and a new cumulative TSN
- * ACK of the Receiver's Next TSN minus 1 were received MUST be
- * performed.
+ /* G3: The same processing as though a SACK chunk with no gap report
+ * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+ * received MUST be performed.
*/
sctp_outq_free(&asoc->outqueue);
@@ -927,7 +971,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
outreq = (struct sctp_strreset_outreq *)req;
str_p = outreq->list_of_streams;
- nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+ nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+ sizeof(__u16);
if (result == SCTP_STRRESET_PERFORMED) {
if (nums) {
@@ -956,7 +1001,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
inreq = (struct sctp_strreset_inreq *)req;
str_p = inreq->list_of_streams;
- nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+ nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+ sizeof(__u16);
*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
nums, str_p, GFP_ATOMIC);
@@ -975,6 +1021,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (result == SCTP_STRRESET_PERFORMED) {
__u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
&asoc->peer.tsn_map);
+ LIST_HEAD(temp);
sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
@@ -983,7 +1030,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
SCTP_TSN_MAP_INITIAL,
stsn, GFP_ATOMIC);
+ /* Clean up sacked and abandoned queues only. As the
+ * out_chunk_list may not be empty, splice it to temp,
+ * then get it back after sctp_outq_free is done.
+ */
+ list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
sctp_outq_free(&asoc->outqueue);
+ list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
asoc->next_tsn = rtsn;
asoc->ctsn_ack_point = asoc->next_tsn - 1;
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index 0b83ec51e43b..d8c162a4089c 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -119,16 +119,27 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
.unsched_all = sctp_sched_fcfs_unsched_all,
};
+static void sctp_sched_ops_fcfs_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
/* API to other parts of the stack */
-extern struct sctp_sched_ops sctp_sched_prio;
-extern struct sctp_sched_ops sctp_sched_rr;
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
-static struct sctp_sched_ops *sctp_sched_ops[] = {
- &sctp_sched_fcfs,
- &sctp_sched_prio,
- &sctp_sched_rr,
-};
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops)
+{
+ sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+ sctp_sched_ops_fcfs_init();
+ sctp_sched_ops_prio_init();
+ sctp_sched_ops_rr_init();
+}
int sctp_sched_set_sched(struct sctp_association *asoc,
enum sctp_sched_type sched)
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
index 384dbf3c8760..7997d35dd0fd 100644
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -333,7 +333,7 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
sctp_sched_prio_unsched(soute);
}
-struct sctp_sched_ops sctp_sched_prio = {
+static struct sctp_sched_ops sctp_sched_prio = {
.set = sctp_sched_prio_set,
.get = sctp_sched_prio_get,
.init = sctp_sched_prio_init,
@@ -345,3 +345,8 @@ struct sctp_sched_ops sctp_sched_prio = {
.sched_all = sctp_sched_prio_sched_all,
.unsched_all = sctp_sched_prio_unsched_all,
};
+
+void sctp_sched_ops_prio_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
index 7612a438c5b9..1155692448f1 100644
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -187,7 +187,7 @@ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
sctp_sched_rr_unsched(stream, soute);
}
-struct sctp_sched_ops sctp_sched_rr = {
+static struct sctp_sched_ops sctp_sched_rr = {
.set = sctp_sched_rr_set,
.get = sctp_sched_rr_get,
.init = sctp_sched_rr_init,
@@ -199,3 +199,8 @@ struct sctp_sched_ops sctp_sched_rr = {
.sched_all = sctp_sched_rr_sched_all,
.unsched_all = sctp_sched_rr_unsched_all,
};
+
+void sctp_sched_ops_rr_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2578fbd95664..94f21116dac5 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -562,7 +562,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
{
struct smc_connection *conn = &smc->conn;
struct smc_link_group *lgr = conn->lgr;
- struct smc_buf_desc *buf_desc = NULL;
+ struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
struct list_head *buf_list;
int bufsize, bufsize_short;
int sk_buf_size;
@@ -575,7 +575,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
/* use socket send buffer size (w/o overhead) as start value */
sk_buf_size = smc->sk.sk_sndbuf / 2;
- for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
+ for (bufsize_short = smc_compress_bufsize(sk_buf_size);
bufsize_short >= 0; bufsize_short--) {
if (is_rmb) {
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 73165e9ca5bf..5dd4e6c9fef2 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -264,7 +264,7 @@ out:
return status;
}
-static struct cache_detail rsi_cache_template = {
+static const struct cache_detail rsi_cache_template = {
.owner = THIS_MODULE,
.hash_size = RSI_HASHMAX,
.name = "auth.rpcsec.init",
@@ -524,7 +524,7 @@ out:
return status;
}
-static struct cache_detail rsc_cache_template = {
+static const struct cache_detail rsc_cache_template = {
.owner = THIS_MODULE,
.hash_size = RSC_HASHMAX,
.name = "auth.rpcsec.context",
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 79d55d949d9a..e68943895be4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1674,7 +1674,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
-struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
+struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
struct cache_detail *cd;
int i;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index e8e0831229cf..f9307bd6644b 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -745,7 +745,7 @@ static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt
serv->sv_tmpcnt++;
if (serv->sv_temptimer.function == NULL) {
/* setup timer to age temp transports */
- serv->sv_temptimer.function = (TIMER_FUNC_TYPE)svc_age_temp_xprts;
+ serv->sv_temptimer.function = svc_age_temp_xprts;
mod_timer(&serv->sv_temptimer,
jiffies + svc_conn_age_period * HZ);
}
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index f81eaa8e0888..740b67d5a733 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -569,7 +569,7 @@ static int unix_gid_show(struct seq_file *m,
return 0;
}
-static struct cache_detail unix_gid_cache_template = {
+static const struct cache_detail unix_gid_cache_template = {
.owner = THIS_MODULE,
.hash_size = GID_HASHMAX,
.name = "auth.unix.gid",
@@ -862,7 +862,7 @@ struct auth_ops svcauth_unix = {
.set_client = svcauth_unix_set_client,
};
-static struct cache_detail ip_map_cache_template = {
+static const struct cache_detail ip_map_cache_template = {
.owner = THIS_MODULE,
.hash_size = IP_HASHMAX,
.name = "auth.unix.ip",
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 7821085a7dd8..95fec2c057d6 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -497,6 +497,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
while ((skb = skb_peek(defq))) {
hdr = buf_msg(skb);
mtyp = msg_type(hdr);
+ blks = msg_blocks(hdr);
deliver = true;
ack = false;
update = false;
@@ -539,14 +540,13 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);
if (leave) {
- tipc_group_delete_member(grp, m);
__skb_queue_purge(defq);
+ tipc_group_delete_member(grp, m);
break;
}
if (!update)
continue;
- blks = msg_blocks(hdr);
tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
}
return;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 391775e3575c..a7a73ffe675b 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -797,11 +797,13 @@ static void vmci_transport_handle_detach(struct sock *sk)
/* We should not be sending anymore since the peer won't be
* there to receive, but we can still receive if there is data
- * left in our consume queue.
+ * left in our consume queue. If the local endpoint is a host,
+ * we can't call vsock_stream_has_data, since that may block,
+ * but a host endpoint can't read data once the VM has
+ * detached, so there is no available data in that case.
*/
- if (vsock_stream_has_data(vsk) <= 0) {
- sk->sk_state = TCP_CLOSE;
-
+ if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
+ vsock_stream_has_data(vsk) <= 0) {
if (sk->sk_state == TCP_SYN_SENT) {
/* The peer may detach from a queue pair while
* we are still in the connecting state, i.e.,
@@ -811,10 +813,12 @@ static void vmci_transport_handle_detach(struct sock *sk)
* event like a reset.
*/
+ sk->sk_state = TCP_CLOSE;
sk->sk_err = ECONNRESET;
sk->sk_error_report(sk);
return;
}
+ sk->sk_state = TCP_CLOSE;
}
sk->sk_state_change(sk);
}
@@ -2144,7 +2148,7 @@ module_exit(vmci_transport_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.4.0-k");
+MODULE_VERSION("1.0.5.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index da91bb547db3..1abcc4fc4df1 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -20,6 +20,10 @@ config CFG80211
tristate "cfg80211 - wireless configuration API"
depends on RFKILL || !RFKILL
select FW_LOADER
+ # may need to update this when certificates are changed and are
+ # using a different algorithm, though right now they shouldn't
+ # (this is here rather than below to allow it to be a module)
+ select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS
---help---
cfg80211 is the Linux wireless LAN (802.11) configuration API.
Enable this if you have a wireless device.
@@ -113,6 +117,9 @@ config CFG80211_EXTRA_REGDB_KEYDIR
certificates like in the kernel sources (net/wireless/certs/)
that shall be accepted for a signed regulatory database.
+ Note that you need to also select the correct CRYPTO_<hash> modules
+ for your certificates, and if cfg80211 is built-in they also must be.
+
config CFG80211_REG_CELLULAR_HINTS
bool "cfg80211 regulatory support for cellular base station hints"
depends on CFG80211_CERTIFICATION_ONUS
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 459611577d3d..801d4781a73b 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(lib80211_crypto_lock);
static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
int force);
static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
-static void lib80211_crypt_deinit_handler(unsigned long data);
+static void lib80211_crypt_deinit_handler(struct timer_list *t);
int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
spinlock_t *lock)
@@ -55,8 +55,8 @@ int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
info->lock = lock;
INIT_LIST_HEAD(&info->crypt_deinit_list);
- setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
- (unsigned long)info);
+ timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
+ 0);
return 0;
}
@@ -116,9 +116,10 @@ static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
spin_unlock_irqrestore(info->lock, flags);
}
-static void lib80211_crypt_deinit_handler(unsigned long data)
+static void lib80211_crypt_deinit_handler(struct timer_list *t)
{
- struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
+ struct lib80211_crypt_info *info = from_timer(info, t,
+ crypt_deinit_timer);
unsigned long flags;
lib80211_crypt_deinit_entries(info, 0);
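
The lib80211 change above, and the x25 and xfrm changes further down, are all instances of the same timer API conversion: timer_setup() records the timer_list itself as the callback argument, and the handler recovers its containing object with from_timer() (a container_of() wrapper) instead of casting an unsigned long cookie. A minimal sketch of the pattern with a hypothetical structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_obj {
	struct timer_list timer;
	int expiries;
};

static void demo_timer_fn(struct timer_list *t)
{
	struct demo_obj *obj = from_timer(obj, t, timer);	/* container_of on .timer */

	obj->expiries++;
}

static void demo_obj_init(struct demo_obj *obj)
{
	timer_setup(&obj->timer, demo_timer_fn, 0);
	mod_timer(&obj->timer, jiffies + HZ);
}
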
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a0e1951227fa..b1ac23ca20c8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2605,10 +2605,32 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
goto nla_put_failure;
}
- if (wdev->ssid_len) {
- if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
+ wdev_lock(wdev);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ if (wdev->ssid_len &&
+ nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
goto nla_put_failure;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_ADHOC: {
+ const u8 *ssid_ie;
+ if (!wdev->current_bss)
+ break;
+ ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
+ WLAN_EID_SSID);
+ if (!ssid_ie)
+ break;
+ if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+ goto nla_put_failure;
+ break;
+ }
+ default:
+ /* nothing */
+ break;
}
+ wdev_unlock(wdev);
genlmsg_end(msg, hdr);
return 0;
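
The nl80211 hunk above reads the SSID out of the current BSS's information elements: ieee80211_bss_get_ie() returns a pointer to the element header, where byte 0 is the element ID, byte 1 the payload length, and the payload starts at offset 2 — hence the nla_put() of ssid_ie[1] bytes from ssid_ie + 2. A small illustrative helper (hypothetical name) that copies such an element's payload:

#include <linux/string.h>
#include <linux/types.h>

/* Copy an information element's payload; ie points at the element
 * header (ID byte, length byte, then the payload itself). */
static size_t copy_ie_payload(const u8 *ie, u8 *buf, size_t buflen)
{
	size_t len = ie[1];

	if (len > buflen)
		len = buflen;
	memcpy(buf, ie + 2, len);
	return len;
}
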
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 3871998059de..78e71b0390be 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3644,27 +3644,14 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy,
}
}
-int __init regulatory_init(void)
+static int __init regulatory_init_db(void)
{
- int err = 0;
+ int err;
err = load_builtin_regdb_keys();
if (err)
return err;
- reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
- if (IS_ERR(reg_pdev))
- return PTR_ERR(reg_pdev);
-
- spin_lock_init(&reg_requests_lock);
- spin_lock_init(&reg_pending_beacons_lock);
- spin_lock_init(&reg_indoor_lock);
-
- rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
-
- user_alpha2[0] = '9';
- user_alpha2[1] = '7';
-
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
if (err) {
@@ -3692,6 +3679,31 @@ int __init regulatory_init(void)
return 0;
}
+#ifndef MODULE
+late_initcall(regulatory_init_db);
+#endif
+
+int __init regulatory_init(void)
+{
+ reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
+ if (IS_ERR(reg_pdev))
+ return PTR_ERR(reg_pdev);
+
+ spin_lock_init(&reg_requests_lock);
+ spin_lock_init(&reg_pending_beacons_lock);
+ spin_lock_init(&reg_indoor_lock);
+
+ rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
+
+ user_alpha2[0] = '9';
+ user_alpha2[1] = '7';
+
+#ifdef MODULE
+ return regulatory_init_db();
+#else
+ return 0;
+#endif
+}
void regulatory_exit(void)
{
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index ea87143314f3..562cc11131f6 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -415,7 +415,7 @@ static void __x25_destroy_socket(struct sock *sk)
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.expires = jiffies + 10 * HZ;
- sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_destroy_timer;
+ sk->sk_timer.function = x25_destroy_timer;
add_timer(&sk->sk_timer);
} else {
/* drop last reference so sock_put will free */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index e0cd04d28352..a6a8ab09b914 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -36,7 +36,7 @@
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);
-static void x25_t20timer_expiry(unsigned long);
+static void x25_t20timer_expiry(struct timer_list *);
static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
@@ -49,9 +49,9 @@ static inline void x25_start_t20timer(struct x25_neigh *nb)
mod_timer(&nb->t20timer, jiffies + nb->t20);
}
-static void x25_t20timer_expiry(unsigned long param)
+static void x25_t20timer_expiry(struct timer_list *t)
{
- struct x25_neigh *nb = (struct x25_neigh *)param;
+ struct x25_neigh *nb = from_timer(nb, t, t20timer);
x25_transmit_restart_request(nb);
@@ -252,7 +252,7 @@ void x25_link_device_up(struct net_device *dev)
return;
skb_queue_head_init(&nb->queue);
- setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);
+ timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
dev_hold(dev);
nb->dev = dev;
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 1dfba3c23459..fa3461002b3e 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -36,7 +36,7 @@ void x25_init_timers(struct sock *sk)
timer_setup(&x25->timer, x25_timer_expiry, 0);
/* initialized by sock_init_data */
- sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_heartbeat_expiry;
+ sk->sk_timer.function = x25_heartbeat_expiry;
}
void x25_start_heartbeat(struct sock *sk)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 1f5cee2269af..065d89606888 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -556,7 +556,7 @@ out:
return HRTIMER_NORESTART;
}
-static void xfrm_replay_timer_handler(unsigned long data);
+static void xfrm_replay_timer_handler(struct timer_list *t);
struct xfrm_state *xfrm_state_alloc(struct net *net)
{
@@ -574,8 +574,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->byspi);
tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
- setup_timer(&x->rtimer, xfrm_replay_timer_handler,
- (unsigned long)x);
+ timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
x->curlft.add_time = get_seconds();
x->lft.soft_byte_limit = XFRM_INF;
x->lft.soft_packet_limit = XFRM_INF;
@@ -1879,9 +1878,9 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
}
EXPORT_SYMBOL(xfrm_state_walk_done);
-static void xfrm_replay_timer_handler(unsigned long data)
+static void xfrm_replay_timer_handler(struct timer_list *t)
{
- struct xfrm_state *x = (struct xfrm_state *)data;
+ struct xfrm_state *x = from_timer(x, t, rtimer);
spin_lock(&x->lock);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3b4945c1eab0..adeaa1302f34 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := test_lru_dist
hostprogs-y += sock_example
diff --git a/samples/hidraw/Makefile b/samples/hidraw/Makefile
index f5c3012ffa79..dec1b22adf54 100644
--- a/samples/hidraw/Makefile
+++ b/samples/hidraw/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := hid-example
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index 19a870eed82b..0e349b80686e 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
diff --git a/samples/sockmap/Makefile b/samples/sockmap/Makefile
index 9291ab8e0f8c..73f1da4d116c 100644
--- a/samples/sockmap/Makefile
+++ b/samples/sockmap/Makefile
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := sockmap
diff --git a/samples/statx/Makefile b/samples/statx/Makefile
index 1f80a3d8cf45..59df7c25a9d1 100644
--- a/samples/statx/Makefile
+++ b/samples/statx/Makefile
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-$(CONFIG_SAMPLE_STATX) := test-statx
diff --git a/samples/uhid/Makefile b/samples/uhid/Makefile
index c95a696560a7..8d7fd6190ac4 100644
--- a/samples/uhid/Makefile
+++ b/samples/uhid/Makefile
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := uhid-example
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index f171225383cc..cb8997ed0149 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -76,7 +76,7 @@ lib-target := $(obj)/lib.a
obj-y += $(obj)/lib-ksyms.o
endif
-ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
+ifneq ($(strip $(obj-y) $(need-builtin)),)
builtin-target := $(obj)/built-in.o
endif
@@ -100,6 +100,10 @@ ifneq ($(KBUILD_CHECKSRC),0)
endif
endif
+ifneq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),)
+ cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $< ;
+endif
+
# Do section mismatch analysis for each module/built-in.o
ifdef CONFIG_DEBUG_SECTION_MISMATCH
cmd_secanalysis = ; scripts/mod/modpost $@
@@ -283,6 +287,7 @@ define rule_cc_o_c
$(call echo-cmd,checksrc) $(cmd_checksrc) \
$(call cmd_and_fixdep,cc_o_c) \
$(cmd_modversions_c) \
+ $(cmd_checkdoc) \
$(call echo-cmd,objtool) $(cmd_objtool) \
$(call echo-cmd,record_mcount) $(cmd_record_mcount)
endef
@@ -561,7 +566,7 @@ targets := $(filter-out $(PHONY), $(targets))
PHONY += $(subdir-ym)
$(subdir-ym):
- $(Q)$(MAKE) $(build)=$@
+ $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1)
# Add FORCE to the prequisites of a target to force it to be always rebuilt.
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 08eb40a7729f..1ca4dcd2d500 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -57,7 +57,7 @@ multi-objs-m := $(foreach m, $(multi-used-m), $($(m:.o=-objs)) $($(m:.o=-y)))
subdir-obj-y := $(filter %/built-in.o, $(obj-y))
# Replace multi-part objects by their individual parts, look at local dir only
-real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
+real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m)))
real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m)))
# DTB
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 6f099f915dcf..94b664817ad9 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -83,8 +83,11 @@ def print_result(symboltype, symbolformat, argc):
for d, n in delta:
if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
- print("Total: Before=%d, After=%d, chg %+.2f%%" % \
- (otot, ntot, (ntot - otot)*100.0/otot))
+ if otot:
+ percent = (ntot - otot) * 100.0 / otot
+ else:
+ percent = 0
+ print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
if sys.argv[1] == "-c":
print_result("Function", "tT", 3)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 95cda3ecc66b..040aa79e1d9d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5753,7 +5753,7 @@ sub process {
for (my $count = $linenr; $count <= $lc; $count++) {
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
- if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNO]).)/) {
+ if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNOx]).)/) {
$bad_extension = $1;
last;
}
diff --git a/scripts/coccicheck b/scripts/coccicheck
index d5f28d5044e7..ecfac64b39fe 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -30,12 +30,6 @@ else
VERBOSE=0
fi
-if [ -z "$J" ]; then
- NPROC=$(getconf _NPROCESSORS_ONLN)
-else
- NPROC="$J"
-fi
-
FLAGS="--very-quiet"
# You can use SPFLAGS to append extra arguments to coccicheck or override any
@@ -70,6 +64,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
# Take only the last argument, which is the C file to test
shift $(( $# - 1 ))
OPTIONS="$COCCIINCLUDE $1"
+
+ # No need to parallelize Coccinelle since this mode takes one input file.
+ NPROC=1
else
ONLINE=0
if [ "$KBUILD_EXTMOD" = "" ] ; then
@@ -77,6 +74,12 @@ else
else
OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
fi
+
+ if [ -z "$J" ]; then
+ NPROC=$(getconf _NPROCESSORS_ONLN)
+ else
+ NPROC="$J"
+ fi
fi
if [ "$KBUILD_EXTMOD" != "" ] ; then
diff --git a/scripts/coccinelle/api/setup_timer.cocci b/scripts/coccinelle/api/setup_timer.cocci
deleted file mode 100644
index e4577089dcb9..000000000000
--- a/scripts/coccinelle/api/setup_timer.cocci
+++ /dev/null
@@ -1,277 +0,0 @@
-/// Use setup_timer function instead of initializing timer with the function
-/// and data fields
-// Confidence: High
-// Copyright: (C) 2016 Vaishali Thakkar, Oracle. GPLv2
-// Copyright: (C) 2017 Kees Cook, Google. GPLv2
-// Options: --no-includes --include-headers
-// Keywords: init_timer, setup_timer
-
-virtual patch
-virtual context
-virtual org
-virtual report
-
-// Match the common cases first to avoid Coccinelle parsing loops with
-// "... when" clauses.
-
-@match_immediate_function_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-(
--\(e.function\|e->function\) = func;
--\(e.data\|e->data\) = da;
-|
--\(e.data\|e->data\) = da;
--\(e.function\|e->function\) = func;
-)
-
-@match_immediate_function_data_before_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
-(
--\(e.function\|e->function\) = func;
--\(e.data\|e->data\) = da;
-|
--\(e.data\|e->data\) = da;
--\(e.function\|e->function\) = func;
-)
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-
-@match_function_and_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, e2, e3, e4, e5, func, da;
-@@
-
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
- ... when != func = e2
- when != da = e3
-(
--e.function = func;
-... when != da = e4
--e.data = da;
-|
--e->function = func;
-... when != da = e4
--e->data = da;
-|
--e.data = da;
-... when != func = e5
--e.function = func;
-|
--e->data = da;
-... when != func = e5
--e->function = func;
-)
-
-@match_function_and_data_before_init_timer
-depends on patch && !context && !org && !report@
-expression e, e2, e3, e4, e5, func, da;
-@@
-(
--e.function = func;
-... when != da = e4
--e.data = da;
-|
--e->function = func;
-... when != da = e4
--e->data = da;
-|
--e.data = da;
-... when != func = e5
--e.function = func;
-|
--e->data = da;
-... when != func = e5
--e->function = func;
-)
-... when != func = e2
- when != da = e3
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-
-@r1 exists@
-expression t;
-identifier f;
-position p;
-@@
-
-f(...) { ... when any
- init_timer@p(\(&t\|t\))
- ... when any
-}
-
-@r2 exists@
-expression r1.t;
-identifier g != r1.f;
-expression e8;
-@@
-
-g(...) { ... when any
- \(t.data\|t->data\) = e8
- ... when any
-}
-
-// It is dangerous to use setup_timer if data field is initialized
-// in another function.
-
-@script:python depends on r2@
-p << r1.p;
-@@
-
-cocci.include_match(False)
-
-@r3 depends on patch && !context && !org && !report@
-expression r1.t, func, e7;
-position r1.p;
-@@
-
-(
--init_timer@p(&t);
-+setup_timer(&t, func, 0UL);
-... when != func = e7
--t.function = func;
-|
--t.function = func;
-... when != func = e7
--init_timer@p(&t);
-+setup_timer(&t, func, 0UL);
-|
--init_timer@p(t);
-+setup_timer(t, func, 0UL);
-... when != func = e7
--t->function = func;
-|
--t->function = func;
-... when != func = e7
--init_timer@p(t);
-+setup_timer(t, func, 0UL);
-)
-
-// ----------------------------------------------------------------------------
-
-@match_immediate_function_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression da, e, func;
-position j0, j1, j2;
-@@
-
-* init_timer@j0 (&e);
-(
-* e@j1.function = func;
-* e@j2.data = da;
-|
-* e@j1.data = da;
-* e@j2.function = func;
-)
-
-@match_function_and_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression a, b, e1, e2, e3, e4, e5;
-position j0 != match_immediate_function_data_after_init_timer_context.j0,j1,j2;
-@@
-
-* init_timer@j0 (&e1);
-... when != a = e2
- when != b = e3
-(
-* e1@j1.function = a;
-... when != b = e4
-* e1@j2.data = b;
-|
-* e1@j1.data = b;
-... when != a = e5
-* e1@j2.function = a;
-)
-
-@r3_context depends on !patch && (context || org || report)@
-expression c, e6, e7;
-position r1.p;
-position j0 !=
- {match_immediate_function_data_after_init_timer_context.j0,
- match_function_and_data_after_init_timer_context.j0}, j1;
-@@
-
-* init_timer@j0@p (&e6);
-... when != c = e7
-* e6@j1.function = c;
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_org
-depends on org@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-j2 << match_immediate_function_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python match_function_and_data_after_init_timer_org depends on org@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-j2 << match_function_and_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python r3_org depends on org@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_report
-depends on report@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python match_function_and_data_after_init_timer_report depends on report@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python r3_report depends on report@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 1f5ce959f596..39e07d8574dd 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -44,9 +44,16 @@
set -o errexit
set -o nounset
+READELF="${CROSS_COMPILE}readelf"
+ADDR2LINE="${CROSS_COMPILE}addr2line"
+SIZE="${CROSS_COMPILE}size"
+NM="${CROSS_COMPILE}nm"
+
command -v awk >/dev/null 2>&1 || die "awk isn't installed"
-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
+command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
+command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
usage() {
echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
@@ -69,10 +76,10 @@ die() {
find_dir_prefix() {
local objfile=$1
- local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+ local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
[[ -z $start_kernel_addr ]] && return
- local file_line=$(addr2line -e $objfile $start_kernel_addr)
+ local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
[[ -z $file_line ]] && return
local prefix=${file_line%init/main.c:*}
@@ -104,7 +111,7 @@ __faddr2line() {
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates.
- file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
+ file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
while read symbol; do
local fields=($symbol)
local sym_base=0x${fields[0]}
@@ -156,10 +163,10 @@ __faddr2line() {
# pass real address to addr2line
echo "$func+$offset/$sym_size:"
- addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+ ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
DONE=1
- done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+ done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
}
[[ $# -lt 2 ]] && usage
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 20136ffefb23..3c8bd9bb4267 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -1061,7 +1061,7 @@ struct symbol **sym_re_search(const char *pattern)
}
if (sym_match_arr) {
qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
- sym_arr = malloc((cnt+1) * sizeof(struct symbol));
+ sym_arr = malloc((cnt+1) * sizeof(struct symbol *));
if (!sym_arr)
goto sym_re_search_free;
for (i = 0; i < cnt; i++)
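
The fix above matters because sym_arr holds pointers to struct symbol, not the structures themselves; the old sizeof(struct symbol) merely over-allocated, but the same slip in the other direction under-allocates and corrupts memory. A minimal userspace sketch of the safer idiom, using a hypothetical struct sym, is:

    #include <stdlib.h>

    struct sym { int id; };

    static struct sym **alloc_sym_array(size_t n)
    {
        /* sizeof(*arr) tracks the element type automatically, so it
         * cannot drift from the declaration the way an explicit
         * sizeof(struct sym) or sizeof(struct sym *) can.
         */
        struct sym **arr = malloc((n + 1) * sizeof(*arr));

        return arr;     /* caller checks for NULL and frees */
    }
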
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 7bd52b8f63d4..bd29a92b4b48 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -58,6 +58,7 @@ Output format selection (mutually exclusive):
-man Output troff manual page format. This is the default.
-rst Output reStructuredText format.
-text Output plain text format.
+ -none Do not output documentation, only warnings.
Output selection (mutually exclusive):
-export Only output documentation for symbols that have been
@@ -532,6 +533,8 @@ while ($ARGV[0] =~ m/^-(.*)/) {
$output_mode = "gnome";
@highlights = @highlights_gnome;
$blankline = $blankline_gnome;
+ } elsif ($cmd eq "-none") {
+ $output_mode = "none";
} elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
$modulename = shift @ARGV;
} elsif ($cmd eq "-function") { # to only output specific functions
@@ -2117,6 +2120,24 @@ sub output_blockhead_list(%) {
}
}
+
+## none mode output functions
+
+sub output_function_none(%) {
+}
+
+sub output_enum_none(%) {
+}
+
+sub output_typedef_none(%) {
+}
+
+sub output_struct_none(%) {
+}
+
+sub output_blockhead_none(%) {
+}
+
##
# generic output function for all types (function, struct/union, typedef, enum);
# calls the generated, variable output_ function name based on
@@ -3143,7 +3164,9 @@ sub process_file($) {
}
}
if ($initial_section_counter == $section_counter) {
- print STDERR "${file}:1: warning: no structured comments found\n";
+ if ($output_mode ne "none") {
+ print STDERR "${file}:1: warning: no structured comments found\n";
+ }
if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
print STDERR " Was looking for '$_'.\n" for keys %function_table;
}
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 9ed96aefc72d..c23534925b38 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -39,14 +39,13 @@ if test "$(objtree)" != "$(srctree)"; then \
false; \
fi ; \
$(srctree)/scripts/setlocalversion --save-scmversion; \
-ln -sf $(srctree) $(2); \
tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
- $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
-rm -f $(2) $(objtree)/.scmversion
+ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+rm -f $(objtree)/.scmversion
# rpm-pkg
# ---------------------------------------------------------------------------
-rpm-pkg rpm: FORCE
+rpm-pkg: FORCE
$(MAKE) clean
$(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index caaf51dda648..d4fa04d91439 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -533,7 +533,7 @@ static ssize_t ns_revision_read(struct file *file, char __user *buf,
long last_read;
int avail;
- mutex_lock(&rev->ns->lock);
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
last_read = rev->last_read;
if (last_read == rev->ns->revision) {
mutex_unlock(&rev->ns->lock);
@@ -543,7 +543,7 @@ static ssize_t ns_revision_read(struct file *file, char __user *buf,
last_read !=
READ_ONCE(rev->ns->revision)))
return -ERESTARTSYS;
- mutex_lock(&rev->ns->lock);
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
}
avail = sprintf(buffer, "%ld\n", rev->ns->revision);
@@ -577,7 +577,7 @@ static unsigned int ns_revision_poll(struct file *file, poll_table *pt)
unsigned int mask = 0;
if (rev) {
- mutex_lock(&rev->ns->lock);
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
poll_wait(file, &rev->ns->wait, pt);
if (rev->last_read < rev->ns->revision)
mask |= POLLIN | POLLRDNORM;
@@ -1643,7 +1643,7 @@ static int ns_mkdir_op(struct inode *dir, struct dentry *dentry, umode_t mode)
*/
inode_unlock(dir);
error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
- mutex_lock(&parent->lock);
+ mutex_lock_nested(&parent->lock, parent->level);
inode_lock_nested(dir, I_MUTEX_PARENT);
if (error)
goto out;
@@ -1692,7 +1692,7 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
inode_unlock(dir);
inode_unlock(dentry->d_inode);
- mutex_lock(&parent->lock);
+ mutex_lock_nested(&parent->lock, parent->level);
ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name,
dentry->d_name.len));
if (!ns) {
@@ -1747,7 +1747,7 @@ void __aafs_ns_rmdir(struct aa_ns *ns)
__aafs_profile_rmdir(child);
list_for_each_entry(sub, &ns->sub_ns, base.list) {
- mutex_lock(&sub->lock);
+ mutex_lock_nested(&sub->lock, sub->level);
__aafs_ns_rmdir(sub);
mutex_unlock(&sub->lock);
}
@@ -1877,7 +1877,7 @@ int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
/* subnamespaces */
list_for_each_entry(sub, &ns->sub_ns, base.list) {
- mutex_lock(&sub->lock);
+ mutex_lock_nested(&sub->lock, sub->level);
error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL);
mutex_unlock(&sub->lock);
if (error)
@@ -1921,7 +1921,7 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
/* is next namespace a child */
if (!list_empty(&ns->sub_ns)) {
next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
- mutex_lock(&next->lock);
+ mutex_lock_nested(&next->lock, next->level);
return next;
}
@@ -1931,7 +1931,7 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
mutex_unlock(&ns->lock);
next = list_next_entry(ns, base.list);
if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
- mutex_lock(&next->lock);
+ mutex_lock_nested(&next->lock, next->level);
return next;
}
ns = parent;
@@ -2039,7 +2039,7 @@ static void *p_start(struct seq_file *f, loff_t *pos)
f->private = root;
/* find the first profile */
- mutex_lock(&root->lock);
+ mutex_lock_nested(&root->lock, root->level);
profile = __first_profile(root, root);
/* skip to position */
@@ -2451,7 +2451,7 @@ static int __init aa_create_aafs(void)
aafs_mnt = kern_mount(&aafs_ops);
if (IS_ERR(aafs_mnt))
panic("can't set apparmorfs up\n");
- aafs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+ aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
/* Populate fs tree. */
error = entry_create_dir(&aa_sfs_entry, NULL);
@@ -2491,7 +2491,7 @@ static int __init aa_create_aafs(void)
ns_subrevision(root_ns) = dent;
/* policy tree referenced by magic policy symlink */
- mutex_lock(&root_ns->lock);
+ mutex_lock_nested(&root_ns->lock, root_ns->level);
error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy",
aafs_mnt->mnt_root);
mutex_unlock(&root_ns->lock);
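
These conversions use the namespace's nesting depth as the lockdep subclass, so taking a parent namespace lock and then a child lock is no longer reported as recursive locking of a single lock class. A minimal kernel-style sketch of the pattern (the struct and helper are illustrative, not the AppArmor code):

    #include <linux/mutex.h>

    struct node {
        struct mutex lock;
        int level;              /* 0 for the root, parent->level + 1 below it */
        struct node *parent;
    };

    static void lock_parent_then_child(struct node *parent, struct node *child)
    {
        /* Same lock class, but a distinct lockdep subclass per depth, so
         * lockdep can model the parent -> child ordering.  Lockdep only
         * supports a few subclasses (MAX_LOCKDEP_SUBCLASSES), so this
         * assumes shallow nesting, as the namespace tree does.
         */
        mutex_lock_nested(&parent->lock, parent->level);
        mutex_lock_nested(&child->lock, child->level);

        /* ... operate on both nodes ... */

        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
    }
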
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index dd754b7850a8..04ba9d0718ea 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -305,6 +305,7 @@ static int change_profile_perms(struct aa_profile *profile,
* __attach_match_ - find an attachment match
* @name - to match against (NOT NULL)
* @head - profile list to walk (NOT NULL)
+ * @info - info message if there was an error (NOT NULL)
*
* Do a linear search on the profiles in the list. There is a matching
* preference where an exact match is preferred over a name which uses
@@ -316,28 +317,46 @@ static int change_profile_perms(struct aa_profile *profile,
* Returns: profile or NULL if no match found
*/
static struct aa_profile *__attach_match(const char *name,
- struct list_head *head)
+ struct list_head *head,
+ const char **info)
{
int len = 0;
+ bool conflict = false;
struct aa_profile *profile, *candidate = NULL;
list_for_each_entry_rcu(profile, head, base.list) {
- if (profile->label.flags & FLAG_NULL)
+ if (profile->label.flags & FLAG_NULL &&
+ &profile->label == ns_unconfined(profile->ns))
continue;
- if (profile->xmatch && profile->xmatch_len > len) {
- unsigned int state = aa_dfa_match(profile->xmatch,
- DFA_START, name);
- u32 perm = dfa_user_allow(profile->xmatch, state);
- /* any accepting state means a valid match. */
- if (perm & MAY_EXEC) {
- candidate = profile;
- len = profile->xmatch_len;
+
+ if (profile->xmatch) {
+ if (profile->xmatch_len == len) {
+ conflict = true;
+ continue;
+ } else if (profile->xmatch_len > len) {
+ unsigned int state;
+ u32 perm;
+
+ state = aa_dfa_match(profile->xmatch,
+ DFA_START, name);
+ perm = dfa_user_allow(profile->xmatch, state);
+ /* any accepting state means a valid match. */
+ if (perm & MAY_EXEC) {
+ candidate = profile;
+ len = profile->xmatch_len;
+ conflict = false;
+ }
}
} else if (!strcmp(profile->base.name, name))
/* exact non-re match, no more searching required */
return profile;
}
+ if (conflict) {
+ *info = "conflicting profile attachments";
+ return NULL;
+ }
+
return candidate;
}
@@ -346,16 +365,17 @@ static struct aa_profile *__attach_match(const char *name,
* @ns: the current namespace (NOT NULL)
* @list: list to search (NOT NULL)
* @name: the executable name to match against (NOT NULL)
+ * @info: info message if there was an error
*
* Returns: label or NULL if no match found
*/
static struct aa_label *find_attach(struct aa_ns *ns, struct list_head *list,
- const char *name)
+ const char *name, const char **info)
{
struct aa_profile *profile;
rcu_read_lock();
- profile = aa_get_profile(__attach_match(name, list));
+ profile = aa_get_profile(__attach_match(name, list, info));
rcu_read_unlock();
return profile ? &profile->label : NULL;
@@ -448,11 +468,11 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
if (xindex & AA_X_CHILD)
/* released by caller */
new = find_attach(ns, &profile->base.profiles,
- name);
+ name, info);
else
/* released by caller */
new = find_attach(ns, &ns->base.profiles,
- name);
+ name, info);
*lookupname = name;
break;
}
@@ -516,7 +536,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
if (profile_unconfined(profile)) {
new = find_attach(profile->ns, &profile->ns->base.profiles,
- name);
+ name, &info);
if (new) {
AA_DEBUG("unconfined attached to new label");
return new;
@@ -541,9 +561,21 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
}
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
- struct aa_profile *new_profile = aa_new_null_profile(profile,
- false, name,
- GFP_ATOMIC);
+ struct aa_profile *new_profile = NULL;
+ char *n = kstrdup(name, GFP_ATOMIC);
+
+ if (n) {
+ /* name is ptr into buffer */
+ long pos = name - buffer;
+ /* break per cpu buffer hold */
+ put_buffers(buffer);
+ new_profile = aa_new_null_profile(profile, false, n,
+ GFP_KERNEL);
+ get_buffers(buffer);
+ name = buffer + pos;
+ strcpy((char *)name, n);
+ kfree(n);
+ }
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 3382518b87fa..e79bf44396a3 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -226,18 +226,12 @@ static u32 map_old_perms(u32 old)
struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
struct path_cond *cond)
{
- struct aa_perms perms;
-
/* FIXME: change over to new dfa format
* currently file perms are encoded in the dfa, new format
* splits the permissions from the dfa. This mapping can be
* done at profile load
*/
- perms.deny = 0;
- perms.kill = perms.stop = 0;
- perms.complain = perms.cond = 0;
- perms.hide = 0;
- perms.prompt = 0;
+ struct aa_perms perms = { };
if (uid_eq(current_fsuid(), cond->uid)) {
perms.allow = map_old_perms(dfa_user_allow(dfa, state));
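
Replacing the field-by-field zeroing with an empty initializer relies on the C rule that members not named in an initializer are zero-initialized, so fields added to the struct later start out as 0 without every call site being updated. A tiny illustrative sketch (the struct is made up, not struct aa_perms):

    struct perms {
        unsigned int allow;
        unsigned int audit;
        unsigned int quiet;
        unsigned int deny;
    };

    /* Members not named in a designated initializer are zeroed.  The empty
     * "{ }" form used in the patch is a GNU extension common in the kernel;
     * strictly portable C would write "{ 0 }".
     */
    struct perms example_perms(unsigned int allow, unsigned int audit)
    {
        struct perms p = {
            .allow = allow,
            .audit = audit,
        };

        return p;
    }
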
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index f546707a2bbb..6505e1ad9e23 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -86,7 +86,7 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
static inline bool path_mediated_fs(struct dentry *dentry)
{
- return !(dentry->d_sb->s_flags & MS_NOUSER);
+ return !(dentry->d_sb->s_flags & SB_NOUSER);
}
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index ad28e03a6f30..324fe5c60f87 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -2115,7 +2115,7 @@ void __aa_labelset_update_subtree(struct aa_ns *ns)
__labelset_update(ns);
list_for_each_entry(child, &ns->sub_ns, base.list) {
- mutex_lock(&child->lock);
+ mutex_lock_nested(&child->lock, child->level);
__aa_labelset_update_subtree(child);
mutex_unlock(&child->lock);
}
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 08ca26bcca77..4d5e98e49d5e 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -317,14 +317,11 @@ static u32 map_other(u32 x)
void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
struct aa_perms *perms)
{
- perms->deny = 0;
- perms->kill = perms->stop = 0;
- perms->complain = perms->cond = 0;
- perms->hide = 0;
- perms->prompt = 0;
- perms->allow = dfa_user_allow(dfa, state);
- perms->audit = dfa_user_audit(dfa, state);
- perms->quiet = dfa_user_quiet(dfa, state);
+ *perms = (struct aa_perms) {
+ .allow = dfa_user_allow(dfa, state),
+ .audit = dfa_user_audit(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state),
+ };
/* for v5 perm mapping in the policydb, the other set is used
* to extend the general perm set
@@ -426,7 +423,6 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
void (*cb)(struct audit_buffer *, void *))
{
int type, error;
- bool stop = false;
u32 denied = request & (~perms->allow | perms->deny);
if (likely(!denied)) {
@@ -447,8 +443,6 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
else
type = AUDIT_APPARMOR_DENIED;
- if (denied & perms->stop)
- stop = true;
if (denied == (denied & perms->hide))
error = -ENOENT;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 17893fde4487..9a65eeaf7dfa 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -846,7 +846,7 @@ module_param_call(audit, param_set_audit, param_get_audit,
/* Determines if audit header is included in audited messages. This
* provides more context if the audit daemon is not running
*/
-bool aa_g_audit_header = 1;
+bool aa_g_audit_header = true;
module_param_named(audit_header, aa_g_audit_header, aabool,
S_IRUSR | S_IWUSR);
@@ -871,7 +871,7 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
* DEPRECATED: read only as strict checking of load is always done now
* that non-root users (user namespaces) can load policy.
*/
-bool aa_g_paranoid_load = 1;
+bool aa_g_paranoid_load = true;
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
/* Boot time disable flag */
@@ -1119,7 +1119,7 @@ static int __init apparmor_init(void)
if (!apparmor_enabled || !security_module_enable("apparmor")) {
aa_info_message("AppArmor disabled by boot time parameter");
- apparmor_enabled = 0;
+ apparmor_enabled = false;
return 0;
}
@@ -1175,7 +1175,7 @@ alloc_out:
aa_destroy_aafs();
aa_teardown_dfa_engine();
- apparmor_enabled = 0;
+ apparmor_enabled = false;
return error;
}
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 82a64b58041d..ed9b4d0f9f7e 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -216,13 +216,12 @@ static unsigned int match_mnt_flags(struct aa_dfa *dfa, unsigned int state,
static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa,
unsigned int state)
{
- struct aa_perms perms;
-
- perms.kill = 0;
- perms.allow = dfa_user_allow(dfa, state);
- perms.audit = dfa_user_audit(dfa, state);
- perms.quiet = dfa_user_quiet(dfa, state);
- perms.xindex = dfa_user_xindex(dfa, state);
+ struct aa_perms perms = {
+ .allow = dfa_user_allow(dfa, state),
+ .audit = dfa_user_audit(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state),
+ .xindex = dfa_user_xindex(dfa, state),
+ };
return perms;
}
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 4243b0c3f0e4..b0b58848c248 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -502,7 +502,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
{
struct aa_profile *p, *profile;
const char *bname;
- char *name;
+ char *name = NULL;
AA_BUG(!parent);
@@ -545,7 +545,7 @@ name:
profile->file.dfa = aa_get_dfa(nulldfa);
profile->policy.dfa = aa_get_dfa(nulldfa);
- mutex_lock(&profile->ns->lock);
+ mutex_lock_nested(&profile->ns->lock, profile->ns->level);
p = __find_child(&parent->base.profiles, bname);
if (p) {
aa_free_profile(profile);
@@ -562,6 +562,7 @@ out:
return profile;
fail:
+ kfree(name);
aa_free_profile(profile);
return NULL;
}
@@ -905,7 +906,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
} else
ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label));
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
/* check for duplicate rawdata blobs: space and file dedup */
list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
if (aa_rawdata_eq(rawdata_ent, udata)) {
@@ -1116,13 +1117,13 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
- mutex_lock(&ns->parent->lock);
+ mutex_lock_nested(&ns->parent->lock, ns->level);
__aa_remove_ns(ns);
__aa_bump_ns_revision(ns);
mutex_unlock(&ns->parent->lock);
} else {
/* remove profile */
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
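
Two of the policy.c hunks work together: name is now initialized to NULL so that the shared fail: path can call kfree(name) whether or not the allocation ever happened, relying on kfree(NULL) being a no-op. A minimal kernel-style sketch of that error-path idiom (function and variable names are illustrative):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int build_names(const char *a, const char *b, char **out_a, char **out_b)
    {
        char *na = NULL, *nb = NULL;    /* NULL so fail: can kfree() unconditionally */

        na = kstrdup(a, GFP_KERNEL);
        if (!na)
            goto fail;
        nb = kstrdup(b, GFP_KERNEL);
        if (!nb)
            goto fail;

        *out_a = na;
        *out_b = nb;
        return 0;

    fail:
        kfree(na);      /* kfree(NULL) is a safe no-op */
        kfree(nb);
        return -ENOMEM;
    }
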
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
index 62a3589c62ab..b1e629cba70b 100644
--- a/security/apparmor/policy_ns.c
+++ b/security/apparmor/policy_ns.c
@@ -256,7 +256,8 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
ns = alloc_ns(parent->base.hname, name);
if (!ns)
return NULL;
- mutex_lock(&ns->lock);
+ ns->level = parent->level + 1;
+ mutex_lock_nested(&ns->lock, ns->level);
error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
if (error) {
AA_ERROR("Failed to create interface for ns %s\n",
@@ -266,7 +267,6 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
return ERR_PTR(error);
}
ns->parent = aa_get_ns(parent);
- ns->level = parent->level + 1;
list_add_rcu(&ns->base.list, &parent->sub_ns);
/* add list ref */
aa_get_ns(ns);
@@ -313,7 +313,7 @@ struct aa_ns *aa_prepare_ns(struct aa_ns *parent, const char *name)
{
struct aa_ns *ns;
- mutex_lock(&parent->lock);
+ mutex_lock_nested(&parent->lock, parent->level);
/* try and find the specified ns and if it doesn't exist create it */
/* released by caller */
ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
@@ -336,7 +336,7 @@ static void destroy_ns(struct aa_ns *ns)
if (!ns)
return;
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
/* release all profiles in this namespace */
__aa_profile_list_release(&ns->base.profiles);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 4ede87c30f8b..59a1a25b7d43 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -157,7 +157,7 @@ static void do_loaddata_free(struct work_struct *work)
struct aa_ns *ns = aa_get_ns(d->ns);
if (ns) {
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
__aa_fs_remove_rawdata(d);
mutex_unlock(&ns->lock);
aa_put_ns(ns);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index d8bc842594ed..cf4d234febe9 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -47,7 +47,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
/**
* audit_resource - audit setting resource limit
* @profile: profile being enforced (NOT NULL)
- * @resoure: rlimit being auditing
+ * @resource: rlimit being audited
* @value: value being set
* @error: error value
*
@@ -128,7 +128,7 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
error = fn_for_each(label, profile,
audit_resource(profile, resource,
new_rlim->rlim_max, peer,
- "cap_sys_resoure", -EACCES));
+ "cap_sys_resource", -EACCES));
else
error = fn_for_each_confined(label, profile,
profile_setrlimit(profile, resource, new_rlim));
diff --git a/security/keys/gc.c b/security/keys/gc.c
index afb3a9175d76..7207e6094dc1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -29,10 +29,10 @@ DECLARE_WORK(key_gc_work, key_garbage_collector);
/*
* Reaper for links from keyrings to dead keys.
*/
-static void key_gc_timer_func(unsigned long);
+static void key_gc_timer_func(struct timer_list *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
-static time_t key_gc_next_run = LONG_MAX;
+static time64_t key_gc_next_run = TIME64_MAX;
static struct key_type *key_gc_dead_keytype;
static unsigned long key_gc_flags;
@@ -53,12 +53,12 @@ struct key_type key_type_dead = {
* Schedule a garbage collection run.
* - time precision isn't particularly important
*/
-void key_schedule_gc(time_t gc_at)
+void key_schedule_gc(time64_t gc_at)
{
unsigned long expires;
- time_t now = current_kernel_time().tv_sec;
+ time64_t now = ktime_get_real_seconds();
- kenter("%ld", gc_at - now);
+ kenter("%lld", gc_at - now);
if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
kdebug("IMMEDIATE");
@@ -84,10 +84,10 @@ void key_schedule_gc_links(void)
* Some key's cleanup time was met after it expired, so we need to get the
* reaper to go through a cycle finding expired keys.
*/
-static void key_gc_timer_func(unsigned long data)
+static void key_gc_timer_func(struct timer_list *unused)
{
kenter("");
- key_gc_next_run = LONG_MAX;
+ key_gc_next_run = TIME64_MAX;
key_schedule_gc_links();
}
@@ -184,11 +184,11 @@ static void key_garbage_collector(struct work_struct *work)
struct rb_node *cursor;
struct key *key;
- time_t new_timer, limit;
+ time64_t new_timer, limit;
kenter("[%lx,%x]", key_gc_flags, gc_state);
- limit = current_kernel_time().tv_sec;
+ limit = ktime_get_real_seconds();
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
@@ -204,7 +204,7 @@ static void key_garbage_collector(struct work_struct *work)
gc_state |= KEY_GC_REAPING_DEAD_1;
kdebug("new pass %x", gc_state);
- new_timer = LONG_MAX;
+ new_timer = TIME64_MAX;
/* As only this function is permitted to remove things from the key
* serial tree, if cursor is non-NULL then it will always point to a
@@ -235,7 +235,7 @@ continue_scanning:
if (gc_state & KEY_GC_SET_TIMER) {
if (key->expiry > limit && key->expiry < new_timer) {
- kdebug("will expire %x in %ld",
+ kdebug("will expire %x in %lld",
key_serial(key), key->expiry - limit);
new_timer = key->expiry;
}
@@ -276,7 +276,7 @@ maybe_resched:
*/
kdebug("pass complete");
- if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
+ if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
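
The time_t/current_kernel_time() to time64_t/ktime_get_real_seconds() conversion keeps key expiry arithmetic safe past 2038 on 32-bit architectures, where time_t is only 32 bits wide. A kernel-style sketch of the replacement pattern:

    #include <linux/timekeeping.h>
    #include <linux/time64.h>
    #include <linux/types.h>

    static bool key_like_expired(time64_t expiry)
    {
        /* ktime_get_real_seconds() returns a 64-bit wall-clock value on
         * all architectures, so this comparison does not wrap in 2038.
         */
        time64_t now = ktime_get_real_seconds();

        return expiry != TIME64_MAX && now >= expiry;
    }
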
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 503adbae7b0d..9f8208dc0e55 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -130,7 +130,7 @@ struct keyring_search_context {
int skipped_ret;
bool possessed;
key_ref_t result;
- struct timespec now;
+ time64_t now;
};
extern bool key_default_cmp(const struct key *key,
@@ -169,10 +169,10 @@ extern void key_change_session_keyring(struct callback_head *twork);
extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
-extern void keyring_gc(struct key *keyring, time_t limit);
+extern void keyring_gc(struct key *keyring, time64_t limit);
extern void keyring_restriction_gc(struct key *keyring,
struct key_type *dead_type);
-extern void key_schedule_gc(time_t gc_at);
+extern void key_schedule_gc(time64_t gc_at);
extern void key_schedule_gc_links(void);
extern void key_gc_keytype(struct key_type *ktype);
@@ -211,7 +211,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
/*
* Determine whether a key is dead.
*/
-static inline bool key_is_dead(const struct key *key, time_t limit)
+static inline bool key_is_dead(const struct key *key, time64_t limit)
{
return
key->flags & ((1 << KEY_FLAG_DEAD) |
diff --git a/security/keys/key.c b/security/keys/key.c
index 83bf4b4afd49..66049183ad89 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -460,7 +460,7 @@ static int __key_instantiate_and_link(struct key *key,
if (authkey)
key_revoke(authkey);
- if (prep->expiry != TIME_T_MAX) {
+ if (prep->expiry != TIME64_MAX) {
key->expiry = prep->expiry;
key_schedule_gc(prep->expiry + key_gc_delay);
}
@@ -506,7 +506,7 @@ int key_instantiate_and_link(struct key *key,
prep.data = data;
prep.datalen = datalen;
prep.quotalen = key->type->def_datalen;
- prep.expiry = TIME_T_MAX;
+ prep.expiry = TIME64_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
@@ -570,7 +570,6 @@ int key_reject_and_link(struct key *key,
struct key *authkey)
{
struct assoc_array_edit *edit;
- struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
@@ -593,8 +592,7 @@ int key_reject_and_link(struct key *key,
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, -error);
- now = current_kernel_time();
- key->expiry = now.tv_sec + timeout;
+ key->expiry = ktime_get_real_seconds() + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
@@ -710,16 +708,13 @@ found_kernel_type:
void key_set_timeout(struct key *key, unsigned timeout)
{
- struct timespec now;
- time_t expiry = 0;
+ time64_t expiry = 0;
/* make the changes with the locks held to prevent races */
down_write(&key->sem);
- if (timeout > 0) {
- now = current_kernel_time();
- expiry = now.tv_sec + timeout;
- }
+ if (timeout > 0)
+ expiry = ktime_get_real_seconds() + timeout;
key->expiry = expiry;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -850,7 +845,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
prep.data = payload;
prep.datalen = plen;
prep.quotalen = index_key.type->def_datalen;
- prep.expiry = TIME_T_MAX;
+ prep.expiry = TIME64_MAX;
if (index_key.type->preparse) {
ret = index_key.type->preparse(&prep);
if (ret < 0) {
@@ -994,7 +989,7 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
prep.data = payload;
prep.datalen = plen;
prep.quotalen = key->type->def_datalen;
- prep.expiry = TIME_T_MAX;
+ prep.expiry = TIME64_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
@@ -1028,8 +1023,7 @@ EXPORT_SYMBOL(key_update);
*/
void key_revoke(struct key *key)
{
- struct timespec now;
- time_t time;
+ time64_t time;
key_check(key);
@@ -1044,8 +1038,7 @@ void key_revoke(struct key *key)
key->type->revoke(key);
/* set the death time to no more than the expiry time */
- now = current_kernel_time();
- time = now.tv_sec;
+ time = ktime_get_real_seconds();
if (key->revoked_at == 0 || key->revoked_at > time) {
key->revoked_at = time;
key_schedule_gc(key->revoked_at + key_gc_delay);
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 36f842ec87f0..d0bccebbd3b5 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -565,7 +565,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
/* skip invalidated, revoked and expired keys */
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
- time_t expiry = READ_ONCE(key->expiry);
+ time64_t expiry = READ_ONCE(key->expiry);
if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED))) {
@@ -574,7 +574,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
goto skipped;
}
- if (expiry && ctx->now.tv_sec >= expiry) {
+ if (expiry && ctx->now >= expiry) {
if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
ctx->result = ERR_PTR(-EKEYEXPIRED);
kleave(" = %d [expire]", ctx->skipped_ret);
@@ -834,10 +834,10 @@ found:
key = key_ref_to_ptr(ctx->result);
key_check(key);
if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
- key->last_used_at = ctx->now.tv_sec;
- keyring->last_used_at = ctx->now.tv_sec;
+ key->last_used_at = ctx->now;
+ keyring->last_used_at = ctx->now;
while (sp > 0)
- stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+ stack[--sp].keyring->last_used_at = ctx->now;
}
kleave(" = true");
return true;
@@ -898,7 +898,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
}
rcu_read_lock();
- ctx->now = current_kernel_time();
+ ctx->now = ktime_get_real_seconds();
if (search_nested_keyrings(keyring, ctx))
__key_get(key_ref_to_ptr(ctx->result));
rcu_read_unlock();
@@ -1149,7 +1149,7 @@ struct key *find_keyring_by_name(const char *name, bool uid_keyring)
* (ie. it has a zero usage count) */
if (!refcount_inc_not_zero(&keyring->usage))
continue;
- keyring->last_used_at = current_kernel_time().tv_sec;
+ keyring->last_used_at = ktime_get_real_seconds();
goto out;
}
}
@@ -1489,7 +1489,7 @@ static void keyring_revoke(struct key *keyring)
static bool keyring_gc_select_iterator(void *object, void *iterator_data)
{
struct key *key = keyring_ptr_to_key(object);
- time_t *limit = iterator_data;
+ time64_t *limit = iterator_data;
if (key_is_dead(key, *limit))
return false;
@@ -1500,7 +1500,7 @@ static bool keyring_gc_select_iterator(void *object, void *iterator_data)
static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
const struct key *key = keyring_ptr_to_key(object);
- time_t *limit = iterator_data;
+ time64_t *limit = iterator_data;
key_check(key);
return key_is_dead(key, *limit);
@@ -1512,7 +1512,7 @@ static int keyring_gc_check_iterator(const void *object, void *iterator_data)
* Not called with any locks held. The keyring's key struct will not be
* deallocated under us as only our caller may deallocate it.
*/
-void keyring_gc(struct key *keyring, time_t limit)
+void keyring_gc(struct key *keyring, time64_t limit)
{
int result;
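
Several of these hunks read key->expiry through READ_ONCE() before comparing it against the 64-bit clock; the single marked load means the compiler cannot re-read the field later, so every use in the function sees one consistent snapshot without taking the key semaphore. A kernel-style sketch of the idiom (struct and field names are illustrative):

    #include <linux/compiler.h>
    #include <linux/timekeeping.h>
    #include <linux/time64.h>
    #include <linux/types.h>

    struct entry {
        time64_t expiry;        /* written elsewhere under the owner's lock */
    };

    static bool entry_expired(const struct entry *e)
    {
        /* One marked load: the compiler may not reload e->expiry, so both
         * uses of the local below see the same value.
         */
        time64_t expiry = READ_ONCE(e->expiry);

        return expiry != 0 && ktime_get_real_seconds() >= expiry;
    }
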
diff --git a/security/keys/permission.c b/security/keys/permission.c
index a72b4dd70c8a..f68dc04d614e 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(key_task_permission);
int key_validate(const struct key *key)
{
unsigned long flags = READ_ONCE(key->flags);
- time_t expiry = READ_ONCE(key->expiry);
+ time64_t expiry = READ_ONCE(key->expiry);
if (flags & (1 << KEY_FLAG_INVALIDATED))
return -ENOKEY;
@@ -101,8 +101,7 @@ int key_validate(const struct key *key)
/* check it hasn't expired */
if (expiry) {
- struct timespec now = current_kernel_time();
- if (now.tv_sec >= expiry)
+ if (ktime_get_real_seconds() >= expiry)
return -EKEYEXPIRED;
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 6d1fcbba1e09..fbc4af5c6c9f 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -178,13 +178,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
{
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
- struct timespec now;
- time_t expiry;
- unsigned long timo;
unsigned long flags;
key_ref_t key_ref, skey_ref;
+ time64_t now, expiry;
char xbuf[16];
short state;
+ u64 timo;
int rc;
struct keyring_search_context ctx = {
@@ -215,7 +214,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
if (rc < 0)
return 0;
- now = current_kernel_time();
+ now = ktime_get_real_seconds();
rcu_read_lock();
@@ -223,21 +222,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
expiry = READ_ONCE(key->expiry);
if (expiry == 0) {
memcpy(xbuf, "perm", 5);
- } else if (now.tv_sec >= expiry) {
+ } else if (now >= expiry) {
memcpy(xbuf, "expd", 5);
} else {
- timo = expiry - now.tv_sec;
+ timo = expiry - now;
if (timo < 60)
- sprintf(xbuf, "%lus", timo);
+ sprintf(xbuf, "%llus", timo);
else if (timo < 60*60)
- sprintf(xbuf, "%lum", timo / 60);
+ sprintf(xbuf, "%llum", div_u64(timo, 60));
else if (timo < 60*60*24)
- sprintf(xbuf, "%luh", timo / (60*60));
+ sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60));
else if (timo < 60*60*24*7)
- sprintf(xbuf, "%lud", timo / (60*60*24));
+ sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24));
else
- sprintf(xbuf, "%luw", timo / (60*60*24*7));
+ sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7));
}
state = key_read_state(key);
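
timo becomes a u64 here, and the divisions switch to div_u64() because a plain "/" on a 64-bit operand would need libgcc's __udivdi3 helper, which the kernel does not provide on 32-bit architectures. A kernel-style sketch:

    #include <linux/math64.h>
    #include <linux/types.h>

    /* Whole minutes in a 64-bit second count.  On a 32-bit kernel a plain
     * "secs / 60" would emit a call to __udivdi3, so div_u64() is used
     * for 64-bit dividends with a 32-bit divisor.
     */
    static u64 seconds_to_minutes(u64 secs)
    {
        return div_u64(secs, 60);
    }
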
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 740affd65ee9..d5b25e535d3a 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -738,7 +738,7 @@ try_again:
if (ret < 0)
goto invalid_key;
- key->last_used_at = current_kernel_time().tv_sec;
+ key->last_used_at = ktime_get_real_seconds();
error:
put_cred(ctx.cred);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a93a4235a332..10e7ef7a8804 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -248,8 +248,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
runtime->rate);
*audio_tstamp = ns_to_timespec(audio_nsecs);
}
- runtime->status->audio_tstamp = *audio_tstamp;
- runtime->status->tstamp = *curr_tstamp;
+ if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
+ runtime->status->audio_tstamp = *audio_tstamp;
+ runtime->status->tstamp = *curr_tstamp;
+ }
/*
* re-take a driver timestamp to let apps detect if the reference tstamp
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 59127b6ef39e..e00f7e399e46 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
struct snd_timer *t;
tu = file->private_data;
- if (snd_BUG_ON(!tu->timeri))
- return -ENXIO;
+ if (!tu->timeri)
+ return -EBADFD;
t = tu->timeri->timer;
- if (snd_BUG_ON(!t))
- return -ENXIO;
+ if (!t)
+ return -EBADFD;
memset(&info, 0, sizeof(info));
info.card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
@@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
struct snd_timer_status32 status;
tu = file->private_data;
- if (snd_BUG_ON(!tu->timeri))
- return -ENXIO;
+ if (!tu->timeri)
+ return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp.tv_sec = tu->tstamp.tv_sec;
status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index e43af18d4383..8632301489fa 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -495,7 +495,9 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
* Returns 0 if successful, or a negative error code.
*/
int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
- int (*func)(struct snd_kcontrol *, void *),
+ int (*func)(struct snd_kcontrol *vslave,
+ struct snd_kcontrol *slave,
+ void *arg),
void *arg)
{
struct link_master *master;
@@ -507,7 +509,7 @@ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
if (err < 0)
return err;
list_for_each_entry(slave, &master->slaves, list) {
- err = func(&slave->slave, arg);
+ err = func(slave->kctl, &slave->slave, arg);
if (err < 0)
return err;
}
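
The iterator's callback gains an argument: it now receives both kcontrols involved with each slave (the prototype names them vslave and slave), so a caller such as the HD-audio init code further down can read data from one and write the adjusted value through the other. A sketch of a callback matching the widened prototype (the body is purely illustrative):

    #include <linux/printk.h>
    #include <sound/control.h>

    /* Matches the new snd_ctl_apply_vmaster_slaves() callback type: two
     * kcontrol pointers plus the opaque argument.
     */
    static int show_slave(struct snd_kcontrol *vslave,
                          struct snd_kcontrol *slave,
                          void *arg)
    {
        int *count = arg;

        (*count)++;
        pr_info("slave %s (paired with %s)\n", slave->id.name, vslave->id.name);
        return 0;
    }
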
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index 81acc20c2535..f21633cd9b38 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
memset(pcm_chmap, 0, sizeof(pcm_chmap));
chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
- for (i = 0; i < sizeof(chmap); i++)
+ for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
ucontrol->value.integer.value[i] = pcm_chmap[i];
return 0;
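
The old loop bound, sizeof(chmap), measured the chmap pointer argument rather than the pcm_chmap array being copied, so the count was only right by coincidence of pointer size. ARRAY_SIZE() of the array itself gives the intended element count. A small userspace sketch of the pitfall:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static void copy_out(const unsigned char *src, size_t n)
    {
        unsigned char chmap_vals[8];
        size_t i;

        /* sizeof(src) here is the size of a pointer (4 or 8 bytes), which
         * says nothing about the element count; ARRAY_SIZE() on the array
         * itself is what the loop bound should be.
         */
        for (i = 0; i < ARRAY_SIZE(chmap_vals) && i < n; i++)
            chmap_vals[i] = src[i];

        for (i = 0; i < ARRAY_SIZE(chmap_vals) && i < n; i++)
            printf("%u ", chmap_vals[i]);
        printf("\n");
    }

    int main(void)
    {
        const unsigned char src[] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        copy_out(src, ARRAY_SIZE(src));
        return 0;
    }
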
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index c1f8e5479bf3..e018ecbf78a8 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1823,7 +1823,9 @@ struct slave_init_arg {
};
/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
-static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
+static int init_slave_0dB(struct snd_kcontrol *slave,
+ struct snd_kcontrol *kctl,
+ void *_arg)
{
struct slave_init_arg *arg = _arg;
int _tlv[4];
@@ -1860,7 +1862,7 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
arg->step = step;
val = -tlv[2] / step;
if (val > 0) {
- put_kctl_with_value(kctl, val);
+ put_kctl_with_value(slave, val);
return val;
}
@@ -1868,7 +1870,9 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
}
/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
-static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
+static int init_slave_unmute(struct snd_kcontrol *slave,
+ struct snd_kcontrol *kctl,
+ void *_arg)
{
return put_kctl_with_value(slave, 1);
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f958d8d54d15..c71dcacea807 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2463,6 +2463,9 @@ static const struct pci_device_id azx_ids[] = {
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD Raven */
+ { PCI_DEVICE(0x1022, 0x15e3),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index db1a376e27c0..921a10eff43a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -341,6 +341,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0299:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
+ case 0x10ec0275:
+ alc_update_coef_idx(codec, 0xe, 0, 1<<0);
+ break;
case 0x10ec0293:
alc_update_coef_idx(codec, 0xa, 1<<13, 0);
break;
@@ -6452,6 +6455,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0233, 0x8086, "Intel NUC Skull Canyon", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x1b, 0x01111010},
+ {0x1e, 0x01451130},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60140},
{0x14, 0x90170110},
@@ -6887,7 +6894,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0703:
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
- alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
break;
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index bb8be10b8437..7b49d04e3c60 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -34,6 +34,11 @@ config SND_SOC_INTEL_SST_TOPLEVEL
depends on X86 || COMPILE_TEST
select SND_SOC_INTEL_MACH
select SND_SOC_INTEL_COMMON
+ help
+ Intel ASoC Audio Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and an I2S or DMIC port, then
+ enable this option by saying Y or M.
+ If unsure, select "N".
config SND_SOC_INTEL_HASWELL
tristate "Intel ASoC SST driver for Haswell/Broadwell"
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 26dd5f20f149..eb3396ffba4c 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_SOURCE))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
return cs;
}
@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_SELECTOR))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
+ if (cs->bLength < 5 + cs->bNrInPins)
+ return NULL;
return cs;
+ }
}
return NULL;
@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_MULTIPLIER))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
return cs;
}
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 4f9613e5fc9e..c1376bfdc90b 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -201,7 +201,7 @@ static int line6_send_raw_message_async_part(struct message *msg,
void line6_start_timer(struct timer_list *timer, unsigned long msecs,
void (*function)(struct timer_list *t))
{
- timer->function = (TIMER_FUNC_TYPE)function;
+ timer->function = function;
mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL_GPL(line6_start_timer);
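
Dropping the TIMER_FUNC_TYPE cast reflects the timer API conversion in this release: callbacks now take a struct timer_list * and recover their containing object with from_timer(), instead of receiving an unsigned long cookie. A kernel-style sketch of the converted pattern (struct and function names are illustrative):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct line_dev {
        struct timer_list timer;
        int pending;
    };

    static void line_dev_timeout(struct timer_list *t)
    {
        /* from_timer() maps the timer_list back to its container,
         * replacing the old (unsigned long data) cookie argument.
         */
        struct line_dev *dev = from_timer(dev, t, timer);

        dev->pending = 0;
    }

    static void line_dev_start(struct line_dev *dev, unsigned long msecs)
    {
        timer_setup(&dev->timer, line_dev_timeout, 0);
        dev->pending = 1;
        mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msecs));
    }
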
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 91bc8f18791e..61b348383de8 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1469,6 +1469,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
__u8 *bmaControls;
if (state->mixer->protocol == UAC_VERSION_1) {
+ if (hdr->bLength < 7) {
+ usb_audio_err(state->chip,
+ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+ unitid);
+ return -EINVAL;
+ }
csize = hdr->bControlSize;
if (!csize) {
usb_audio_dbg(state->chip,
@@ -1486,6 +1492,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
}
} else {
struct uac2_feature_unit_descriptor *ftr = _ftr;
+ if (hdr->bLength < 6) {
+ usb_audio_err(state->chip,
+ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+ unitid);
+ return -EINVAL;
+ }
csize = 4;
channels = (hdr->bLength - 6) / 4 - 1;
bmaControls = ftr->bmaControls;
@@ -2086,7 +2098,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
const struct usbmix_name_map *map;
char **namelist;
- if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
+ if (desc->bLength < 5 || !desc->bNrInPins ||
+ desc->bLength < 5 + desc->bNrInPins) {
usb_audio_err(state->chip,
"invalid SELECTOR UNIT descriptor %d\n", unitid);
return -EINVAL;
@@ -2330,9 +2343,14 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
struct usb_mixer_elem_list *list;
- for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
+ for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) {
+ struct usb_mixer_elem_info *info =
+ (struct usb_mixer_elem_info *)list;
+ /* invalidate cache, so the value is read from the device */
+ info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&list->kctl->id);
+ }
}
static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
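
Together with the clock.c checks above, these hunks validate a descriptor's bLength before touching fields at fixed or variable offsets, since a malicious or broken device can hand back a shorter blob than the declared type. A self-contained sketch of the pattern with a made-up descriptor layout (not the real UAC structures):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fake_desc {
        uint8_t bLength;        /* total length reported by the device */
        uint8_t bDescriptorType;
        uint8_t bUnitID;
        uint8_t bNrInPins;
        uint8_t baSourceID[];   /* bNrInPins entries follow */
    };

    static bool fake_desc_valid(const void *buf, size_t avail)
    {
        const struct fake_desc *d = buf;

        /* Check the fixed header first, then the variable-length tail,
         * and never trust bLength beyond what was actually received.
         */
        if (avail < offsetof(struct fake_desc, baSourceID))
            return false;
        if (d->bLength < offsetof(struct fake_desc, baSourceID))
            return false;
        if (d->bLength < offsetof(struct fake_desc, baSourceID) + d->bNrInPins)
            return false;
        if (d->bLength > avail)
            return false;
        return true;
    }
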
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index f45c44ef9bec..ad619b96c276 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -41,7 +41,6 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
-#include <net/if.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -230,21 +229,6 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
- if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
- jsonw_name(json_wtr, "dev");
- if (info->ifindex) {
- char name[IF_NAMESIZE];
-
- if (!if_indextoname(info->ifindex, name))
- jsonw_printf(json_wtr, "\"ifindex:%d\"",
- info->ifindex);
- else
- jsonw_printf(json_wtr, "\"%s\"", name);
- } else {
- jsonw_printf(json_wtr, "\"unknown\"");
- }
- }
-
if (info->load_time) {
char buf[32];
@@ -302,21 +286,6 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
- printf(" ");
-
- if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
- printf("dev ");
- if (info->ifindex) {
- char name[IF_NAMESIZE];
-
- if (!if_indextoname(info->ifindex, name))
- printf("ifindex:%d ", info->ifindex);
- else
- printf("%s ", name);
- } else {
- printf("unknown ");
- }
- }
printf("\n");
if (info->load_time) {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e880ae6434ee..4c223ab30293 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -262,7 +262,7 @@ union bpf_attr {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
- __u32 prog_target_ifindex; /* ifindex of netdev to prep for */
+ __u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -897,10 +897,6 @@ enum sk_action {
#define BPF_TAG_SIZE 8
-enum bpf_prog_status {
- BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
-};
-
struct bpf_prog_info {
__u32 type;
__u32 id;
@@ -914,8 +910,6 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
- __u32 ifindex;
- __u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {
diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
index d3102c865a95..914cff12899b 100644
--- a/tools/objtool/.gitignore
+++ b/tools/objtool/.gitignore
@@ -1,3 +1,3 @@
-arch/x86/insn/inat-tables.c
+arch/x86/lib/inat-tables.c
objtool
fixdep
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 424b1965d06f..0f94af3ccaaa 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -25,7 +25,9 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
all: $(OBJTOOL)
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
+INCLUDES := -I$(srctree)/tools/include \
+ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+ -I$(srctree)/tools/objtool/arch/$(ARCH)/include
WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
LDFLAGS += -lelf $(LIBSUBCMD)
@@ -41,22 +43,8 @@ include $(srctree)/tools/build/Makefile.include
$(OBJTOOL_IN): fixdep FORCE
@$(MAKE) $(build)=objtool
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
- @(diff -I 2>&1 | grep -q 'option requires an argument' && \
- test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
- diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
- diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
- diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
- diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
- || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
- @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
- diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \
- || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true
+ @./sync-check.sh
$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
@@ -66,7 +54,7 @@ $(LIBSUBCMD): fixdep FORCE
clean:
$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
- $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+ $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
FORCE:
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index debbdb0b5c43..b998412c017d 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,12 +1,12 @@
objtool-y += decode.o
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
$(call rule_mkdir)
$(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 34a579f806e3..8acfc47af70e 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -19,9 +19,9 @@
#include <stdlib.h>
#define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
#include "../../elf.h"
#include "../../arch.h"
diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
index 125ecd2a300d..1c78580e58be 100644
--- a/tools/objtool/arch/x86/insn/inat.h
+++ b/tools/objtool/arch/x86/include/asm/inat.h
@@ -20,7 +20,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
-#include "inat_types.h"
+#include <asm/inat_types.h>
/*
* Internal bits. Don't use bitmasks directly, because these bits are
@@ -97,6 +97,16 @@
#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE 0
+#define INAT_SEG_REG_DEFAULT 1
+#define INAT_SEG_REG_CS 2
+#define INAT_SEG_REG_SS 3
+#define INAT_SEG_REG_DS 4
+#define INAT_SEG_REG_ES 5
+#define INAT_SEG_REG_FS 6
+#define INAT_SEG_REG_GS 7
+
/* Attribute search APIs */
extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
index cb3c20ce39cf..cb3c20ce39cf 100644
--- a/tools/objtool/arch/x86/insn/inat_types.h
+++ b/tools/objtool/arch/x86/include/asm/inat_types.h
diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
index e23578c7b1be..b3e32b010ab1 100644
--- a/tools/objtool/arch/x86/insn/insn.h
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -21,7 +21,7 @@
*/
/* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include <asm/inat.h>
struct insn_field {
union {
diff --git a/tools/objtool/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..9c9dc579bd7d 100644
--- a/tools/objtool/orc_types.h
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/lib/inat.c
index e4bf28e6f4c7..c1f01a8e9f65 100644
--- a/tools/objtool/arch/x86/insn/inat.c
+++ b/tools/objtool/arch/x86/lib/inat.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
-#include "insn.h"
+#include <asm/insn.h>
/* Attribute tables are generated from opcode map */
#include "inat-tables.c"
diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/lib/insn.c
index ca983e2bea8b..1088eb8f3a5f 100644
--- a/tools/objtool/arch/x86/insn/insn.c
+++ b/tools/objtool/arch/x86/lib/insn.c
@@ -23,8 +23,8 @@
#else
#include <string.h>
#endif
-#include "inat.h"
-#include "insn.h"
+#include <asm/inat.h>
+#include <asm/insn.h>
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..12e377184ee4 100644
--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
index b02a36b2c14f..b02a36b2c14f 100644
--- a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
+++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
index a4139e386ef3..b0e92a6d0903 100644
--- a/tools/objtool/orc.h
+++ b/tools/objtool/orc.h
@@ -18,7 +18,7 @@
#ifndef _ORC_H
#define _ORC_H
-#include "orc_types.h"
+#include <asm/orc_types.h>
struct objtool_file;
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755
index 000000000000..1470e74e9d66
--- /dev/null
+++ b/tools/objtool/sync-check.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+ local file=$1
+
+ diff $file ../../$file > /dev/null ||
+ echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+ exit 0
+fi
+
+for i in $FILES; do
+ check $i
+done
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index da205d1fa03c..1dd5f4fcffd5 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -26,7 +26,7 @@ endif
ifneq ($(OUTPUT),)
# check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 654efd9768fd..3fab179b1aba 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -13,7 +13,7 @@ endif
# check that the output directory actually exists
ifneq ($(OUTPUT),)
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index bf092b83e453..3c64f30cf63c 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4377,11 +4377,10 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
@@ -4481,14 +4480,12 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
@@ -4618,18 +4615,16 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr = "R2 min value is outside of the array range",
+ .errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -4760,20 +4755,18 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
+ offsetof(struct test_val, foo), 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr = "R2 min value is outside of the array range",
+ .errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5638,7 +5631,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
- "helper access to variable memory: size = 0 allowed on NULL",
+ "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5652,7 +5645,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size > 0 not allowed on NULL",
+ "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5670,7 +5663,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer",
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
@@ -5687,7 +5680,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size = 0 allowed on != NULL map pointer",
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5709,7 +5702,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer",
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5734,7 +5727,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer",
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5757,7 +5750,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer",
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -5779,6 +5772,105 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
"helper access to variable memory: 8 bytes leak",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
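The new (!ARG_PTR_TO_MEM_OR_NULL) cases above exercise bpf_probe_read(), whose memory argument must never be NULL, in contrast to helpers whose argument is declared ARG_PTR_TO_MEM_OR_NULL and which accept NULL as long as the accompanying size is 0. A minimal user-space sketch of that distinction (illustrative only, not the kernel's check_func_arg() logic; the enum and function here are made up for the example):

#include <errno.h>

enum arg_type { ARG_PTR_TO_MEM, ARG_PTR_TO_MEM_OR_NULL };

/* Sketch: decide whether a NULL pointer argument is acceptable for a
 * helper, mirroring the behaviour the tests above expect from the
 * verifier ("R1 type=inv expected=fp" on rejection). */
static int check_mem_arg(enum arg_type type, int reg_is_null, unsigned int size)
{
	if (!reg_is_null)
		return 0;		/* real pointers are range-checked elsewhere */

	if (type == ARG_PTR_TO_MEM)
		return -EACCES;		/* probe_read-style: NULL never allowed */

	return size == 0 ? 0 : -EACCES;	/* MEM_OR_NULL: NULL ok only if size == 0 */
}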
diff --git a/tools/testing/selftests/x86/5lvl.c b/tools/testing/selftests/x86/5lvl.c
new file mode 100644
index 000000000000..2eafdcd4c2b3
--- /dev/null
+++ b/tools/testing/selftests/x86/5lvl.c
@@ -0,0 +1,177 @@
+#include <stdio.h>
+#include <sys/mman.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define PAGE_SIZE 4096
+#define LOW_ADDR ((void *) (1UL << 30))
+#define HIGH_ADDR ((void *) (1UL << 50))
+
+struct testcase {
+ void *addr;
+ unsigned long size;
+ unsigned long flags;
+ const char *msg;
+ unsigned int low_addr_required:1;
+ unsigned int keep_mapped:1;
+};
+
+static struct testcase testcases[] = {
+ {
+ .addr = NULL,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1) again",
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap((1UL << 47), 2 * PAGE_SIZE / 2)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
+ },
+ {
+ .addr = NULL,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB) again",
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE),
+ .size = 4UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)((1UL << 47) - (2UL << 20)),
+ .size = 4UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
+ },
+};
+
+int main(int argc, char **argv)
+{
+ int i;
+ void *p;
+
+ for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+ struct testcase *t = testcases + i;
+
+ p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
+
+ printf("%s: %p - ", t->msg, p);
+
+ if (p == MAP_FAILED) {
+ printf("FAILED\n");
+ continue;
+ }
+
+ if (t->low_addr_required && p >= (void *)(1UL << 47))
+ printf("FAILED\n");
+ else
+ printf("OK\n");
+ if (!t->keep_mapped)
+ munmap(p, t->size);
+ }
+ return 0;
+}
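The test above encodes the user-visible contract of 5-level paging: without an explicit address hint above the 47-bit boundary, mmap() keeps returning low addresses so that existing programs which stash data in upper pointer bits keep working. A tiny stand-alone illustration of the same contract (a sketch, not part of the selftest):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *boundary = (void *)(1UL << 47);

	/* No hint: expected to stay below the 47-bit boundary. */
	void *low = mmap(NULL, 4096, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Hint above the boundary: only then may the kernel go higher. */
	void *high = mmap((void *)(1UL << 50), 4096, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (low != MAP_FAILED)
		printf("no hint:   %p (%s)\n", low,
		       low < boundary ? "below 47-bit" : "above 47-bit");
	if (high != MAP_FAILED)
		printf("high hint: %p (%s)\n", high,
		       high < boundary ? "below 47-bit" : "above 47-bit");
	return 0;
}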
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 7b1adeee4b0f..939a337128db 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -11,7 +11,7 @@ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_sysc
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
index 3f0093911f03..d1b61ab870f8 100644
--- a/tools/testing/selftests/x86/mpx-hw.h
+++ b/tools/testing/selftests/x86/mpx-hw.h
@@ -52,14 +52,14 @@
struct mpx_bd_entry {
union {
char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
- void *contents[1];
+ void *contents[0];
};
} __attribute__((packed));
struct mpx_bt_entry {
union {
char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
- unsigned long contents[1];
+ unsigned long contents[0];
};
} __attribute__((packed));
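The hunk above switches the typed overlay members to zero-length arrays: a zero-length array member adds nothing to the size of the union, so the entry size is dictated purely by the raw byte array x[], while contents[] remains usable as a typed window onto the same bytes. A small stand-alone sketch of the idiom (hypothetical entry size, not the MPX constants):

#include <stdio.h>

#define ENTRY_SIZE_BYTES 16

struct entry {
	union {
		char x[ENTRY_SIZE_BYTES];	/* fixes the size of the entry */
		unsigned long contents[0];	/* typed alias, contributes no size */
	};
} __attribute__((packed));

int main(void)
{
	struct entry e = { 0 };

	e.contents[0] = 0xdeadbeef;		/* write through the typed view */
	printf("sizeof = %zu, first byte = %#x\n",
	       sizeof(e), (unsigned char)e.x[0]);
	return 0;
}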
diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h
index 3818f25391c2..b3cb7670e026 100644
--- a/tools/testing/selftests/x86/pkey-helpers.h
+++ b/tools/testing/selftests/x86/pkey-helpers.h
@@ -30,6 +30,7 @@ static inline void sigsafe_printf(const char *format, ...)
if (!dprint_in_signal) {
vprintf(format, ap);
} else {
+ int ret;
int len = vsnprintf(dprint_in_signal_buffer,
DPRINT_IN_SIGNAL_BUF_SIZE,
format, ap);
@@ -39,7 +40,9 @@ static inline void sigsafe_printf(const char *format, ...)
*/
if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
len = DPRINT_IN_SIGNAL_BUF_SIZE;
- write(1, dprint_in_signal_buffer, len);
+ ret = write(1, dprint_in_signal_buffer, len);
+ if (ret < 0)
+ abort();
}
va_end(ap);
}
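The write() hunk above presumably exists to satisfy glibc's warn_unused_result annotation on write(2): the return value is now consumed and a failure aborts. The same pattern in isolation (a sketch; sig_safe_puts is a made-up name):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Emit a message from a signal handler: write(2) is async-signal-safe,
 * but its result has to be checked (or at least consumed). */
static void sig_safe_puts(const char *s)
{
	ssize_t ret = write(STDOUT_FILENO, s, strlen(s));

	if (ret < 0)
		abort();	/* same hard-failure choice as the selftest */
}

int main(void)
{
	sig_safe_puts("hello from a (pretend) signal handler\n");
	return 0;
}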
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 7a1cc0e56d2d..bc1b0735bb50 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -250,7 +250,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
unsigned long ip;
char *fpregs;
u32 *pkru_ptr;
- u64 si_pkey;
+ u64 siginfo_pkey;
u32 *si_pkey_ptr;
int pkru_offset;
fpregset_t fpregset;
@@ -292,9 +292,9 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
dump_mem(si_pkey_ptr - 8, 24);
- si_pkey = *si_pkey_ptr;
- pkey_assert(si_pkey < NR_PKEYS);
- last_si_pkey = si_pkey;
+ siginfo_pkey = *si_pkey_ptr;
+ pkey_assert(siginfo_pkey < NR_PKEYS);
+ last_si_pkey = siginfo_pkey;
if ((si->si_code == SEGV_MAPERR) ||
(si->si_code == SEGV_ACCERR) ||
@@ -306,7 +306,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
/* need __rdpkru() version so we do not do shadow_pkru checking */
dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
- dprintf1("si_pkey from siginfo: %jx\n", si_pkey);
+ dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
*(u64 *)pkru_ptr = 0x00000000;
dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
pkru_faults++;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4db54ff08d9e..4151250ce8da 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -817,9 +817,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct irq_desc *desc;
- struct irq_data *data;
- int phys_irq;
int ret;
if (timer->enabled)
@@ -837,26 +834,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- /*
- * Find the physical IRQ number corresponding to the host_vtimer_irq
- */
- desc = irq_to_desc(host_vtimer_irq);
- if (!desc) {
- kvm_err("%s: no interrupt descriptor\n", __func__);
- return -EINVAL;
- }
-
- data = irq_desc_get_irq_data(desc);
- while (data->parent_data)
- data = data->parent_data;
-
- phys_irq = data->hwirq;
-
- /*
- * Tell the VGIC that the virtual interrupt is tied to a
- * physical interrupt. We do that once per VCPU.
- */
- ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
+ ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
if (ret)
return ret;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 772bf74ac2e9..a6524ff27de4 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -27,6 +27,8 @@
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
@@ -175,6 +177,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ kvm_vgic_destroy(kvm);
+
free_percpu(kvm->arch.last_vcpu_ran);
kvm->arch.last_vcpu_ran = NULL;
@@ -184,8 +188,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm->vcpus[i] = NULL;
}
}
-
- kvm_vgic_destroy(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -313,11 +315,13 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_timer_schedule(vcpu);
+ kvm_vgic_v4_enable_doorbell(vcpu);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_timer_unschedule(vcpu);
+ kvm_vgic_v4_disable_doorbell(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -1450,6 +1454,46 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
return NULL;
}
+bool kvm_arch_has_irq_bypass(void)
+{
+ return true;
+}
+
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
+ &irqfd->irq_entry);
+}
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
+ &irqfd->irq_entry);
+}
+
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ kvm_arm_halt_guest(irqfd->kvm);
+}
+
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ kvm_arm_resume_guest(irqfd->kvm);
+}
+
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
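The hooks added to arm.c above are what connect KVM to the irqbypass framework: when VFIO registers a producer for a pass-through MSI that matches an irqfd, the generic code ends up in kvm_arch_irq_bypass_add_producer(), which forwards the host IRQ plus the guest's MSI routing entry to kvm_vgic_v4_set_forwarding() (and the del_producer hook undoes it). A user-space model of that flow, with stand-in types and functions rather than the real kernel objects:

#include <stdio.h>

/* Stand-ins for the kernel structures involved; not real kernel APIs. */
struct routing_entry { unsigned int devid, eventid; };
struct producer { int host_irq; };

/* Models kvm_vgic_v4_set_forwarding(): pair a host IRQ with the vLPI
 * that the guest programmed for this device/event. */
static int set_forwarding(int host_irq, const struct routing_entry *e)
{
	printf("host IRQ %d now delivers directly to vITS dev %u / event %u\n",
	       host_irq, e->devid, e->eventid);
	return 0;
}

/* Models kvm_arch_irq_bypass_add_producer(): invoked when a VFIO
 * producer is matched against KVM's irqfd consumer. */
static int add_producer(const struct producer *prod,
			const struct routing_entry *route)
{
	return set_forwarding(prod->host_irq, route);
}

int main(void)
{
	struct routing_entry route = { .devid = 0x10, .eventid = 3 };
	struct producer prod = { .host_irq = 42 };

	return add_producer(&prod, &route);
}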
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 91728faa13fd..f5c3d6d7019e 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -258,7 +258,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
} else {
- if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+ cpu_if->its_vpe.its_vm)
write_gicreg(0, ICH_HCR_EL2);
cpu_if->vgic_elrsr = 0xffff;
@@ -337,9 +338,11 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
/*
* If we need to trap system registers, we must write
* ICH_HCR_EL2 anyway, even if no interrupts are being
- * injected,
+ * injected. Same thing if GICv4 is used, as VLPI
+ * delivery is gated by ICH_HCR_EL2.En.
*/
- if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+ cpu_if->its_vpe.its_vm)
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5801261f3add..62310122ee78 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,6 +285,10 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
+ ret = vgic_v4_init(kvm);
+ if (ret)
+ goto out;
+
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
@@ -320,6 +324,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
kfree(dist->spis);
dist->nr_spis = 0;
+
+ if (vgic_supports_direct_msis(kvm))
+ vgic_v4_teardown(kvm);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index d2a99ab0ade7..1f761a9991e7 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -38,7 +38,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
- struct kvm_vcpu *filter_vcpu);
+ struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
* Creates a new (reference to a) struct vgic_irq for a given LPI.
@@ -106,7 +106,7 @@ out_unlock:
* However we only have those structs for mapped IRQs, so we read in
* the respective config data from memory here upon mapping the LPI.
*/
- ret = update_lpi_config(kvm, irq, NULL);
+ ret = update_lpi_config(kvm, irq, NULL, false);
if (ret)
return ERR_PTR(ret);
@@ -273,7 +273,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
* VCPU. Unconditionally applies if filter_vcpu is NULL.
*/
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
- struct kvm_vcpu *filter_vcpu)
+ struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
u8 prop;
@@ -292,11 +292,17 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
irq->priority = LPI_PROP_PRIORITY(prop);
irq->enabled = LPI_PROP_ENABLE_BIT(prop);
- vgic_queue_irq_unlock(kvm, irq, flags);
- } else {
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ if (!irq->hw) {
+ vgic_queue_irq_unlock(kvm, irq, flags);
+ return 0;
+ }
}
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ if (irq->hw)
+ return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+
return 0;
}
@@ -336,6 +342,29 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
return i;
}
+static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+
+ spin_lock(&irq->irq_lock);
+ irq->target_vcpu = vcpu;
+ spin_unlock(&irq->irq_lock);
+
+ if (irq->hw) {
+ struct its_vlpi_map map;
+
+ ret = its_get_vlpi(irq->host_irq, &map);
+ if (ret)
+ return ret;
+
+ map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+ ret = its_map_vlpi(irq->host_irq, &map);
+ }
+
+ return ret;
+}
+
/*
* Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
* is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -350,10 +379,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
return;
vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
-
- spin_lock(&ite->irq->irq_lock);
- ite->irq->target_vcpu = vcpu;
- spin_unlock(&ite->irq->irq_lock);
+ update_affinity(ite->irq, vcpu);
}
/*
@@ -505,19 +531,11 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
return 0;
}
-/*
- * Find the target VCPU and the LPI number for a given devid/eventid pair
- * and make this IRQ pending, possibly injecting it.
- * Must be called with the its_lock mutex held.
- * Returns 0 on success, a positive error value for any ITS mapping
- * related errors and negative error values for generic errors.
- */
-static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
- u32 devid, u32 eventid)
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid, struct vgic_irq **irq)
{
struct kvm_vcpu *vcpu;
struct its_ite *ite;
- unsigned long flags;
if (!its->enabled)
return -EBUSY;
@@ -533,26 +551,65 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
if (!vcpu->arch.vgic_cpu.lpis_enabled)
return -EBUSY;
- spin_lock_irqsave(&ite->irq->irq_lock, flags);
- ite->irq->pending_latch = true;
- vgic_queue_irq_unlock(kvm, ite->irq, flags);
-
+ *irq = ite->irq;
return 0;
}
-static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
+ u64 address;
+ struct kvm_io_device *kvm_io_dev;
struct vgic_io_device *iodev;
- if (dev->ops != &kvm_io_gic_ops)
- return NULL;
+ if (!vgic_has_its(kvm))
+ return ERR_PTR(-ENODEV);
- iodev = container_of(dev, struct vgic_io_device, dev);
+ if (!(msi->flags & KVM_MSI_VALID_DEVID))
+ return ERR_PTR(-EINVAL);
+ address = (u64)msi->address_hi << 32 | msi->address_lo;
+
+ kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
+ if (!kvm_io_dev)
+ return ERR_PTR(-EINVAL);
+
+ if (kvm_io_dev->ops != &kvm_io_gic_ops)
+ return ERR_PTR(-EINVAL);
+
+ iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
if (iodev->iodev_type != IODEV_ITS)
- return NULL;
+ return ERR_PTR(-EINVAL);
+
+ return iodev->its;
+}
+
+/*
+ * Find the target VCPU and the LPI number for a given devid/eventid pair
+ * and make this IRQ pending, possibly injecting it.
+ * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
+ */
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid)
+{
+ struct vgic_irq *irq = NULL;
+ unsigned long flags;
+ int err;
+
+ err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
+ if (err)
+ return err;
+
+ if (irq->hw)
+ return irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING, true);
+
+ spin_lock_irqsave(&irq->irq_lock, flags);
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(kvm, irq, flags);
- return iodev;
+ return 0;
}
/*
@@ -563,30 +620,16 @@ static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
*/
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
- u64 address;
- struct kvm_io_device *kvm_io_dev;
- struct vgic_io_device *iodev;
+ struct vgic_its *its;
int ret;
- if (!vgic_has_its(kvm))
- return -ENODEV;
-
- if (!(msi->flags & KVM_MSI_VALID_DEVID))
- return -EINVAL;
+ its = vgic_msi_to_its(kvm, msi);
+ if (IS_ERR(its))
+ return PTR_ERR(its);
- address = (u64)msi->address_hi << 32 | msi->address_lo;
-
- kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
- if (!kvm_io_dev)
- return -EINVAL;
-
- iodev = vgic_get_its_iodev(kvm_io_dev);
- if (!iodev)
- return -EINVAL;
-
- mutex_lock(&iodev->its->its_lock);
- ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
- mutex_unlock(&iodev->its->its_lock);
+ mutex_lock(&its->its_lock);
+ ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
+ mutex_unlock(&its->its_lock);
if (ret < 0)
return ret;
@@ -608,8 +651,12 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
list_del(&ite->ite_list);
/* This put matches the get in vgic_add_lpi. */
- if (ite->irq)
+ if (ite->irq) {
+ if (ite->irq->hw)
+ WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
vgic_put_irq(kvm, ite->irq);
+ }
kfree(ite);
}
@@ -683,11 +730,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
ite->collection = collection;
vcpu = kvm_get_vcpu(kvm, collection->target_addr);
- spin_lock(&ite->irq->irq_lock);
- ite->irq->target_vcpu = vcpu;
- spin_unlock(&ite->irq->irq_lock);
-
- return 0;
+ return update_affinity(ite->irq, vcpu);
}
/*
@@ -1054,6 +1097,10 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
ite->irq->pending_latch = false;
+ if (ite->irq->hw)
+ return irq_set_irqchip_state(ite->irq->host_irq,
+ IRQCHIP_STATE_PENDING, false);
+
return 0;
}
@@ -1073,7 +1120,7 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
if (!ite)
return E_ITS_INV_UNMAPPED_INTERRUPT;
- return update_lpi_config(kvm, ite->irq, NULL);
+ return update_lpi_config(kvm, ite->irq, NULL, true);
}
/*
@@ -1108,12 +1155,15 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
irq = vgic_get_irq(kvm, NULL, intids[i]);
if (!irq)
continue;
- update_lpi_config(kvm, irq, vcpu);
+ update_lpi_config(kvm, irq, vcpu, false);
vgic_put_irq(kvm, irq);
}
kfree(intids);
+ if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+ its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
return 0;
}
@@ -1128,11 +1178,12 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
u32 target1_addr = its_cmd_get_target_addr(its_cmd);
u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
struct kvm_vcpu *vcpu1, *vcpu2;
struct vgic_irq *irq;
+ u32 *intids;
+ int irq_count, i;
if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
target2_addr >= atomic_read(&kvm->online_vcpus))
@@ -1144,19 +1195,19 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
vcpu1 = kvm_get_vcpu(kvm, target1_addr);
vcpu2 = kvm_get_vcpu(kvm, target2_addr);
- spin_lock(&dist->lpi_list_lock);
+ irq_count = vgic_copy_lpi_list(vcpu1, &intids);
+ if (irq_count < 0)
+ return irq_count;
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
- spin_lock(&irq->irq_lock);
+ for (i = 0; i < irq_count; i++) {
+ irq = vgic_get_irq(kvm, NULL, intids[i]);
- if (irq->target_vcpu == vcpu1)
- irq->target_vcpu = vcpu2;
+ update_affinity(irq, vcpu2);
- spin_unlock(&irq->irq_lock);
+ vgic_put_irq(kvm, irq);
}
- spin_unlock(&dist->lpi_list_lock);
-
+ kfree(intids);
return 0;
}
@@ -1634,6 +1685,14 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
if (!its)
return -ENOMEM;
+ if (vgic_initialized(dev->kvm)) {
+ int ret = vgic_v4_init(dev->kvm);
+ if (ret < 0) {
+ kfree(its);
+ return ret;
+ }
+ }
+
mutex_init(&its->its_lock);
mutex_init(&its->cmd_lock);
@@ -1946,6 +2005,15 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
list_for_each_entry(ite, &device->itt_head, ite_list) {
gpa_t gpa = base + ite->event_id * ite_esz;
+ /*
+ * If an LPI carries the HW bit, this means that this
+ * interrupt is controlled by GICv4, and we do not
+ * have direct access to that state. Let's simply fail
+ * the save operation...
+ */
+ if (ite->irq->hw)
+ return -EACCES;
+
ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
if (ret)
return ret;
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 83786108829e..671fe81f8e1d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -54,6 +54,11 @@ bool vgic_has_its(struct kvm *kvm)
return dist->has_its;
}
+bool vgic_supports_direct_msis(struct kvm *kvm)
+{
+ return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 863351c090d8..2f05f732d3fd 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -24,6 +24,7 @@
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
+static bool gicv4_enable;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
@@ -461,6 +462,12 @@ static int __init early_common_trap_cfg(char *buf)
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
+static int __init early_gicv4_enable(char *buf)
+{
+ return strtobool(buf, &gicv4_enable);
+}
+early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
+
/**
* vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -480,6 +487,13 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.can_emulate_gicv2 = false;
kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
+ /* GICv4 support? */
+ if (info->has_v4) {
+ kvm_vgic_global_state.has_gicv4 = gicv4_enable;
+ kvm_info("GICv4 support %sabled\n",
+ gicv4_enable ? "en" : "dis");
+ }
+
if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
kvm_vgic_global_state.vcpu_base = 0;
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
new file mode 100644
index 000000000000..53c324aa44ef
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kvm_host.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include "vgic.h"
+
+/*
+ * How KVM uses GICv4 (insert rude comments here):
+ *
+ * The vgic-v4 layer acts as a bridge between several entities:
+ * - The GICv4 ITS representation offered by the ITS driver
+ * - VFIO, which is in charge of the PCI endpoint
+ * - The virtual ITS, which is the only thing the guest sees
+ *
+ * The configuration of VLPIs is triggered by a callback from VFIO,
+ * instructing KVM that a PCI device has been configured to deliver
+ * MSIs to a vITS.
+ *
+ * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
+ * and this is used to find the corresponding vITS data structures
+ * (ITS instance, device, event and irq) using a process that is
+ * extremely similar to the injection of an MSI.
+ *
+ * At this stage, we can link the guest's view of an LPI (uniquely
+ * identified by the routing entry) and the host irq, using the GICv4
+ * driver mapping operation. Should the mapping succeed, we've then
+ * successfully upgraded the guest's LPI to a VLPI. We can then start
+ * with updating GICv4's view of the property table and generating an
+ * INValidation in order to kickstart the delivery of this VLPI to the
+ * guest directly, without software intervention. Well, almost.
+ *
+ * When the PCI endpoint is deconfigured, this operation is reversed
+ * with VFIO calling kvm_vgic_v4_unset_forwarding().
+ *
+ * Once the VLPI has been mapped, it needs to follow any change the
+ * guest performs on its LPI through the vITS. For that, a number of
+ * command handlers have hooks to communicate these changes to the HW:
+ * - Any invalidation triggers a call to its_prop_update_vlpi()
+ * - The INT command results in an irq_set_irqchip_state(), which
+ *   generates an INT on the corresponding VLPI.
+ * - The CLEAR command results in an irq_set_irqchip_state(), which
+ *   generates a CLEAR on the corresponding VLPI.
+ * - DISCARD translates into an unmap, similar to a call to
+ * kvm_vgic_v4_unset_forwarding().
+ * - MOVI is translated by an update of the existing mapping, changing
+ * the target vcpu, resulting in a VMOVI being generated.
+ * - MOVALL is translated by a string of mapping updates (similar to
+ * the handling of MOVI). MOVALL is horrible.
+ *
+ * Note that a DISCARD/MAPTI sequence emitted from the guest without
+ * reprogramming the PCI endpoint after MAPTI does not result in a
+ * VLPI being mapped, as there is no callback from VFIO (the guest
+ * will get the interrupt via the normal SW injection). Fixing this is
+ * not trivial, and requires some horrible messing with the VFIO
+ * internals. Not fun. Don't do that.
+ *
+ * Then there is the scheduling. Each time a vcpu is about to run on a
+ * physical CPU, KVM must tell the corresponding redistributor about
+ * it. And if we've migrated our vcpu from one CPU to another, we must
+ * tell the ITS (so that the messages reach the right redistributor).
+ * This is done in two steps: first issue an irq_set_affinity() on the
+ * irq corresponding to the vcpu, then call its_schedule_vpe(). You
+ * must be in a non-preemptible context. On exit, another call to
+ * its_schedule_vpe() tells the redistributor that we're done with the
+ * vcpu.
+ *
+ * Finally, the doorbell handling: Each vcpu is allocated an interrupt
+ * which will fire each time a VLPI is made pending whilst the vcpu is
+ * not running. Each time the vcpu gets blocked, the doorbell
+ * interrupt gets enabled. When the vcpu is unblocked (for whatever
+ * reason), the doorbell interrupt is disabled.
+ */
+
+#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
+
+static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
+{
+ struct kvm_vcpu *vcpu = info;
+
+ vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+ kvm_vcpu_kick(vcpu);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm: Pointer to the VM being initialized
+ *
+ * We may be called each time a vITS is created, or when the
+ * vgic is initialized. This relies on kvm->lock to be
+ * held. In both cases, the number of vcpus should now be
+ * fixed.
+ */
+int vgic_v4_init(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i, nr_vcpus, ret;
+
+ if (!vgic_supports_direct_msis(kvm))
+ return 0; /* Nothing to see here... move along. */
+
+ if (dist->its_vm.vpes)
+ return 0;
+
+ nr_vcpus = atomic_read(&kvm->online_vcpus);
+
+ dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
+ GFP_KERNEL);
+ if (!dist->its_vm.vpes)
+ return -ENOMEM;
+
+ dist->its_vm.nr_vpes = nr_vcpus;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+ ret = its_alloc_vcpu_irqs(&dist->its_vm);
+ if (ret < 0) {
+ kvm_err("VPE IRQ allocation failure\n");
+ kfree(dist->its_vm.vpes);
+ dist->its_vm.nr_vpes = 0;
+ dist->its_vm.vpes = NULL;
+ return ret;
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ int irq = dist->its_vm.vpes[i]->irq;
+
+ /*
+ * Don't automatically enable the doorbell, as we're
+ * flipping it back and forth when the vcpu gets
+ * blocked. Also disable the lazy disabling, as the
+ * doorbell could kick us out of the guest too
+ * early...
+ */
+ irq_set_status_flags(irq, DB_IRQ_FLAGS);
+ ret = request_irq(irq, vgic_v4_doorbell_handler,
+ 0, "vcpu", vcpu);
+ if (ret) {
+ kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+ /*
+ * Trick: adjust the number of vpes so we know
+ * how many to nuke on teardown...
+ */
+ dist->its_vm.nr_vpes = i;
+ break;
+ }
+ }
+
+ if (ret)
+ vgic_v4_teardown(kvm);
+
+ return ret;
+}
+
+/**
+ * vgic_v4_teardown - Free the GICv4 data structures
+ * @kvm: Pointer to the VM being destroyed
+ *
+ * Relies on kvm->lock to be held.
+ */
+void vgic_v4_teardown(struct kvm *kvm)
+{
+ struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+ int i;
+
+ if (!its_vm->vpes)
+ return;
+
+ for (i = 0; i < its_vm->nr_vpes; i++) {
+ struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
+ int irq = its_vm->vpes[i]->irq;
+
+ irq_clear_status_flags(irq, DB_IRQ_FLAGS);
+ free_irq(irq, vcpu);
+ }
+
+ its_free_vcpu_irqs(its_vm);
+ kfree(its_vm->vpes);
+ its_vm->nr_vpes = 0;
+ its_vm->vpes = NULL;
+}
+
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ if (!vgic_supports_direct_msis(vcpu->kvm))
+ return 0;
+
+ return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+}
+
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+ int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ int err;
+
+ if (!vgic_supports_direct_msis(vcpu->kvm))
+ return 0;
+
+ /*
+ * Before making the VPE resident, make sure the redistributor
+ * corresponding to our current CPU expects us here. See the
+ * doc in drivers/irqchip/irq-gic-v4.c to understand how this
+ * turns into a VMOVP command at the ITS level.
+ */
+ err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+ if (err)
+ return err;
+
+ err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+ if (err)
+ return err;
+
+ /*
+ * Now that the VPE is resident, let's get rid of a potential
+ * doorbell interrupt that would still be pending.
+ */
+ err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
+
+ return err;
+}
+
+static struct vgic_its *vgic_get_its(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+ struct kvm_msi msi = (struct kvm_msi) {
+ .address_lo = irq_entry->msi.address_lo,
+ .address_hi = irq_entry->msi.address_hi,
+ .data = irq_entry->msi.data,
+ .flags = irq_entry->msi.flags,
+ .devid = irq_entry->msi.devid,
+ };
+
+ return vgic_msi_to_its(kvm, &msi);
+}
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
+ struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+ struct vgic_its *its;
+ struct vgic_irq *irq;
+ struct its_vlpi_map map;
+ int ret;
+
+ if (!vgic_supports_direct_msis(kvm))
+ return 0;
+
+ /*
+ * Get the ITS, and escape early on error (not a valid
+ * doorbell for any of our vITSs).
+ */
+ its = vgic_get_its(kvm, irq_entry);
+ if (IS_ERR(its))
+ return 0;
+
+ mutex_lock(&its->its_lock);
+
+ /* Perform the actual DevID/EventID -> LPI translation. */
+ ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+ irq_entry->msi.data, &irq);
+ if (ret)
+ goto out;
+
+ /*
+ * Emit the mapping request. If it fails, the ITS probably
+ * isn't v4 compatible, so let's silently bail out. Holding
+ * the ITS lock should ensure that nothing can modify the
+ * target vcpu.
+ */
+ map = (struct its_vlpi_map) {
+ .vm = &kvm->arch.vgic.its_vm,
+ .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
+ .vintid = irq->intid,
+ .properties = ((irq->priority & 0xfc) |
+ (irq->enabled ? LPI_PROP_ENABLED : 0) |
+ LPI_PROP_GROUP1),
+ .db_enabled = true,
+ };
+
+ ret = its_map_vlpi(virq, &map);
+ if (ret)
+ goto out;
+
+ irq->hw = true;
+ irq->host_irq = virq;
+
+out:
+ mutex_unlock(&its->its_lock);
+ return ret;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
+ struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+ struct vgic_its *its;
+ struct vgic_irq *irq;
+ int ret;
+
+ if (!vgic_supports_direct_msis(kvm))
+ return 0;
+
+ /*
+ * Get the ITS, and escape early on error (not a valid
+ * doorbell for any of our vITSs).
+ */
+ its = vgic_get_its(kvm, irq_entry);
+ if (IS_ERR(its))
+ return 0;
+
+ mutex_lock(&its->its_lock);
+
+ ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+ irq_entry->msi.data, &irq);
+ if (ret)
+ goto out;
+
+ WARN_ON(!(irq->hw && irq->host_irq == virq));
+ irq->hw = false;
+ ret = its_unmap_vlpi(virq);
+
+out:
+ mutex_unlock(&its->its_lock);
+ return ret;
+}
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
+{
+ if (vgic_supports_direct_msis(vcpu->kvm)) {
+ int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ if (irq)
+ enable_irq(irq);
+ }
+}
+
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
+{
+ if (vgic_supports_direct_msis(vcpu->kvm)) {
+ int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ if (irq)
+ disable_irq(irq);
+ }
+}
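Pulling the new vgic-v4.c pieces together: on guest entry the flush path makes the VPE resident on the current CPU (irq_set_affinity() followed by its_schedule_vpe()), on guest exit the sync path deschedules it, and while the vcpu is blocked the per-vcpu doorbell interrupt stands in for direct VLPI delivery. A condensed user-space model of that lifecycle, using stand-in functions named after the helpers this patch introduces (none of this is real kernel code):

#include <stdio.h>
#include <stdbool.h>

static void doorbell(bool on)		{ printf("doorbell %s\n", on ? "armed" : "disarmed"); }
static void vpe_resident(int cpu)	{ printf("VPE resident on CPU %d (VMOVP + schedule)\n", cpu); }
static void vpe_nonresident(void)	{ printf("VPE descheduled\n"); }

int main(void)
{
	/* kvm_vgic_v4_enable_doorbell(): a blocked vcpu cannot take VLPIs
	 * directly, so the doorbell wakes it when one becomes pending. */
	doorbell(true);

	/* kvm_vgic_v4_disable_doorbell(): the vcpu is about to run again. */
	doorbell(false);

	/* vgic_v4_flush_hwstate(): pin the VPE to this CPU's redistributor
	 * before entering the guest, then clear any stale doorbell. */
	vpe_resident(0);

	/* ... guest runs, VLPIs are delivered without software injection ... */

	/* vgic_v4_sync_hwstate(): tell the redistributor we are done. */
	vpe_nonresident();
	return 0;
}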
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e54ef2fdf73d..b168a328a9e0 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -17,6 +17,8 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include "vgic.h"
@@ -409,25 +411,56 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
return 0;
}
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+/* @irq->irq_lock must be held */
+static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ unsigned int host_irq)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ struct irq_desc *desc;
+ struct irq_data *data;
+
+ /*
+ * Find the physical IRQ number corresponding to @host_irq
+ */
+ desc = irq_to_desc(host_irq);
+ if (!desc) {
+ kvm_err("%s: no interrupt descriptor\n", __func__);
+ return -EINVAL;
+ }
+ data = irq_desc_get_irq_data(desc);
+ while (data->parent_data)
+ data = data->parent_data;
+
+ irq->hw = true;
+ irq->host_irq = host_irq;
+ irq->hwintid = data->hwirq;
+ return 0;
+}
+
+/* @irq->irq_lock must be held */
+static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
+{
+ irq->hw = false;
+ irq->hwintid = 0;
+}
+
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+ u32 vintid)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
+ int ret;
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
-
- irq->hw = true;
- irq->hwintid = phys_irq;
-
+ ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
- return 0;
+ return ret;
}
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq;
unsigned long flags;
@@ -435,14 +468,11 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
- irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
-
- irq->hw = false;
- irq->hwintid = 0;
-
+ kvm_vgic_unmap_irq(irq);
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -688,6 +718,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ WARN_ON(vgic_v4_sync_hwstate(vcpu));
+
/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
@@ -700,6 +732,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
+ WARN_ON(vgic_v4_flush_hwstate(vcpu));
+
/*
* If there are no virtual interrupts active or pending for this
* VCPU, then there is no work to do and we can bail out without
@@ -751,6 +785,9 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
if (!vcpu->kvm->arch.vgic.enabled)
return false;
+ if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
+ return true;
+
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
@@ -784,9 +821,9 @@ void vgic_kick_vcpus(struct kvm *kvm)
}
}
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
bool map_is_active;
unsigned long flags;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 4f8aecb07ae6..efbcf8f96f9c 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -237,4 +237,14 @@ static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
}
}
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid, struct vgic_irq **irq);
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+
+bool vgic_supports_direct_msis(struct kvm *kvm);
+int vgic_v4_init(struct kvm *kvm);
+void vgic_v4_teardown(struct kvm *kvm);
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
+
#endif