-rw-r--r--.gitignore3
-rw-r--r--Documentation/00-INDEX24
-rw-r--r--Documentation/RCU/00-INDEX2
-rw-r--r--Documentation/arm/00-INDEX14
-rw-r--r--Documentation/blackfin/00-INDEX6
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/devicetree/00-INDEX2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt18
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt5
-rw-r--r--Documentation/devicetree/bindings/power/bq2415x.txt47
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt6
-rw-r--r--Documentation/dvb/contributors.txt2
-rw-r--r--Documentation/fb/00-INDEX6
-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/nfs/00-INDEX4
-rw-r--r--Documentation/ide/00-INDEX2
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/laptops/00-INDEX6
-rw-r--r--Documentation/leds/00-INDEX8
-rw-r--r--Documentation/m68k/00-INDEX2
-rw-r--r--Documentation/networking/00-INDEX30
-rw-r--r--Documentation/power/00-INDEX6
-rw-r--r--Documentation/ptp/testptp.c11
-rw-r--r--Documentation/s390/00-INDEX8
-rw-r--r--Documentation/scheduler/00-INDEX2
-rw-r--r--Documentation/scsi/00-INDEX16
-rw-r--r--Documentation/serial/00-INDEX6
-rw-r--r--Documentation/spi/00-INDEX22
-rw-r--r--Documentation/spi/spi-summary17
-rw-r--r--Documentation/timers/00-INDEX2
-rw-r--r--Documentation/virtual/kvm/00-INDEX2
-rw-r--r--Documentation/vm/00-INDEX4
-rw-r--r--Documentation/w1/masters/00-INDEX4
-rw-r--r--Documentation/w1/slaves/00-INDEX2
-rw-r--r--Documentation/x86/00-INDEX18
-rw-r--r--Documentation/zh_CN/arm64/booting.txt65
-rw-r--r--Documentation/zh_CN/arm64/memory.txt46
-rw-r--r--Documentation/zh_CN/arm64/tagged-pointers.txt52
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi4
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi4
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi4
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/configs/defconfig18
-rw-r--r--arch/arm64/include/asm/atomic.h53
-rw-r--r--arch/arm64/include/asm/barrier.h2
-rw-r--r--arch/arm64/include/asm/cacheflush.h1
-rw-r--r--arch/arm64/include/asm/cmpxchg.h17
-rw-r--r--arch/arm64/include/asm/esr.h2
-rw-r--r--arch/arm64/include/asm/futex.h10
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/spinlock.h10
-rw-r--r--arch/arm64/include/asm/unistd32.h5
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/arm64/kernel/kuser32.S6
-rw-r--r--arch/arm64/kernel/vdso.c4
-rw-r--r--arch/arm64/kernel/vdso/Makefile2
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S7
-rw-r--r--arch/arm64/lib/bitops.S3
-rw-r--r--arch/arm64/mm/dma-mapping.c1
-rw-r--r--arch/arm64/mm/mmu.c12
-rw-r--r--arch/arm64/mm/pgd.c11
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/microblaze/include/asm/delay.h2
-rw-r--r--arch/microblaze/include/asm/io.h6
-rw-r--r--arch/microblaze/kernel/head.S2
-rw-r--r--arch/mips/alchemy/devboards/db1000.c7
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/parisc/hpux/fs.c15
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/iommu.h1
-rw-r--r--arch/powerpc/include/asm/sections.h12
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/eeh_driver.c8
-rw-r--r--arch/powerpc/kernel/iommu.c12
-rw-r--r--arch/powerpc/kernel/irq.c5
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c6
-rw-r--r--arch/powerpc/kernel/reloc_64.S4
-rw-r--r--arch/powerpc/kernel/setup_32.c5
-rw-r--r--arch/powerpc/mm/hash_utils_64.c14
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/power8-pmu.c144
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c84
-rw-r--r--arch/powerpc/platforms/powernv/pci.c10
-rw-r--r--arch/powerpc/platforms/powernv/pci.h6
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h8
-rw-r--r--arch/powerpc/platforms/powernv/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/setup.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c38
-rw-r--r--arch/powerpc/xmon/xmon.c24
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/crypto/aes_s390.c65
-rw-r--r--arch/s390/crypto/des_s390.c95
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/mm/page-states.c10
-rw-r--r--arch/x86/Kconfig7
-rw-r--r--arch/x86/Kconfig.debug1
-rw-r--r--arch/x86/include/asm/amd_nb.h2
-rw-r--r--arch/x86/include/asm/pgtable.h14
-rw-r--r--arch/x86/include/asm/tlbflush.h6
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/kernel/amd_nb.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c5
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/intel.c10
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_early.c43
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/irq.c9
-rw-r--r--arch/x86/kernel/quirks.c37
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/mm/fault.c14
-rw-r--r--arch/x86/mm/numa.c21
-rw-r--r--arch/x86/mm/numa_32.c2
-rw-r--r--arch/x86/mm/srat.c16
-rw-r--r--arch/x86/mm/tlb.c52
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c10
-rw-r--r--arch/x86/xen/enlighten.c12
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/x86/xen/p2m.c17
-rw-r--r--block/blk-core.c20
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c101
-rw-r--r--block/blk-lib.c8
-rw-r--r--block/blk-merge.c91
-rw-r--r--block/blk-mq-tag.c2
-rw-r--r--block/blk-mq.c143
-rw-r--r--block/blk-mq.h4
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-timeout.c2
-rw-r--r--block/blk.h2
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/container.c5
-rw-r--r--drivers/acpi/dock.c5
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/utils.c4
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/base/component.c8
-rw-r--r--drivers/block/null_blk.c97
-rw-r--r--drivers/block/nvme-core.c610
-rw-r--r--drivers/block/nvme-scsi.c147
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c81
-rw-r--r--drivers/block/xen-blkback/common.h5
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c11
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c30
-rw-r--r--drivers/crypto/nx/nx-842.c29
-rw-r--r--drivers/edac/edac_mc.c13
-rw-r--r--drivers/edac/edac_mc_sysfs.c10
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-intel-mid.c4
-rw-r--r--drivers/gpio/gpio-xtensa.c16
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c66
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c17
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c12
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c179
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/btcd.h4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c144
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c330
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c93
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c467
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c6
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hwmon/da9055-hwmon.c4
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c68
-rw-r--r--drivers/iio/accel/bma180.c16
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/imu/adis16400.h1
-rw-r--r--drivers/iio/imu/adis16400_core.c10
-rw-r--r--drivers/iio/light/tsl2563.c16
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/mag3110.c8
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c185
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c22
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c18
-rw-r--r--drivers/infiniband/hw/mlx5/user.h7
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c4
-rw-r--r--drivers/irqchip/irq-zevio.c127
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/bset.c7
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid5.c90
-rw-r--r--drivers/media/dvb-frontends/cx24117.c10
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/s5k5baf.c30
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c7
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c1
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c12
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c5
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/misc/genwqe/card_dev.c1
-rw-r--r--drivers/misc/mei/client.c11
-rw-r--r--drivers/misc/mic/host/mic_virtio.c3
-rw-r--r--drivers/misc/sgi-gru/grukdump.c11
-rw-r--r--drivers/net/bonding/bond_main.c26
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/dev.c15
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/janz-ican3.c20
-rw-r--r--drivers/net/can/vcan.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c17
-rw-r--r--drivers/net/ethernet/ethoc.c138
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c14
-rw-r--r--drivers/net/irda/Kconfig7
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ep7211-sir.c70
-rw-r--r--drivers/net/phy/dp83640.c19
-rw-r--r--drivers/net/phy/mdio-sun4i.c3
-rw-r--r--drivers/net/phy/phy_device.c38
-rw-r--r--drivers/net/usb/Kconfig16
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/hso.c32
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c17
-rw-r--r--drivers/net/usb/sr9800.c870
-rw-r--r--drivers/net/usb/sr9800.h202
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wan/dlci.c5
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c73
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c23
-rw-r--r--drivers/net/xen-netback/common.h6
-rw-r--r--drivers/net/xen-netback/interface.c1
-rw-r--r--drivers/net/xen-netback/netback.c16
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c70
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/pinctrl-at91.c10
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c10
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c2
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/power/ds2782_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17040_battery.c5
-rw-r--r--drivers/regulator/ab3100.c4
-rw-r--r--drivers/regulator/core.c9
-rw-r--r--drivers/regulator/da9055-regulator.c4
-rw-r--r--drivers/regulator/max14577.c5
-rw-r--r--drivers/regulator/s2mps11.c1
-rw-r--r--drivers/s390/cio/cio.c40
-rw-r--r--drivers/s390/cio/qdio.h14
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-nuc900.c2
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/android/ashmem.c45
-rw-r--r--drivers/staging/android/ion/compat_ion.c26
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_priv.h1
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c6
-rw-r--r--drivers/staging/android/sw_sync.h17
-rw-r--r--drivers/staging/android/sync.c14
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c17
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c6
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c330
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c3
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c13
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c6
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c55
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c22
-rw-r--r--drivers/staging/lustre/TODO5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h3
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c4
-rw-r--r--drivers/staging/netlogic/xlr_net.c5
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c273
-rw-r--r--drivers/staging/ozwpan/ozproto.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c12
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c2
-rw-r--r--drivers/staging/rtl8821ae/Kconfig2
-rw-r--r--drivers/staging/rtl8821ae/wifi.h2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/names.c8
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c2
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/omap2/dss/dispc.c16
-rw-r--r--drivers/video/omap2/dss/dpi.c2
-rw-r--r--drivers/video/omap2/dss/sdi.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntdev.c13
-rw-r--r--drivers/xen/grant-table.c89
-rw-r--r--drivers/xen/xencomm.c219
-rw-r--r--fs/bio-integrity.c13
-rw-r--r--fs/bio.c15
-rw-r--r--fs/btrfs/check-integrity.c4
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/disk-io.c4
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/btrfs/send.c12
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/buffer.c6
-rw-r--r--fs/cifs/cifsacl.c28
-rw-r--r--fs/cifs/cifsglob.h9
-rw-r--r--fs/cifs/cifsproto.h3
-rw-r--r--fs/cifs/cifssmb.c15
-rw-r--r--fs/cifs/file.c35
-rw-r--r--fs/cifs/inode.c13
-rw-r--r--fs/cifs/smb1ops.c8
-rw-r--r--fs/cifs/smb2pdu.c5
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/xattr.c15
-rw-r--r--fs/exec.c45
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/file.c2
-rw-r--r--fs/jfs/xattr.c14
-rw-r--r--fs/kernfs/dir.c12
-rw-r--r--fs/lockd/svclock.c8
-rw-r--r--fs/namei.c30
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/nfs3acl.c34
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4proc.c8
-rw-r--r--fs/nfs/nfs4session.c25
-rw-r--r--fs/nfs/nfs4session.h2
-rw-r--r--fs/nfsd/nfs4acl.c9
-rw-r--r--fs/ntfs/file.c2
-rw-r--r--fs/ocfs2/alloc.c40
-rw-r--r--fs/ocfs2/file.c52
-rw-r--r--fs/ocfs2/localalloc.c42
-rw-r--r--fs/ocfs2/localalloc.h6
-rw-r--r--fs/ocfs2/namei.c17
-rw-r--r--fs/posix_acl.c18
-rw-r--r--fs/proc/vmcore.c26
-rw-r--r--fs/sync.c17
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/can/skb.h38
-rw-r--r--include/linux/compiler-gcc4.h6
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/gpio/consumer.h4
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nvme.h6
-rw-r--r--include/linux/of.h153
-rw-r--r--include/linux/of_device.h4
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/smp.h3
-rw-r--r--include/linux/spi/spi.h7
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmstat.h8
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dn_route.h2
-rw-r--r--include/net/ethoc.h1
-rw-r--r--include/net/ipx.h11
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h9
-rw-r--r--include/net/netfilter/nft_reject.h25
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--include/trace/events/power.h7
-rw-r--r--include/uapi/linux/in6.h23
-rw-r--r--include/uapi/linux/mic_ioctl.h2
-rw-r--r--include/uapi/linux/nvme.h11
-rw-r--r--include/uapi/xen/Kbuild2
-rw-r--r--include/uapi/xen/gntalloc.h (renamed from include/xen/gntalloc.h)0
-rw-r--r--include/uapi/xen/gntdev.h (renamed from include/xen/gntdev.h)0
-rw-r--r--include/xen/grant_table.h8
-rw-r--r--include/xen/interface/io/blkif.h34
-rw-r--r--include/xen/interface/xencomm.h41
-rw-r--r--include/xen/xencomm.h77
-rw-r--r--init/main.c2
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/kmod.c2
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile1
-rw-r--r--lib/percpu_ida.c7
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/memory-failure.c6
-rw-r--r--mm/page-writeback.c5
-rw-r--r--mm/slub.c38
-rw-r--r--mm/swap_state.c63
-rw-r--r--mm/swapfile.c11
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c5
-rw-r--r--net/bridge/br_device.c54
-rw-r--r--net/bridge/br_fdb.c137
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_private.h13
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_vlan.c27
-rw-r--r--net/caif/caif_dev.c1
-rw-r--r--net/caif/cfsrvl.c1
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/raw.c1
-rw-r--r--net/ceph/messenger.c8
-rw-r--r--net/ceph/osd_client.c84
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/fib_rules.c7
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/sock.c6
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/ieee802154/6lowpan.c23
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/ip_tunnel.c29
-rw-r--r--net/ipv4/netfilter/Kconfig5
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c5
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c75
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c18
-rw-r--r--net/ipv4/tcp_output.c15
-rw-r--r--net/ipv4/udp_offload.c17
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/netfilter/Kconfig5
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c76
-rw-r--r--net/ipx/af_ipx.c22
-rw-r--r--net/ipx/ipx_route.c4
-rw-r--r--net/mac80211/cfg.c44
-rw-r--r--net/mac80211/ht.c4
-rw-r--r--net/mac80211/ibss.c5
-rw-r--r--net/mac80211/iface.c27
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c55
-rw-r--r--net/netfilter/nf_synproxy_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c82
-rw-r--r--net/netfilter/nf_tables_core.c6
-rw-r--r--net/netfilter/nft_ct.c16
-rw-r--r--net/netfilter/nft_log.c5
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_queue.c4
-rw-r--r--net/netfilter/nft_rbtree.c16
-rw-r--r--net/netfilter/nft_reject.c89
-rw-r--r--net/netfilter/nft_reject_inet.c63
-rw-r--r--net/netfilter/xt_CT.c7
-rw-r--r--net/openvswitch/datapath.c23
-rw-r--r--net/openvswitch/flow_table.c88
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/core.h4
-rw-r--r--net/wireless/nl80211.c32
-rw-r--r--net/wireless/nl80211.h8
-rw-r--r--net/wireless/scan.c40
-rw-r--r--net/wireless/sme.c2
-rwxr-xr-xscripts/checkpatch.pl4
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--security/Kconfig2
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--sound/pci/hda/hda_codec.c34
-rw-r--r--sound/pci/hda/hda_generic.c8
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_analog.c27
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_realtek.c20
-rw-r--r--sound/pci/hda/patch_sigmatel.c27
-rw-r--r--sound/pci/hda/thinkpad_helper.c1
-rw-r--r--sound/usb/Kconfig1
-rw-r--r--tools/perf/builtin-buildid-cache.c33
-rw-r--r--tools/perf/builtin-record.c10
-rw-r--r--tools/perf/design.txt1
-rw-r--r--tools/perf/perf.h4
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c10
-rw-r--r--tools/perf/util/event.c36
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/include/asm/hash.h6
-rw-r--r--tools/perf/util/machine.c42
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/map.c5
-rw-r--r--tools/perf/util/map.h1
-rw-r--r--tools/perf/util/symbol-elf.c4
-rw-r--r--tools/perf/util/symbol.c65
-rw-r--r--virt/kvm/arm/vgic.c1
-rw-r--r--virt/kvm/coalesced_mmio.c8
627 files changed, 8374 insertions, 3673 deletions
diff --git a/.gitignore b/.gitignore
index 7e9932e55475..42fa0d5626a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,3 +92,6 @@ extra_certificates
signing_key.priv
signing_key.x509
x509.genkey
+
+# Kconfig presets
+all.config
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 38f8444bdd0e..07de7e19b4ce 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -29,6 +29,8 @@ DMA-ISA-LPC.txt
- How to do DMA with ISA (and LPC) devices.
DMA-attributes.txt
- listing of the various possible attributes a DMA region can have
+dmatest.txt
+ - how to compile, configure and use the dmatest system.
DocBook/
- directory with DocBook templates etc. for kernel documentation.
EDID/
@@ -77,6 +79,8 @@ arm/
- directory with info about Linux on the ARM architecture.
arm64/
- directory with info about Linux on the 64 bit ARM architecture.
+assoc_array.txt
+ - generic associative array intro.
atomic_ops.txt
- semantics and behavior of atomic and bitmask operations.
auxdisplay/
@@ -87,6 +91,8 @@ bad_memory.txt
- how to use kernel parameters to exclude bad RAM regions.
basic_profiling.txt
- basic instructions for those who wants to profile Linux kernel.
+bcache.txt
+ - Block-layer cache on fast SSDs to improve slow (raid) I/O performance.
binfmt_misc.txt
- info on the kernel support for extra binary formats.
blackfin/
@@ -171,6 +177,8 @@ early-userspace/
- info about initramfs, klibc, and userspace early during boot.
edac.txt
- information on EDAC - Error Detection And Correction
+efi-stub.txt
+ - How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
eisa.txt
- info on EISA bus support.
email-clients.txt
@@ -195,8 +203,8 @@ futex-requeue-pi.txt
- info on requeueing of tasks from a non-PI futex to a PI futex
gcov.txt
- use of GCC's coverage testing tool "gcov" with the Linux kernel
-gpio.txt
- - overview of GPIO (General Purpose Input/Output) access conventions.
+gpio/
+ - gpio related documentation
hid/
- directory with information on human interface devices
highuid.txt
@@ -255,6 +263,8 @@ kernel-docs.txt
- listing of various WWW + books that document kernel internals.
kernel-parameters.txt
- summary listing of command line / boot prompt args for the kernel.
+kernel-per-CPU-kthreads.txt
+ - List of all per-CPU kthreads and how they introduce jitter.
kmemcheck.txt
- info on dynamic checker that detects uses of uninitialized memory.
kmemleak.txt
@@ -299,8 +309,6 @@ memory-devices/
- directory with info on parts like the Texas Instruments EMIF driver
memory-hotplug.txt
- Hotpluggable memory support, how to use and current status.
-memory.txt
- - info on typical Linux memory problems.
metag/
- directory with info about Linux on Meta architecture.
mips/
@@ -311,6 +319,8 @@ mmc/
- directory with info about the MMC subsystem
mn10300/
- directory with info about the mn10300 architecture port
+module-signing.txt
+ - Kernel module signing for increased security when loading modules.
mtd/
- directory with info about memory technology devices (flash)
mono.txt
@@ -343,6 +353,8 @@ pcmcia/
- info on the Linux PCMCIA driver.
percpu-rw-semaphore.txt
- RCU based read-write semaphore optimized for locking for reading
+phy.txt
+ - Description of the generic PHY framework.
pi-futex.txt
- documentation on lightweight priority inheritance futexes.
pinctrl.txt
@@ -431,6 +443,8 @@ sysrq.txt
- info on the magic SysRq key.
target/
- directory with info on generating TCM v4 fabric .ko modules
+this_cpu_ops.txt
+ - Rationale behind this_cpu operations and how to use them.
thermal/
- directory with information on managing thermal issues (CPU/temp)
trace/
@@ -469,6 +483,8 @@ wimax/
- directory with info about Intel Wireless Wimax Connections
workqueue.txt
- information on the Concurrency Managed Workqueue implementation
+ww-mutex-design.txt
+ - Intro to Mutex wait/wound deadlock handling.
x86/x86_64/
- directory with info on Linux support for AMD x86-64 (Hammer) machines.
xtensa/
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 1d7a885761f5..fa57139f50bf 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -8,6 +8,8 @@ listRCU.txt
- Using RCU to Protect Read-Mostly Linked Lists
lockdep.txt
- RCU and lockdep checking
+lockdep-splat.txt
+ - RCU Lockdep splats explained.
NMI-RCU.txt
- Using RCU to Protect Dynamic NMI Handlers
rcubarrier.txt
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 36420e116c90..a94090cc785d 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -4,6 +4,8 @@ Booting
- requirements for booting
Interrupts
- ARM Interrupt subsystem documentation
+IXP4xx
+ - Intel IXP4xx Network processor.
msm
- MSM specific documentation
Netwinder
@@ -24,8 +26,16 @@ SPEAr
- ST SPEAr platform Linux Overview
VFP/
- Release notes for Linux Kernel Vector Floating Point support code
+cluster-pm-race-avoidance.txt
+ - Algorithm for CPU and Cluster setup/teardown
empeg/
- Ltd's Empeg MP3 Car Audio Player
+firmware.txt
+ - Secure firmware registration and calling.
+kernel_mode_neon.txt
+ - How to use NEON instructions in kernel mode
+kernel_user_helpers.txt
+ - Helper functions in kernel space made available for userspace.
mem_alignment
- alignment abort handler documentation
memory.txt
@@ -34,3 +44,7 @@ nwfpe/
- NWFPE floating point emulator documentation
swp_emulation
- SWP/SWPB emulation handler/logging description
+tcm.txt
+ - ARM Tightly Coupled Memory
+vlocks.txt
+ - Voting locks, low-level mechanism relying on memory system atomic writes.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index 2df0365f2dff..c54fcdd4ae9f 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,8 +1,10 @@
00-INDEX
- This file
-
+Makefile
+ - Makefile for gptimers example file.
bfin-gpio-notes.txt
- Notes in developing/using bfin-gpio driver.
-
bfin-spi-notes.txt
- Notes for using bfin spi bus driver.
+gptimers-example.c
+ - gptimers example
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index 929d9904f74b..e840b47613f7 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -14,6 +14,8 @@ deadline-iosched.txt
- Deadline IO scheduler tunables
ioprio.txt
- Block io priorities (in CFQ scheduler)
+null_blk.txt
+ - Null block for block-layer benchmarking.
queue-sysfs.txt
- Queue's sysfs entries
request.txt
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
index b78f691fd847..8c4102c6a5e7 100644
--- a/Documentation/devicetree/00-INDEX
+++ b/Documentation/devicetree/00-INDEX
@@ -8,3 +8,5 @@ https://lists.ozlabs.org/listinfo/devicetree-discuss
- this file
booting-without-of.txt
- Booting Linux without Open Firmware, describes history and format of device trees.
+usage-model.txt
+ - How Linux uses DT and what DT aims to solve.
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
new file mode 100644
index 000000000000..aee38e7c13e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
@@ -0,0 +1,18 @@
+TI-NSPIRE interrupt controller
+
+Required properties:
+- compatible: Compatible property value should be "lsi,zevio-intc".
+
+- reg: Physical base address of the controller and length of memory mapped
+ region.
+
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Example:
+
+interrupt-controller {
+ compatible = "lsi,zevio-intc";
+ interrupt-controller;
+ reg = <0xDC000000 0x1000>;
+ #interrupt-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index b90bfcd138ff..863d5b8155c7 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -1,7 +1,8 @@
* Allwinner EMAC ethernet controller
Required properties:
-- compatible: should be "allwinner,sun4i-emac".
+- compatible: should be "allwinner,sun4i-a10-emac" (Deprecated:
+ "allwinner,sun4i-emac")
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
- phy: A phandle to a phy node defining the PHY address (as the reg
@@ -14,7 +15,7 @@ Optional properties:
Example:
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
clocks = <&ahb_gates 17>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
index 00b9f9a3ec1d..4ec56413779d 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
@@ -1,7 +1,8 @@
* Allwinner A10 MDIO Ethernet Controller interface
Required properties:
-- compatible: should be "allwinner,sun4i-mdio".
+- compatible: should be "allwinner,sun4i-a10-mdio"
+ (Deprecated: "allwinner,sun4i-mdio").
- reg: address and length of the register set for the device.
Optional properties:
@@ -9,7 +10,7 @@ Optional properties:
Example at the SoC level:
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt
new file mode 100644
index 000000000000..d0327f0b59ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/bq2415x.txt
@@ -0,0 +1,47 @@
+Binding for TI bq2415x Li-Ion Charger
+
+Required properties:
+- compatible: Should contain one of the following:
+ * "ti,bq24150"
+ * "ti,bq24150"
+ * "ti,bq24150a"
+ * "ti,bq24151"
+ * "ti,bq24151a"
+ * "ti,bq24152"
+ * "ti,bq24153"
+ * "ti,bq24153a"
+ * "ti,bq24155"
+ * "ti,bq24156"
+ * "ti,bq24156a"
+ * "ti,bq24158"
+- reg: integer, i2c address of the device.
+- ti,current-limit: integer, initial maximum current charger can pull
+ from power supply in mA.
+- ti,weak-battery-voltage: integer, weak battery voltage threshold in mV.
+ The chip will use slow precharge if battery voltage
+ is below this value.
+- ti,battery-regulation-voltage: integer, maximum charging voltage in mV.
+- ti,charge-current: integer, maximum charging current in mA.
+- ti,termination-current: integer, charge will be terminated when current in
+ constant-voltage phase drops below this value (in mA).
+- ti,resistor-sense: integer, value of sensing resistor in milliohm.
+
+Optional properties:
+- ti,usb-charger-detection: phandle to usb charger detection device.
+ (required for auto mode)
+
+Example from Nokia N900:
+
+bq24150a {
+ compatible = "ti,bq24150a";
+ reg = <0x6b>;
+
+ ti,current-limit = <100>;
+ ti,weak-battery-voltage = <3400>;
+ ti,battery-regulation-voltage = <4200>;
+ ti,charge-current = <650>;
+ ti,termination-current = <100>;
+ ti,resistor-sense = <68>;
+
+ ti,usb-charger-detection = <&isp1704>;
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3f900cd51bf0..40ce2df0e0e9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ ad Avionic Design GmbH
adi Analog Devices, Inc.
aeroflexgaisler Aeroflex Gaisler AB
ak Asahi Kasei Corp.
+allwinner Allwinner Technology Co., Ltd.
altr Altera Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
amstaos AMS-Taos Inc.
@@ -40,6 +41,7 @@ gmt Global Mixed-mode Technology, Inc.
gumstix Gumstix, Inc.
haoyu Haoyu Microelectronic Co. Ltd.
hisilicon Hisilicon Limited.
+honeywell Honeywell
hp Hewlett Packard
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
@@ -55,6 +57,7 @@ maxim Maxim Integrated Products
microchip Microchip Technology Inc.
mosaixtech Mosaix Technologies, Inc.
national National Semiconductor
+neonode Neonode Inc.
nintendo Nintendo
nvidia NVIDIA
nxp NXP Semiconductors
@@ -64,7 +67,7 @@ phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
powervr PowerVR (deprecated, use img)
qca Qualcomm Atheros, Inc.
-qcom Qualcomm, Inc.
+qcom Qualcomm Technologies, Inc
ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
realtek Realtek Semiconductor Corp.
@@ -78,6 +81,7 @@ silabs Silicon Laboratories
simtek
sirf SiRF Technology, Inc.
snps Synopsys, Inc.
+spansion Spansion Inc.
st STMicroelectronics
ste ST-Ericsson
stericsson ST-Ericsson
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 47c30098dab6..731a009723c7 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -78,7 +78,7 @@ Peter Beutner <p.beutner@gmx.net>
Wilson Michaels <wilsonmichaels@earthlink.net>
for the lgdt330x frontend driver, and various bugfixes
-Michael Krufky <mkrufky@m1k.net>
+Michael Krufky <mkrufky@linuxtv.org>
for maintaining v4l/dvb inter-tree dependencies
Taylor Jacob <rtjacob@earthlink.net>
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index 30a70542e823..fe85e7c5907a 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -5,6 +5,8 @@ please mail me.
00-INDEX
- this file.
+api.txt
+ - The frame buffer API between applications and buffer devices.
arkfb.txt
- info on the fbdev driver for ARK Logic chips.
aty128fb.txt
@@ -51,12 +53,16 @@ sh7760fb.txt
- info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
sisfb.txt
- info on the framebuffer device driver for various SiS chips.
+sm501.txt
+ - info on the framebuffer device driver for the sm501 video framebuffer.
sstfb.txt
- info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
tgafb.txt
- info on the TGA (DECChip 21030) frame buffer driver.
tridentfb.txt
info on the framebuffer driver for some Trident chip based cards.
+udlfb.txt
+ - Driver for DisplayLink USB 2.0 chips.
uvesafb.txt
- info on the userspace VESA (VBE2+ compliant) frame buffer device.
vesafb.txt
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 632211cbdd56..ac28149aede4 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,6 +2,8 @@
- this file (info on some of the filesystems supported by linux).
Locking
- info on locking rules as they pertain to Linux VFS.
+Makefile
+ - Makefile for building the filesystems part of DocBook.
9p.txt
- 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
adfs.txt
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 66eb6c8c5334..53f3b596ac0d 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,6 +12,8 @@ nfs41-server.txt
- info on the Linux server implementation of NFSv4 minor version 1.
nfs-rdma.txt
- how to install and setup the Linux NFS/RDMA client and server software
+nfsd-admin-interfaces.txt
+ - Administrative interfaces for nfsd.
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
pnfs.txt
@@ -20,5 +22,5 @@ rpc-cache.txt
- introduction to the caching mechanisms in the sunrpc layer.
idmapper.txt
- information for configuring request-keys to be used by idmapper
-knfsd-rpcgss.txt
+rpc-server-gss.txt
- Information on GSS authentication support in the NFS Server
diff --git a/Documentation/ide/00-INDEX b/Documentation/ide/00-INDEX
index d6b778842b75..22f98ca79539 100644
--- a/Documentation/ide/00-INDEX
+++ b/Documentation/ide/00-INDEX
@@ -10,3 +10,5 @@ ide-tape.txt
- info on the IDE ATAPI streaming tape driver
ide.txt
- important info for users of ATA devices (IDE/EIDE disks and CD-ROMS).
+warm-plug-howto.txt
+ - using sysfs to remove and add IDE devices.
\ No newline at end of file
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8f441dab0396..7116fda7077f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1726,16 +1726,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
option description.
memmap=nn[KMG]@ss[KMG]
- [KNL] Force usage of a specific region of memory
- Region of memory to be used, from ss to ss+nn.
+ [KNL] Force usage of a specific region of memory.
+ Region of memory to be used is from ss to ss+nn.
memmap=nn[KMG]#ss[KMG]
[KNL,ACPI] Mark specific memory as ACPI data.
- Region of memory to be used, from ss to ss+nn.
+ Region of memory to be marked is from ss to ss+nn.
memmap=nn[KMG]$ss[KMG]
[KNL,ACPI] Mark specific memory as reserved.
- Region of memory to be used, from ss to ss+nn.
+ Region of memory to be reserved is from ss to ss+nn.
Example: Exclude memory from 0x18690000-0x1869ffff
memmap=64K$0x18690000
or
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index fa688538e757..d13b9a9a9e00 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,15 @@
00-INDEX
- This file
-acer-wmi.txt
- - information on the Acer Laptop WMI Extras driver.
+Makefile
+ - Makefile for building dslm example program.
asus-laptop.txt
- information on the Asus Laptop Extras driver.
disk-shock-protection.txt
- information on hard disk shock protection.
dslm.c
- Simple Disk Sleep Monitor program
+hpfall.c
+ - (HP) laptop accelerometer program for disk protection.
laptop-mode.txt
- how to conserve battery power using laptop-mode.
sony-laptop.txt
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 1ecd1596633e..b4ef1f34e25f 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -1,3 +1,7 @@
+00-INDEX
+ - This file
+leds-blinkm.txt
+ - Driver for BlinkM LED-devices.
leds-class.txt
- documents LED handling under Linux.
leds-lp3944.txt
@@ -12,3 +16,7 @@ leds-lp55xx.txt
- description about lp55xx common driver.
leds-lm3556.txt
- notes on how to use the leds-lm3556 driver.
+ledtrig-oneshot.txt
+ - One-shot LED trigger for both sporadic and dense events.
+ledtrig-transient.txt
+ - LED Transient Trigger, one shot timer activation.
diff --git a/Documentation/m68k/00-INDEX b/Documentation/m68k/00-INDEX
index a014e9f00765..2be8c6b00e74 100644
--- a/Documentation/m68k/00-INDEX
+++ b/Documentation/m68k/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- this file
+README.buddha
+ - Amiga Buddha and Catweasel IDE Driver
kernel-options.txt
- command line options for Linux/m68k
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index f11580f8719a..557b6ef70c26 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -6,8 +6,14 @@
- information on the 3Com Etherlink III Series Ethernet cards.
6pack.txt
- info on the 6pack protocol, an alternative to KISS for AX.25
-DLINK.txt
- - info on the D-Link DE-600/DE-620 parallel port pocket adapters
+LICENSE.qla3xxx
+ - GPLv2 for QLogic Linux Networking HBA Driver
+LICENSE.qlge
+ - GPLv2 for QLogic Linux qlge NIC Driver
+LICENSE.qlcnic
+ - GPLv2 for QLogic Linux qlcnic NIC Driver
+Makefile
+ - Makefile for docsrc.
PLIP.txt
- PLIP: The Parallel Line Internet Protocol device driver
README.ipw2100
@@ -17,7 +23,7 @@ README.ipw2200
README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
alias.txt
- - info on using alias network devices
+ - info on using alias network devices.
arcnet-hardware.txt
- tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
arcnet.txt
@@ -80,7 +86,7 @@ framerelay.txt
- info on using Frame Relay/Data Link Connection Identifier (DLCI).
gen_stats.txt
- Generic networking statistics for netlink users.
-generic_hdlc.txt
+generic-hdlc.txt
- The generic High Level Data Link Control (HDLC) layer.
generic_netlink.txt
- info on Generic Netlink
@@ -88,6 +94,8 @@ gianfar.txt
- Gianfar Ethernet Driver.
i40e.txt
- README for the Intel Ethernet Controller XL710 Driver (i40e).
+i40evf.txt
+ - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
ieee802154.txt
- Linux IEEE 802.15.4 implementation, API and drivers
igb.txt
@@ -102,6 +110,8 @@ ipddp.txt
- AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
iphase.txt
- Interphase PCI ATM (i)Chip IA Linux driver info.
+ipsec.txt
+ - Note on not compressing IPSec payload and resulting failed policy check.
ipv6.txt
- Options to the ipv6 kernel module.
ipvs-sysctl.txt
@@ -120,6 +130,8 @@ lapb-module.txt
- programming information of the LAPB module.
ltpc.txt
- the Apple or Farallon LocalTalk PC card driver
+mac80211-auth-assoc-deauth.txt
+ - authentication and association / deauth-disassoc with mac80211
mac80211-injection.txt
- HOWTO use packet injection with mac80211
multiqueue.txt
@@ -134,6 +146,10 @@ netdevices.txt
- info on network device driver functions exported to the kernel.
netif-msg.txt
- Design of the network interface message level setting (NETIF_MSG_*).
+netlink_mmap.txt
+ - memory mapped I/O with netlink
+nf_conntrack-sysctl.txt
+ - list of netfilter-sysctl knobs.
nfc.txt
- The Linux Near Field Communication (NFS) subsystem.
openvswitch.txt
@@ -176,7 +192,7 @@ skfp.txt
- SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
smc9.txt
- the driver for SMC's 9000 series of Ethernet cards
-spider-net.txt
+spider_net.txt
- README for the Spidernet Driver (as found in PS3 / Cell BE).
stmmac.txt
- README for the STMicro Synopsys Ethernet driver.
@@ -188,6 +204,8 @@ tcp.txt
- short blurb on how TCP output takes place.
tcp-thin.txt
- kernel tuning options for low rate 'thin' TCP streams.
+team.txt
+ - pointer to information for ethernet teaming devices.
tlan.txt
- ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
tproxy.txt
@@ -200,6 +218,8 @@ vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
vxge.txt
- README for the Neterion X3100 PCIe Server Adapter.
+vxlan.txt
+ - Virtual extensible LAN overview
x25.txt
- general info on X.25 development.
x25-iface.txt
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index a4d682f54231..ad04cc8097ed 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -4,6 +4,8 @@ apm-acpi.txt
- basic info about the APM and ACPI support.
basic-pm-debugging.txt
- Debugging suspend and resume
+charger-manager.txt
+ - Battery charger management.
devices.txt
- How drivers interact with system-wide power management
drivers-testing.txt
@@ -22,6 +24,8 @@ pm_qos_interface.txt
- info on Linux PM Quality of Service interface
power_supply_class.txt
- Tells userspace about battery, UPS, AC or DC power supply properties
+runtime_pm.txt
+ - Power management framework for I/O devices.
s2ram.txt
- How to get suspend to ram working (and debug it when it isn't)
states.txt
@@ -38,7 +42,5 @@ tricks.txt
- How to trick software suspend (to disk) into working when it isn't
userland-swsusp.txt
- Experimental implementation of software suspend in userspace
-video_extension.txt
- - ACPI video extensions
video.txt
- Video issues during resume from suspend
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index a74d0a84d329..4aba0436da65 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -117,6 +117,7 @@ static void usage(char *progname)
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -g get the ptp clock time\n"
" -h prints this message\n"
+ " -i val index for event/trigger\n"
" -k val measure the time offset between system and phc clock\n"
" for 'val' times (Maximum 25)\n"
" -p val enable output with a period of 'val' nanoseconds\n"
@@ -154,6 +155,7 @@ int main(int argc, char *argv[])
int capabilities = 0;
int extts = 0;
int gettime = 0;
+ int index = 0;
int oneshot = 0;
int pct_offset = 0;
int n_samples = 0;
@@ -167,7 +169,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
+ while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:p:P:sSt:v"))) {
switch (c) {
case 'a':
oneshot = atoi(optarg);
@@ -190,6 +192,9 @@ int main(int argc, char *argv[])
case 'g':
gettime = 1;
break;
+ case 'i':
+ index = atoi(optarg);
+ break;
case 'k':
pct_offset = 1;
n_samples = atoi(optarg);
@@ -301,7 +306,7 @@ int main(int argc, char *argv[])
if (extts) {
memset(&extts_request, 0, sizeof(extts_request));
- extts_request.index = 0;
+ extts_request.index = index;
extts_request.flags = PTP_ENABLE_FEATURE;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
@@ -375,7 +380,7 @@ int main(int argc, char *argv[])
return -1;
}
memset(&perout_request, 0, sizeof(perout_request));
- perout_request.index = 0;
+ perout_request.index = index;
perout_request.start.sec = ts.tv_sec + 2;
perout_request.start.nsec = 0;
perout_request.period.sec = 0;
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 3a2b96302ecc..10c874ebdfe5 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,11 +16,13 @@ Debugging390.txt
- hints for debugging on s390 systems.
driver-model.txt
- information on s390 devices and the driver model.
+kvm.txt
+ - ioctl calls to /dev/kvm on s390.
monreader.txt
- information on accessing the z/VM monitor stream from Linux.
+qeth.txt
+ - HiperSockets Bridge Port Support.
s390dbf.txt
- information on using the s390 debug feature.
-TAPE
- - information on the driver for channel-attached tapes.
-zfcpdump
+zfcpdump.txt
- information on the s390 SCSI dump tool.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 46702e4f89c9..eccf7ad2e7f9 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -2,6 +2,8 @@
- this file.
sched-arch.txt
- CPU Scheduler implementation hints for architecture specific code.
+sched-bwc.txt
+ - CFS bandwidth control overview.
sched-design-CFS.txt
- goals, design and implementation of the Completely Fair Scheduler.
sched-domains.txt
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index 2044be565d93..c4b978a72f78 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -36,6 +36,8 @@ NinjaSCSI.txt
- info on WorkBiT NinjaSCSI-32/32Bi driver
aacraid.txt
- Driver supporting Adaptec RAID controllers
+advansys.txt
+ - List of Advansys Host Adapters
aha152x.txt
- info on driver for Adaptec AHA152x based adapters
aic79xx.txt
@@ -44,6 +46,12 @@ aic7xxx.txt
- info on driver for Adaptec controllers
arcmsr_spec.txt
- ARECA FIRMWARE SPEC (for IOP331 adapter)
+bfa.txt
+ - Brocade FC/FCOE adapter driver.
+bnx2fc.txt
+ - FCoE hardware offload for Broadcom network interfaces.
+cxgb3i.txt
+ - Chelsio iSCSI Linux Driver
dc395x.txt
- README file for the dc395x SCSI driver
dpti.txt
@@ -52,18 +60,24 @@ dtc3x80.txt
- info on driver for DTC 2x80 based adapters
g_NCR5380.txt
- info on driver for NCR5380 and NCR53c400 based adapters
+hpsa.txt
+ - HP Smart Array Controller SCSI driver.
hptiop.txt
- HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
in2000.txt
- info on in2000 driver
libsas.txt
- Serial Attached SCSI management layer.
+link_power_management_policy.txt
+ - Link power management options.
lpfc.txt
- LPFC driver release notes
megaraid.txt
- Common Management Module, shared code handling ioctls for LSI drivers
ncr53c8xx.txt
- info on driver for NCR53c8xx based adapters
+osd.txt
+ - Object-Based Storage Device, command set introduction.
osst.txt
- info on driver for OnStream SC-x0 SCSI tape
ppa.txt
@@ -74,6 +88,8 @@ scsi-changer.txt
- README for the SCSI media changer driver
scsi-generic.txt
- info on the sg driver for generic (non-disk/CD/tape) SCSI devices.
+scsi-parameters.txt
+ - List of SCSI-parameters to pass to the kernel at module load-time.
scsi.txt
- short blurb on using SCSI support as a module.
scsi_mid_low_api.txt
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
index 1f1b22fbd739..f9c6b5ed03e7 100644
--- a/Documentation/serial/00-INDEX
+++ b/Documentation/serial/00-INDEX
@@ -4,10 +4,12 @@ README.cycladesZ
- info on Cyclades-Z firmware loading.
digiepca.txt
- info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
-hayes-esp.txt
- - info on using the Hayes ESP serial driver.
+driver
+ - intro to the low level serial driver.
moxa-smartio
- file with info on installing/using Moxa multiport serial driver.
+n_gsm.txt
+ - GSM 0710 tty multiplexer howto.
riscom8.txt
- notes on using the RISCom/8 multi-port serial driver.
rocket.txt
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
new file mode 100644
index 000000000000..a128fa835512
--- /dev/null
+++ b/Documentation/spi/00-INDEX
@@ -0,0 +1,22 @@
+00-INDEX
+ - this file.
+Makefile
+ - Makefile for the example source files.
+butterfly
+ - AVR Butterfly SPI driver overview and pin configuration.
+ep93xx_spi
+ - Basic EP93xx SPI driver configuration.
+pxa2xx
+ - PXA2xx SPI master controller driver, built around a spi_message FIFO and workqueue.
+spidev
+ - Intro to the userspace API for spi devices
+spidev_fdx.c
+ - spidev example file
+spi-lm70llp
+ - Connecting an LM70-LLP sensor to the kernel via the SPI subsys.
+spi-sc18is602
+ - NXP SC18IS602/603 I2C-bus to SPI bridge
+spi-summary
+ - (Linux) SPI overview. If unsure about SPI or SPI in Linux, start here.
+spidev_test.c
+ - SPI testing utility.
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index f72e0d1e0da8..7982bcc4d151 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -543,7 +543,22 @@ SPI MASTER METHODS
queuing transfers that arrive in the meantime. When the driver is
finished with this message, it must call
spi_finalize_current_message() so the subsystem can issue the next
- transfer. This may sleep.
+ message. This may sleep.
+
+ master->transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer)
+ The subsystem calls the driver to transfer a single transfer while
+ queuing transfers that arrive in the meantime. When the driver is
+ finished with this transfer, it must call
+ spi_finalize_current_transfer() so the subsystem can issue the next
+ transfer. This may sleep. Note: transfer_one and transfer_one_message
+ are mutually exclusive; when both are set, the generic subsystem does
+ not call your transfer_one callback.
+
+ Return values:
+ negative errno: error
+ 0: transfer is finished
+ 1: transfer is still in progress
DEPRECATED METHODS
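
[Editor's note] As a reading aid for the transfer_one method documented in the hunk above, here is a minimal sketch of such a callback. It is illustrative only: foo_spi and the foo_spi_* helpers are hypothetical names invented for the example, while spi_master_get_devdata() and spi_finalize_current_transfer() are the core helpers the text refers to, and the return values follow the convention listed above.

    /* Hedged sketch; foo_spi and the foo_spi_* helpers are hypothetical. */
    static int foo_spi_transfer_one(struct spi_master *master,
                                    struct spi_device *spi,
                                    struct spi_transfer *xfer)
    {
            struct foo_spi *fs = spi_master_get_devdata(master);

            if (foo_spi_can_dma(fs, xfer)) {
                    /*
                     * Asynchronous path: the DMA completion interrupt is
                     * expected to call spi_finalize_current_transfer()
                     * once the transfer has completed.
                     */
                    foo_spi_start_dma(fs, xfer);
                    return 1;       /* transfer still in progress */
            }

            foo_spi_pio(fs, spi, xfer);     /* blocking PIO path */
            return 0;                       /* transfer finished */
    }

Returning 1 is what lets a driver kick off DMA and hand control back to the core without blocking; drivers that complete every transfer synchronously simply return 0, and errors are reported as a negative errno.
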
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ef2ccbf77fa2..6d042dc1cce0 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,6 +8,8 @@ hpet_example.c
- sample hpet timer test program
hrtimers.txt
- subsystem for high-resolution kernel timers
+Makefile
+ - Build and link hpet_example
NO_HZ.txt
- Summary of the different methods for the scheduler clock-interrupts management.
timers-howto.txt
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index 641ec9220179..fee9f2bf9c64 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -20,5 +20,7 @@ ppc-pv.txt
- the paravirtualization interface on PowerPC.
review-checklist.txt
- review checklist for KVM patches.
+s390-diag.txt
+ - Diagnose hypercall description (for IBM S/390)
timekeeping.txt
- timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index a39d06680e1c..081c49777abb 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -16,8 +16,6 @@ hwpoison.txt
- explains what hwpoison is
ksm.txt
- how to use the Kernel Samepage Merging feature.
-locking
- - info on how locking and synchronization is done in the Linux vm code.
numa
- information about NUMA specific code in the Linux vm.
numa_memory_policy.txt
@@ -32,6 +30,8 @@ slub.txt
- a short users guide for SLUB.
soft-dirty.txt
- short explanation for soft-dirty PTEs
+split_page_table_lock
+ - Separate per-table lock to improve scalability of the old page_table_lock.
transhuge.txt
- Transparent Hugepage Support, alternative way of using hugepages.
unevictable-lru.txt
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index d63fa024ac05..8330cf9325f0 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,7 +4,9 @@ ds2482
- The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
ds2490
- The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
-mxc_w1
+mxc-w1
- W1 master controller driver found on Freescale MX2/MX3 SoCs
+omap-hdq
+ - HDQ/1-wire module of TI OMAP 2430/3430.
w1-gpio
- GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 75613c9ac4db..6e18c70c3474 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -4,3 +4,5 @@ w1_therm
- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
w1_ds2423
- The Maxim/Dallas Semiconductor ds2423 counter device.
+w1_ds28e04
+ - The Maxim/Dallas Semiconductor ds28e04 eeprom.
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
index f37b46d34861..692264456f0f 100644
--- a/Documentation/x86/00-INDEX
+++ b/Documentation/x86/00-INDEX
@@ -1,6 +1,20 @@
00-INDEX
- this file
-mtrr.txt
- - how to use x86 Memory Type Range Registers to increase performance
+boot.txt
+ - List of boot protocol versions
+early-microcode.txt
+ - How to load microcode from an initrd-CPIO archive early to fix CPU issues.
+earlyprintk.txt
+ - Using earlyprintk with a USB2 debug port key.
+entry_64.txt
+ - Describe (some of the) kernel entry points for x86.
exception-tables.txt
- why and how Linux kernel uses exception tables on x86
+mtrr.txt
+ - how to use x86 Memory Type Range Registers to increase performance
+pat.txt
+ - Page Attribute Table intro and API
+usb-legacy-support.txt
+ - how to fix/avoid quirks when using emulated PS/2 mouse/keyboard.
+zero-page.txt
+ - layout of the first page of memory.
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 28fa325b7461..6f6d956ac1c9 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
Maintainer: Will Deacon <will.deacon@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
---------------------------------------------------------------------
Documentation/arm64/booting.txt 的中文翻译
@@ -16,9 +16,9 @@ Documentation/arm64/booting.txt 的中文翻译
译存在问题,请è”系中文版维护者。
英文版维护者: Will Deacon <will.deacon@arm.com>
-中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
以下为正文
---------------------------------------------------------------------
@@ -64,8 +64,8 @@ RAM,或å¯èƒ½ä½¿ç”¨å¯¹è¿™ä¸ªè®¾å¤‡å·²çŸ¥çš„ RAM ä¿¡æ¯ï¼Œè¿˜å¯èƒ½ä½¿ç”¨ä»»ä½•
å¿…è¦æ€§: 强制
-设备树数æ®å—(dtb)大å°å¿…é¡»ä¸å¤§äºŽ 2 MB,且ä½äºŽä»Žå†…核映åƒèµ·å§‹ç®—起第一个
-512MB 内的 2MB 边界上。这使得内核å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸­çš„å•ä¸ªèŠ‚æ述符æ¥
+设备树数æ®å—(dtb)必须 8 字节对é½ï¼Œå¹¶ä½äºŽä»Žå†…核映åƒèµ·å§‹ç®—起第一个 512MB
+内,且ä¸å¾—跨越 2MB 对é½è¾¹ç•Œã€‚这使得内核å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸­çš„å•ä¸ªèŠ‚æ述符æ¥
映射此数æ®å—。
@@ -84,13 +84,23 @@ AArch64 内核当å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ­¤å¦‚果使用了压缩内
å¿…è¦æ€§: 强制
-已解压的内核映åƒåŒ…å«ä¸€ä¸ª 32 字节的头,内容如下:
+已解压的内核映åƒåŒ…å«ä¸€ä¸ª 64 字节的头,内容如下:
- u32 magic = 0x14000008; /* 跳转到 stext, å°ç«¯ */
- u32 res0 = 0; /* ä¿ç•™ */
+ u32 code0; /* å¯æ‰§è¡Œä»£ç  */
+ u32 code1; /* å¯æ‰§è¡Œä»£ç  */
u64 text_offset; /* 映åƒè£…è½½å移 */
+ u64 res0 = 0; /* ä¿ç•™ */
u64 res1 = 0; /* ä¿ç•™ */
u64 res2 = 0; /* ä¿ç•™ */
+ u64 res3 = 0; /* ä¿ç•™ */
+ u64 res4 = 0; /* ä¿ç•™ */
+ u32 magic = 0x644d5241; /* 魔数, å°ç«¯, "ARM\x64" */
+ u32 res5 = 0; /* ä¿ç•™ */
+
+
+映åƒå¤´æ³¨é‡Šï¼š
+
+- code0/code1 负责跳转到 stext.
映åƒå¿…é¡»ä½äºŽç³»ç»Ÿ RAM 起始处的特定å移(当å‰æ˜¯ 0x80000)。系统 RAM
的起始地å€å¿…须是以 2MB 对é½çš„。
@@ -118,9 +128,9 @@ AArch64 内核当å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ­¤å¦‚果使用了压缩内
外部高速缓存(如果存在)必须é…置并ç¦ç”¨ã€‚
- 架构计时器
- CNTFRQ 必须设定为计时器的频率。
- 如果在 EL1 模å¼ä¸‹è¿›å…¥å†…核,则 CNTHCTL_EL2 中的 EL1PCTEN (bit 0)
- 必须置ä½ã€‚
+ CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对所有 CPU
+ 都一致的值。如果在 EL1 模å¼ä¸‹è¿›å…¥å†…核,则 CNTHCTL_EL2 中的
+ EL1PCTEN (bit 0) 必须置ä½ã€‚
- 一致性
通过内核å¯åŠ¨çš„所有 CPU 在内核入å£åœ°å€ä¸Šå¿…须处于相åŒçš„一致性域中。
@@ -131,23 +141,40 @@ AArch64 内核当å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ­¤å¦‚果使用了压缩内
在进入内核映åƒçš„异常级中,所有构架中å¯å†™çš„系统寄存器必须通过软件
在一个更高的异常级别下åˆå§‹åŒ–,以防止在 未知 状æ€ä¸‹è¿è¡Œã€‚
+以上对于 CPU 模å¼ã€é«˜é€Ÿç¼“å­˜ã€MMUã€æž¶æž„计时器ã€ä¸€è‡´æ€§ã€ç³»ç»Ÿå¯„存器的
+å¿…è¦æ¡ä»¶æ述适用于所有 CPU。所有 CPU 必须在åŒä¸€å¼‚常级别跳入内核。
+
引导装载程åºå¿…须在æ¯ä¸ª CPU 处于以下状æ€æ—¶è·³å…¥å†…核入å£ï¼š
- 主 CPU 必须直接跳入内核映åƒçš„第一æ¡æŒ‡ä»¤ã€‚通过此 CPU 传递的设备树
- æ•°æ®å—必须在æ¯ä¸ª CPU 节点中包å«ä»¥ä¸‹å†…容:
-
- 1ã€â€˜enable-method’属性。目å‰ï¼Œæ­¤å­—段支æŒçš„值仅为字符串“spin-tableâ€ã€‚
-
- 2ã€â€˜cpu-release-addr’标识一个 64-bitã€åˆå§‹åŒ–为零的内存ä½ç½®ã€‚
+ æ•°æ®å—必须在æ¯ä¸ª CPU 节点中包å«ä¸€ä¸ª ‘enable-method’ 属性,所
+ 支æŒçš„ enable-method 请è§ä¸‹æ–‡ã€‚
引导装载程åºå¿…须生æˆè¿™äº›è®¾å¤‡æ ‘属性,并在跳入内核入å£ä¹‹å‰å°†å…¶æ’å…¥
æ•°æ®å—。
-- 任何辅助 CPU 必须在内存ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘中的 /memreserve/ 域传递
+- enable-method 为 “spin-table†的 CPU 必须在它们的 CPU
+ 节点中包å«ä¸€ä¸ª ‘cpu-release-addr’ 属性。这个属性标识了一个
+ 64 ä½è‡ªç„¶å¯¹é½ä¸”åˆå§‹åŒ–为零的内存ä½ç½®ã€‚
+
+ 这些 CPU 必须在内存ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘中的 /memreserve/ 域传递
给内核)中自旋于内核之外,轮询它们的 cpu-release-addr ä½ç½®ï¼ˆå¿…é¡»
包å«åœ¨ä¿ç•™åŒºä¸­ï¼‰ã€‚å¯é€šè¿‡æ’å…¥ wfe 指令æ¥é™ä½Žå¿™å¾ªçŽ¯å¼€é”€ï¼Œè€Œä¸» CPU å°†
å‘出 sev 指令。当对 cpu-release-addr 所指ä½ç½®çš„读å–æ“作返回éžé›¶å€¼
- 时,CPU 必须直接跳入此值所指å‘的地å€ã€‚
+ 时,CPU 必须跳入此值所指å‘的地å€ã€‚此值为一个å•ç‹¬çš„ 64 ä½å°ç«¯å€¼ï¼Œ
+ å› æ­¤ CPU 须在跳转å‰å°†æ‰€è¯»å–的值转æ¢ä¸ºå…¶æœ¬èº«çš„端模å¼ã€‚
+
+- enable-method 为 “psci†的 CPU ä¿æŒåœ¨å†…核外(比如,在
+ memory 节点中æ述为内核空间的内存区外,或在通过设备树 /memreserve/
+ 域中æ述为内核ä¿ç•™åŒºçš„空间中)。内核将会å‘起在 ARM 文档(编å·
+ ARM DEN 0022A:用于 ARM 上的电æºçŠ¶æ€å调接å£ç³»ç»Ÿè½¯ä»¶ï¼‰ä¸­æè¿°çš„
+ CPU_ON 调用æ¥å°† CPU 带入内核。
+
+ *译者注:到文档翻译时,此文档已更新为 ARM DEN 0022B。
+
+ 设备树必须包å«ä¸€ä¸ª ‘psci’ 节点,请å‚考以下文档:
+ Documentation/devicetree/bindings/arm/psci.txt
+
- 辅助 CPU 通用寄存器设置
x0 = 0 (ä¿ç•™ï¼Œå°†æ¥å¯èƒ½ä½¿ç”¨)
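For readers following the spin-table enable-method described in the hunk above, the boot-loader side of the hand-off can be pictured roughly as below. This is only an illustrative sketch with hypothetical names (release_addr, secondary_entry); real firmware must also handle cache maintenance for the parked CPUs.

    #include <stdint.h>

    /* Wake a secondary CPU parked in the spin-table loop.  The slot pointed
     * to by cpu-release-addr is 64-bit, naturally aligned and starts out
     * zero; once it reads back non-zero, the secondary jumps to that
     * address. */
    static void spin_table_release_cpu(volatile uint64_t *release_addr,
                                       uint64_t secondary_entry)
    {
            /* The protocol defines the value as little-endian; a big-endian
             * boot loader would need to byte-swap here. */
            *release_addr = secondary_entry;

            __asm__ volatile("dsb sy" : : : "memory");  /* publish the store */
            __asm__ volatile("sev");                    /* wake CPUs in wfe  */
    }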
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a5f6283829f9..a782704c1cb5 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
Maintainer: Catalin Marinas <catalin.marinas@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
---------------------------------------------------------------------
Documentation/arm64/memory.txt 的中文翻译
@@ -16,9 +16,9 @@ Documentation/arm64/memory.txt 的中文翻译
译存在问题,请è”系中文版维护者。
英文版维护者: Catalin Marinas <catalin.marinas@arm.com>
-中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
以下为正文
---------------------------------------------------------------------
@@ -41,7 +41,7 @@ AArch64 Linux 使用页大å°ä¸º 4KB çš„ 3 级转æ¢è¡¨é…置,对于用户和å
TTBR1 中,且从ä¸å†™å…¥ TTBR0。
-AArch64 Linux 内存布局:
+AArch64 Linux 在页大å°ä¸º 4KB 时的内存布局:
èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€”
-----------------------------------------------------------------------
@@ -55,15 +55,42 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
ffffffbe00000000 ffffffbffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap]
+ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk 设备
+
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O 空间
-ffffffbbffff0000 ffffffbcffffffff ~2MB [防护页]
+ffffffbffbe10000 ffffffbcffffffff ~2MB [防护页]
ffffffbffc000000 ffffffbfffffffff 64MB 模å—
ffffffc000000000 ffffffffffffffff 256GB 内核逻辑内存映射
+AArch64 Linux 在页大å°ä¸º 64KB 时的内存布局:
+
+èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€”
+-----------------------------------------------------------------------
+0000000000000000 000003ffffffffff 4TB 用户空间
+
+fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc
+
+fffffdfbffff0000 fffffdfbffffffff 64KB [防护页]
+
+fffffdfc00000000 fffffdfdffffffff 8GB vmemmap
+
+fffffdfe00000000 fffffdfffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap]
+
+fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk 设备
+
+fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O 空间
+
+fffffdfffbe10000 fffffdfffbffffff ~2MB [防护页]
+
+fffffdfffc000000 fffffdffffffffff 64MB 模å—
+
+fffffe0000000000 ffffffffffffffff 2TB 内核逻辑内存映射
+
+
4KB 页大å°çš„转æ¢è¡¨æŸ¥æ‰¾ï¼š
+--------+--------+--------+--------+--------+--------+--------+--------+
@@ -91,3 +118,10 @@ ffffffc000000000 ffffffffffffffff 256GB 内核逻辑内存映射
| | +--------------------------> [41:29] L2 索引 (仅使用 38:29 )
| +-------------------------------> [47:42] L1 索引 (未使用)
+-------------------------------------------------> [63] TTBR0/1
+
+当使用 KVM æ—¶, 管ç†ç¨‹åºï¼ˆhypervisor)在 EL2 中通过相对内核虚拟地å€çš„
+一个固定å移æ¥æ˜ å°„内核页(内核虚拟地å€çš„高 24 ä½è®¾ä¸ºé›¶ï¼‰:
+
+èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€”
+-----------------------------------------------------------------------
+0000004000000000 0000007fffffffff 256GB 在 HYP 中映射的内核对象
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt
new file mode 100644
index 000000000000..2664d1bd5a1c
--- /dev/null
+++ b/Documentation/zh_CN/arm64/tagged-pointers.txt
@@ -0,0 +1,52 @@
+Chinese translated version of Documentation/arm64/tagged-pointers.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+---------------------------------------------------------------------
+Documentation/arm64/tagged-pointers.txt 的中文翻译
+
+如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
+交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
+译存在问题,请è”系中文版维护者。
+
+英文版维护者: Will Deacon <will.deacon@arm.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
+
+以下为正文
+---------------------------------------------------------------------
+ Linux 在 AArch64 中带标记的虚拟地å€
+ =================================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2013 年 06 月 12 日
+
+本文档简述了在 AArch64 地å€è½¬æ¢ç³»ç»Ÿä¸­æ供的带标记的虚拟地å€åŠå…¶åœ¨
+AArch64 Linux 中的潜在用途。
+
+内核æ供的地å€è½¬æ¢è¡¨é…置使通过 TTBR0 完æˆçš„虚拟地å€è½¬æ¢ï¼ˆå³ç”¨æˆ·ç©ºé—´
+映射),其虚拟地å€çš„最高 8 ä½ï¼ˆ63:56)会被转æ¢ç¡¬ä»¶æ‰€å¿½ç•¥ã€‚è¿™ç§æœºåˆ¶
+让这些ä½å¯ä¾›åº”用程åºè‡ªç”±ä½¿ç”¨ï¼Œå…¶æ³¨æ„事项如下:
+
+ (1) 内核è¦æ±‚所有传递到 EL1 的用户空间地å€å¸¦æœ‰ 0x00 标记。
+ è¿™æ„味ç€ä»»ä½•æºå¸¦ç”¨æˆ·ç©ºé—´è™šæ‹Ÿåœ°å€çš„系统调用(syscall)
+ å‚æ•° *å¿…é¡»* 在陷入内核å‰ä½¿å®ƒä»¬çš„最高字节被清零。
+
+ (2) éžé›¶æ ‡è®°åœ¨ä¼ é€’ä¿¡å·æ—¶ä¸è¢«ä¿å­˜ã€‚è¿™æ„味ç€åœ¨åº”用程åºä¸­åˆ©ç”¨äº†
+ 标记的信å·å¤„ç†å‡½æ•°æ— æ³•ä¾èµ– siginfo_t 的用户空间虚拟
+ 地å€æ‰€æºå¸¦çš„包å«å…¶å†…部域信æ¯çš„标记。此规则的一个例外是
+ 当信å·æ˜¯åœ¨è°ƒè¯•è§‚察点的异常处ç†ç¨‹åºä¸­äº§ç”Ÿçš„,此时标记的
+ ä¿¡æ¯å°†è¢«ä¿å­˜ã€‚
+
+ (3) 当使用带标记的指针时需特别留心,因为仅对两个虚拟地å€
+ 的高字节,C 编译器很å¯èƒ½æ— æ³•åˆ¤æ–­å®ƒä»¬æ˜¯ä¸åŒçš„。
+
+此构架会阻止对带标记的 PC 指针的利用,因此在异常返回时,其高字节
+将被设置æˆä¸€ä¸ªä¸º “55†的扩展符。
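To illustrate rules (1) and (2) above, a user-space program that keeps metadata in the top byte must strip the tag before handing the pointer to the kernel, for example as a syscall argument. A minimal sketch with hypothetical helper names:

    #include <stdint.h>

    #define TAG_SHIFT   56
    #define ADDR_MASK   ((1UL << TAG_SHIFT) - 1)

    /* Store an 8-bit tag in the otherwise-ignored top byte (bits 63:56). */
    static inline void *tag_pointer(void *p, uint8_t tag)
    {
            return (void *)(((uintptr_t)p & ADDR_MASK) |
                            ((uintptr_t)tag << TAG_SHIFT));
    }

    /* Clear bits 63:56 before passing the pointer across the syscall
     * boundary; bits 55:0 are preserved. */
    static inline void *untag_pointer(void *p)
    {
            return (void *)((uintptr_t)p & ADDR_MASK);
    }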
diff --git a/MAINTAINERS b/MAINTAINERS
index b2cf5cfb4d29..fb08dcececf1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2367,7 +2367,7 @@ F: include/linux/cpufreq.h
CPU FREQUENCY DRIVERS - ARM BIG LITTLE
M: Viresh Kumar <viresh.kumar@linaro.org>
-M: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+M: Sudeep Holla <sudeep.holla@arm.com>
L: cpufreq@vger.kernel.org
L: linux-pm@vger.kernel.org
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
@@ -2857,7 +2857,7 @@ M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
Q: http://patchwork.freedesktop.org/project/intel-gfx/
-T: git git://people.freedesktop.org/~danvet/drm-intel
+T: git git://anongit.freedesktop.org/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
@@ -7196,7 +7196,7 @@ S: Maintained
F: drivers/net/ethernet/rdc/r6040.c
RDS - RELIABLE DATAGRAM SOCKETS
-M: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
+M: Chien Yen <chien.yen@oracle.com>
L: rds-devel@oss.oracle.com (moderated for non-subscribers)
S: Supported
F: net/rds/
diff --git a/Makefile b/Makefile
index 606ef7c4a544..933e1def6baf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 040bb0eba152..10666ca8aee1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -315,7 +315,7 @@
ranges;
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
clocks = <&ahb_gates 17>;
@@ -323,7 +323,7 @@
};
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
status = "disabled";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ea16054857a4..64961595e8d6 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -278,7 +278,7 @@
ranges;
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
clocks = <&ahb_gates 17>;
@@ -286,7 +286,7 @@
};
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
status = "disabled";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 119f066f0d98..9ff09484847b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -340,7 +340,7 @@
ranges;
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <0 55 4>;
clocks = <&ahb_gates 17>;
@@ -348,7 +348,7 @@
};
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
status = "disabled";
#address-cells = <1>;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dd4327f09ba4..27bbcfc7202a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be62ae6..7959dd0ca5d5 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
+CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..0237f0867e37 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long tmp;
int oldval;
+ smp_mb();
+
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
long oldval;
unsigned long res;
+ smp_mb();
+
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
+" dmb ish\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba8806b..409ca370cfe2 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dsb(opt) asm volatile("dsb sy" : : : "memory")
#define mb() dsb()
#define rmb() asm volatile("dsb ld" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee327206..889324981aa4 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void)
{
asm("ic ialluis");
+ dsb();
}
#define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7f4a25..57c0fa7bf711 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, %2\n"
+ "1: ldxrb %w0, %2\n"
" stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, %2\n"
+ "1: ldxrh %w0, %2\n"
" stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, %2\n"
+ "1: ldxr %w0, %2\n"
" stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, %2\n"
+ "1: ldxr %0, %2\n"
" stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
BUILD_BUG();
}
+ smp_mb();
return ret;
}
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
#define ESR_EL1_EC_SP_ALIGN (0x26)
#define ESR_EL1_EC_FP_EXC32 (0x28)
#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERRROR (0x2F)
+#define ESR_EL1_EC_SERROR (0x2F)
#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3aba5d69..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
-"1: ldaxr %w1, %2\n" \
+"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
+" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -40,7 +41,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
"2: stlxr %w3, %w5, %2\n"
" cbnz %w3, 1b\n"
+" dmb ish\n"
"3:\n"
" .pushsection .fixup,\"ax\"\n"
"4: mov %w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
- : "cc", "memory");
+ : "memory");
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef4771c73..0eb398655378 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
#define ESR_EL2_EC_SP_ALIGN (0x26)
#define ESR_EL2_EC_FP_EXC32 (0x28)
#define ESR_EL2_EC_FP_EXC64 (0x2C)
-#define ESR_EL2_EC_SERRROR (0x2F)
+#define ESR_EL2_EC_SERROR (0x2F)
#define ESR_EL2_EC_BREAKPT (0x30)
#define ESR_EL2_EC_BREAKPT_HYP (0x31)
#define ESR_EL2_EC_SOFTSTP (0x32)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf064d7a1..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" cbnz %w0, 2b\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf008d3..bb8eb8a78e67 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
__SYSCALL(375, sys_setns)
__SYSCALL(376, compat_sys_process_vm_readv)
__SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
#define __NR_compat_syscalls 379
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
+#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
+#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ffdf230..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
+ .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
.inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xe1923e9f // 1: ldaex r3, [r2]
+ .inst 0xe1923f9f // 1: ldrex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
.inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe12fff1e // bx lr
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf6945a..a7149cae1615 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->cs_mult = tk->mult;
vdso_data->cs_shift = tk->shift;
- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
}
smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af42e62..6d20b7d162d8 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10b5211..fe652ffd34c2 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
bl __do_get_tspec
seqcnt_check w9, 1b
+ mov x30, x2
+
cmp w0, #CLOCK_MONOTONIC
b.ne 6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 8f
+ /* xtime_coarse_nsec is already right-shifted */
+ mov x12, #0
+
/* Get coarse timespec. */
adr vdso_data, _vdso_data
3: seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
lsr x11, x11, x12
stp x10, x11, [x1, #TSPEC_TV_SEC]
mov x0, xzr
- ret x2
+ ret
7:
mov x30, x2
8: /* Syscall fallback. */
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldaxr x2, [x1]
+1: ldxr x2, [x1]
lsr x0, x2, x3 // Save old value of bit
\instr x2, x2, x4 // toggle bit
stlxr w5, x2, [x1]
cbnz w5, 1b
+ dmb ish
and x0, x0, #1
3: ret
ENDPROC(\name )
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 45b5ab54c9ee..fbd76785c5db 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_DMA_CMA)) {
struct page *page;
+ size = PAGE_ALIGN(size);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
if (!page)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebbe7013..f8dc7e8fce6f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0)
+ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+ pmd_t old_pmd = *pmd;
set_pmd(pmd, __pmd(phys | prot_sect_kernel));
- else
+ /*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+ if (!pmd_none(old_pmd))
+ flush_tlb_all();
+ } else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ }
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index afd45e0d552e..ae763d8bf55a 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 312 /* length of syscall table */
+#define NR_syscalls 314 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 34fd6fe46da1..715e85f858de 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -325,5 +325,7 @@
#define __NR_process_vm_writev 1333
#define __NR_accept4 1334
#define __NR_finit_module 1335
+#define __NR_sched_setattr 1336
+#define __NR_sched_getattr 1337
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ddea607f948a..fa8d61a312a7 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1773,6 +1773,8 @@ sys_call_table:
data8 sys_process_vm_writev
data8 sys_accept4
data8 sys_finit_module // 1335
+ data8 sys_sched_setattr
+ data8 sys_sched_getattr
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 05b7d39e4391..66fc24c24238 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -13,6 +13,8 @@
#ifndef _ASM_MICROBLAZE_DELAY_H
#define _ASM_MICROBLAZE_DELAY_H
+#include <linux/param.h>
+
extern inline void __delay(unsigned long loops)
{
asm volatile ("# __delay \n\t" \
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index a2cea7206077..3fbb7f1db3bc 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -89,6 +89,11 @@ static inline unsigned int readl(const volatile void __iomem *addr)
{
return le32_to_cpu(*(volatile unsigned int __force *)addr);
}
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return le64_to_cpu(__raw_readq(addr));
+}
static inline void writeb(unsigned char v, volatile void __iomem *addr)
{
*(volatile unsigned char __force *)addr = v;
@@ -101,6 +106,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
{
*(volatile unsigned int __force *)addr = cpu_to_le32(v);
}
+#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
/* ioread and iowrite variants. these are for now the same as __raw_
* variants of accessors. we might check for endianness in the future
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index b7fb0438458c..17645b2e2f07 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -66,7 +66,7 @@ real_start:
mts rmsr, r0
/* Disable stack protection from bootloader */
mts rslr, r0
- addi r8, r0, 0xFFFFFFF
+ addi r8, r0, 0xFFFFFFFF
mts rshr, r8
/*
* According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 11f3ad20321c..5483906e0f86 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -534,13 +534,10 @@ static int __init db1000_dev_init(void)
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
+ gpio_request(19, "sd0_cd");
+ gpio_request(20, "sd1_cd");
gpio_direction_input(19); /* sd0 cd# */
gpio_direction_input(20); /* sd1 cd# */
- gpio_direction_input(21); /* touch pendown# */
- gpio_direction_input(207); /* SPI MISO */
- gpio_direction_output(208, 0); /* SPI MOSI */
- gpio_direction_output(209, 1); /* SPI SCK */
- gpio_direction_output(210, 1); /* SPI CS# */
/* spi_gpio on SSI0 pins */
pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index cfe092fc720d..6b9749540edf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
default:
BUG();
}
+
+ return SIGFPE;
}
#define __disable_fpu() \
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279f9665..d6e154a9e6a5 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,18 @@
#define __NR_process_vm_writev (__NR_Linux + 346)
#define __NR_kcmp (__NR_Linux + 347)
#define __NR_finit_module (__NR_Linux + 348)
+#define __NR_sched_setattr (__NR_Linux + 349)
+#define __NR_sched_getattr (__NR_Linux + 350)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 348
+#define __NR_Linux_syscalls 350
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 348
+#define __NR_O32_Linux_syscalls 350
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -695,16 +697,18 @@
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
#define __NR_getdents64 (__NR_Linux + 308)
+#define __NR_sched_setattr (__NR_Linux + 309)
+#define __NR_sched_getattr (__NR_Linux + 310)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 308
+#define __NR_Linux_syscalls 310
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 308
+#define __NR_64_Linux_syscalls 310
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1025,15 +1029,17 @@
#define __NR_process_vm_writev (__NR_Linux + 310)
#define __NR_kcmp (__NR_Linux + 311)
#define __NR_finit_module (__NR_Linux + 312)
+#define __NR_sched_setattr (__NR_Linux + 313)
+#define __NR_sched_getattr (__NR_Linux + 314)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 312
+#define __NR_Linux_syscalls 314
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 312
+#define __NR_N32_Linux_syscalls 314
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e8e541b40d86..a5b14f48e1af 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -563,3 +563,5 @@ EXPORT(sys_call_table)
PTR sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 4350 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 57e3742fec59..b56e254beb15 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,6 @@ EXPORT(sys_call_table)
PTR sys_kcmp
PTR sys_finit_module
PTR sys_getdents64
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 5310 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2f48f5934399..f7e5b72cf481 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -418,4 +418,6 @@ EXPORT(sysn32_call_table)
PTR compat_sys_process_vm_writev /* 6310 */
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f1acdb429f4f..6788727d91af 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -541,4 +541,6 @@ EXPORT(sys32_call_table)
PTR compat_sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 4350 */
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 88d0962de65a..2bedafea3d94 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -33,22 +33,9 @@
int hpux_execve(struct pt_regs *regs)
{
- int error;
- struct filename *filename;
-
- filename = getname((const char __user *) regs->gr[26]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
-
- error = do_execve(filename->name,
+ return do_execve(getname((const char __user *) regs->gr[26]),
(const char __user *const __user *) regs->gr[25],
(const char __user *const __user *) regs->gr[24]);
-
- putname(filename);
-
-out:
- return error;
}
struct hpux_dirent {
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad6818e..150866b2a3fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
}
extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f7a8036579b5..42632c7a2a4e 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -77,6 +77,7 @@ struct iommu_table {
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
+ void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
/* Pure 2^n version of get_order */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe15de4..d0e784e0ff48 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
#ifdef __powerpc64__
+extern char __start_interrupts[];
extern char __end_interrupts[];
extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+ unsigned long end)
+{
+ unsigned long real_start, real_end;
+ real_start = __start_interrupts - _stext;
+ real_end = __end_interrupts - _stext;
+
+ return start < (unsigned long)__va(real_end) &&
+ (unsigned long)__va(real_start) < end;
+}
+
static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
{
return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdcb..ee78f6e49d64 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- if (ppc_md.dma_set_mask)
- return ppc_md.dma_set_mask(dev, dma_mask);
if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
*dev->dma_mask = dma_mask;
return 0;
}
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (ppc_md.dma_set_mask)
+ return ppc_md.dma_set_mask(dev, dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
EXPORT_SYMBOL(dma_set_mask);
u64 dma_get_required_mask(struct device *dev)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dca4e19..fdc679d309ec 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
*/
if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
return NULL;
+
driver = eeh_pcid_get(dev);
- if (driver && driver->err_handler)
- return NULL;
+ if (driver) {
+ eeh_pcid_put(dev);
+ if (driver->err_handler)
+ return NULL;
+ }
/* Remove it from PCI subsystem */
pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd440a45..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz);
iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
+ /*
+ * Disable iommu bypass, otherwise the user can DMA to all of
+ * our physical memory via the bypass window instead of just
+ * the pages that have been explicitly mapped into the iommu
+ */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
+
+ /* The kernel owns the device now, we can restore the iommu bypass */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23bfb0a..1d0848bba049 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void)
#ifdef CONFIG_PPC64
cpu_nr = i;
#else
+#ifdef CONFIG_SMP
cpu_nr = get_hard_smp_processor_id(i);
+#else
+ cpu_nr = 0;
#endif
+#endif
+
memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = critirq_ctx[cpu_nr];
tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da8..015ae55c1868 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(phys_addr_t),
- .value = &crashk_res.start,
+ .value = &crashk_base
};
static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
static struct property memory_limit_prop = {
.name = "linux,memory-limit",
.length = sizeof(unsigned long long),
- .value = &memory_limit,
+ .value = &mem_limit,
};
+#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
+
static void __init export_crashk_values(struct device_node *node)
{
struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
of_remove_property(node, prop);
if (crashk_res.start != 0) {
+ crashk_base = cpu_to_be_ulong(crashk_res.start);
of_add_property(node, &crashk_base_prop);
- crashk_size = resource_size(&crashk_res);
+ crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
of_add_property(node, &crashk_size_prop);
}
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
* memory_limit is required by the kexec-tools to limit the
* crash regions to the actual memory used.
*/
+ mem_limit = cpu_to_be_ulong(memory_limit);
of_update_property(node, &memory_limit_prop);
}
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
of_remove_property(node, prop);
/* information needed by userspace when using default_machine_kexec */
- kernel_end = __pa(_end);
+ kernel_end = cpu_to_be_ulong(__pa(_end));
of_add_property(node, &kernel_end_prop);
export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f60..59d229a2a3e0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
+static unsigned long htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
- .value = &htab_size_bytes,
+ .value = &htab_size,
};
static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
if (prop)
of_remove_property(node, prop);
- htab_base = __pa(htab_address);
+ htab_base = cpu_to_be64(__pa(htab_address));
of_add_property(node, &htab_base_prop);
+ htab_size = cpu_to_be64(htab_size_bytes);
of_add_property(node, &htab_size_prop);
of_node_put(node);
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..1482327cfeba 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
* R_PPC64_RELATIVE ones.
*/
mtctr r8
-5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */
- cmpwi r0,R_PPC64_RELATIVE
+5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
+ cmpdi r0,R_PPC64_RELATIVE
bne 6f
ld r6,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2b0da27eaee4..04cc4fcca78b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
hw_cpu = get_hard_smp_processor_id(i);
+#else
+ hw_cpu = 0;
+#endif
+
critirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index de6881259aef..d766d6ee33fe 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
+ /*
+ * If relocatable, check if it overlaps interrupt vectors that
+ * are copied down to real 0. For relocatable kernel
+ * (e.g. kdump case) we copy interrupt vectors down to real
+ * address 0. Mark that region as executable. This is
+ * because on a p8 system with the relocation-on-exception feature
+ * enabled, exceptions are raised with the MMU on (IR=DR=1). Hence,
+ * in order to execute the interrupt handlers in virtual
+ * mode, the vector region needs to be marked as executable.
+ */
+ if ((PHYSICAL_START > MEMORY_START) &&
+ overlaps_interrupt_vector_text(vaddr, vaddr + step))
+ tprot &= ~HPTE_R_N;
+
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e863d7c..67cf22083f4c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
+ if (cpuhw->bhrb_users)
+ ppmu->config_bhrb(cpuhw->bhrb_filter);
+
write_mmcr0(cpuhw, mmcr0);
/*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
- if (cpuhw->bhrb_users)
- ppmu->config_bhrb(cpuhw->bhrb_filter);
local_irq_restore(flags);
}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd2f13f..96cee20dcd34 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
#define PM_BRU_FIN 0x10068
#define PM_BR_MPRED_CMPL 0x400f6
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1 0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1 0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1 0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF 0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1 0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS 0x200fd
+/* Instruction Demand sectors written into IL1 */
+#define PM_L1_DEMAND_WRITE 0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE 0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3 0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS 0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST 0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS 0x17082
+/* Total HW L3 prefetches(Load+store) */
+#define PM_L3_PREF_ALL 0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS 0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS 0x400fc
+
/*
* Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L2_ST,
+ [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
static struct power_pmu power8_pmu = {
.name = "POWER8",
.n_counter = 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
.flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
+ .cache_events = &power8_cache_events,
.attr_groups = power8_pmu_attr_groups,
.bhrb_nr = 32,
};
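
With the two generic mappings added above, PERF_COUNT_HW_CACHE_REFERENCES and PERF_COUNT_HW_CACHE_MISSES resolve to PM_LD_REF_L1 and PM_LD_MISS_L1 on POWER8. A minimal userspace sketch of how those generic events are consumed; it relies only on the standard perf_event_open(2) interface and is not part of the patch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t misses;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CACHE_MISSES;	/* PM_LD_MISS_L1 via the table above */
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* count user space only */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this process, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &misses, sizeof(misses)) == sizeof(misses))
		printf("cache-misses: %llu\n", (unsigned long long)misses);
	close(fd);
	return 0;
}
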
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6d5fa9..3b2b4fb3585b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
+#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
return;
pe = &phb->ioda.pe_array[pdn->pe_number];
+ WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
}
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+ struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+ uint64_t top;
+ bool bypass = false;
+
+ if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+ return -ENODEV;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (pe->tce_bypass_enabled) {
+ top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+ bypass = (dma_mask >= top);
+ }
+
+ if (bypass) {
+ dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+ set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+ } else {
+ dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+ set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ }
+ return 0;
+}
+
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+ struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+ tce32_table);
+ uint16_t window_id = (pe->pe_number << 1) + 1;
+ int64_t rc;
+
+ pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+ if (enable) {
+ phys_addr_t top = memblock_end_of_DRAM();
+
+ top = roundup_pow_of_two(top);
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ top);
+ } else {
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ 0);
+
+ /*
+ * We might want to reset the DMA ops of all devices on
+ * this PE. However, in theory that shouldn't be necessary,
+ * as this is used for VFIO/KVM pass-through and the device
+ * hasn't yet been returned to its kernel driver.
+ */
+ }
+ if (rc)
+ pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+ else
+ pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ /* TVE #1 is selected by PCI address bit 59 */
+ pe->tce_bypass_base = 1ull << 59;
+
+ /* Install set_bypass callback for VFIO */
+ pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+ /* Enable bypass by default */
+ pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe)
{
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ /* Also create a bypass window */
+ pnv_pci_ioda2_setup_bypass_pe(phb, pe);
return;
fail:
if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+ phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
/* Setup shutdown function for kexec */
phb->shutdown = pnv_pci_ioda_shutdown;
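
No driver changes are needed to reach the new bypass window: a 64-bit DMA mask request goes through the machdep dma_set_mask hook (wired up in the setup.c hunk further below) into pnv_pci_ioda_dma_set_mask(), which flips the device between dma_direct_ops and dma_iommu_ops. A hedged sketch of the driver side; example_probe() is illustrative only and not part of the patch:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch only. If the 64-bit mask covers tce_bypass_base plus the end of
 * DRAM, the hook above selects dma_direct_ops (bypass); otherwise the
 * device stays on the 32-bit TCE table via dma_iommu_ops.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* Fall back to the 32-bit IOMMU window. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
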
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b555ebc57ef5..95633d79ef5d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->dma_set_mask)
+ return phb->dma_set_mask(phb, pdev, dma_mask);
+ return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 13f1942a9a5f..cde169442775 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
struct iommu_table tce32_table;
phys_addr_t tce_inval_reg_phys;
- /* XXX TODO: Add support for additional 64-bit iommus */
+ /* 64-bit TCE bypass region */
+ bool tce_bypass_enabled;
+ uint64_t tce_bypass_base;
/* MSIs. MVE index is identical for 32 and 64 bit MSI
* and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+ int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+ u64 dma_mask);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819be1f95..0051e108ef0f 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
static inline void pnv_smp_init(void) { }
#endif
+struct pci_dev;
+
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
+
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ return -ENODEV;
+}
#endif
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 21166f65c97c..110f4fbd319f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/bug.h>
#include <linux/cpuidle.h>
+#include <linux/pci.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev_is_pci(dev))
+ return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
+
static void pnv_shutdown(void)
{
/* Let the PCI code clear up IODA tables */
@@ -238,6 +246,7 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = powernv_idle,
.calibrate_decr = generic_calibrate_decr,
+ .dma_set_mask = pnv_dma_set_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 37300f6ee244..80b1d57c306a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@ config PPC_PSERIES
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
+ select ARCH_RANDOM
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8e639d7cbda7..972df0ffd4dc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
- if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
- (image->type != KEXEC_TYPE_CRASH)) {
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
rc = pSeries_disable_reloc_on_exc();
if (rc != H_SUCCESS)
pr_warning("Warning: Failed to disable relocation on "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed4cd16..8209744b2829 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
/* Default: read HW settings */
if (flow_type == IRQ_TYPE_DEFAULT) {
- switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
- MPIC_INFO(VECPRI_SENSE_MASK))) {
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_EDGE_RISING;
- break;
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_EDGE_FALLING;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_LEVEL_HIGH;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_LEVEL_LOW;
- break;
- }
+ int vold_ps;
+
+ vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+ MPIC_INFO(VECPRI_SENSE_MASK));
+
+ if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_EDGE_RISING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_EDGE_FALLING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_LEVEL_HIGH;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+ else
+ WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
}
/* Apply to irq desc */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a90731b3d44a..b07909850f77 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@ static void get_output_lock(void)
if (xmon_speaker == me)
return;
+
for (;;) {
- if (xmon_speaker == 0) {
- last_speaker = cmpxchg(&xmon_speaker, 0, me);
- if (last_speaker == 0)
- return;
- }
- timeout = 10000000;
+ last_speaker = cmpxchg(&xmon_speaker, 0, me);
+ if (last_speaker == 0)
+ return;
+
+ /*
+ * Wait up to a full second for the lock; we might be on a slow
+ * console, so only check every 100us.
+ */
+ timeout = 10000;
while (xmon_speaker == last_speaker) {
- if (--timeout > 0)
+ if (--timeout > 0) {
+ udelay(100);
continue;
+ }
+
/* hostile takeover */
prev = cmpxchg(&xmon_speaker, last_speaker, me);
if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
xmon_fault_jmp[cpu] = recurse_jmp;
- cpumask_set_cpu(cpu, &cpus_in_xmon);
bp = NULL;
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
release_output_lock();
}
+ cpumask_set_cpu(cpu, &cpus_in_xmon);
+
waiting:
secondary = 1;
while (secondary && !xmon_gate) {
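
The rework above keeps the cmpxchg-based claim but bounds the wait: poll the current holder every 100us for about a second, then take the lock over. A standalone sketch of that discipline using C11 atomics (illustrative only; names are local to the sketch, not kernel symbols):

#include <stdatomic.h>
#include <unistd.h>

static _Atomic unsigned long speaker;	/* 0 == free, otherwise owner id */

static void get_output_lock_sketch(unsigned long me)	/* me must be nonzero */
{
	unsigned long last, expected;
	int timeout;

	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_strong(&speaker, &expected, me))
			return;			/* lock was free, we own it */
		last = expected;		/* remember the current holder */

		/* Wait up to ~1s, checking every 100us. */
		for (timeout = 10000; timeout > 0; timeout--) {
			if (atomic_load(&speaker) != last)
				break;
			usleep(100);
		}
		if (timeout > 0)
			continue;		/* holder changed: retry the fast path */

		/* Hostile takeover: steal the lock from a stuck holder. */
		if (atomic_compare_exchange_strong(&speaker, &last, me))
			return;
	}
}
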
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 4c4a1cef5208..47c8630c93cd 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -529,6 +529,7 @@ static int __init appldata_init(void)
{
int rc;
+ init_virt_timer(&appldata_timer);
appldata_timer.function = appldata_timer_function;
appldata_timer.data = (unsigned long) &appldata_work;
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd39f31..cf3c0089bef2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
#include "crypt_s390.h"
#define AES_KEYLEN_128 1
@@ -32,6 +33,7 @@
#define AES_KEYLEN_256 4
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;
struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[AES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
if (!walk->nbytes)
return ret;
- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= AES_BLOCK_SIZE) {
- /* only use complete blocks, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = AES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, sctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > AES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr, AES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrblk);
+ AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
return ret;
}
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1b599d..0a5aac8a9412 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *iv, struct blkcipher_walk *walk)
+ struct blkcipher_walk *walk)
{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, iv, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
out:
return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
}
};
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+ struct s390_des_ctx *ctx,
+ struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[DES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= DES_BLOCK_SIZE) {
- /* align to block size, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = DES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, ctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > DES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr, DES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
-
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrblk);
+ DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
return ret;
}
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
+ .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
ENTRY(_ehead)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
if (!cmma_flag)
return;
+ /*
+ * Disable CMM for dump, otherwise the tprot based memory
+ * detection can fail because of unstable pages.
+ */
+ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ cmma_flag = 0;
+ return;
+ }
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 940e50ebfafa..0af5250d914f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,6 +444,7 @@ config X86_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
+ depends on X86_PLATFORM_DEVICES
depends on PCI
depends on PCI_GOANY
depends on X86_IO_APIC
@@ -1051,9 +1052,9 @@ config MICROCODE_INTEL
This option enables microcode patch loading support for Intel
processors.
- For latest news and information on obtaining all the required
- Intel ingredients for this driver, check:
- <http://www.urbanmyth.org/microcode/>.
+ For the current Intel microcode data package go to
+ <https://downloadcenter.intel.com> and search for
+ 'Linux Processor Microcode Data File'.
config MICROCODE_AMD
bool "AMD microcode loading support"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621ed1db6..321a52ccf63a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
config X86_DECODER_SELFTEST
bool "x86 instruction decoder selftest"
depends on DEBUG_KERNEL && KPROBES
+ depends on !COMPILE_TEST
---help---
Perform x86 instruction decoder selftests at build time.
This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index a54ee1d054d9..aaac3b2fb746 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
struct amd_l3_cache {
unsigned indices;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index bbc8b12fa443..5ad38ad07890 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
+static inline int pteval_present(pteval_t pteval)
+{
+ /*
+ * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
+ * way clearly states that the intent is that protnone and numa
+ * hinting ptes are considered present for the purposes of
+ * pagetable operations like zapping, protection changes, gup etc.
+ */
+ return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
+}
+
static inline int pte_present(pte_t a)
{
- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
- _PAGE_NUMA);
+ return pteval_present(pte_flags(a));
}
#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90babc245..04905bfc508b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
static inline void __flush_tlb_one(unsigned long addr)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
*/
static inline void __flush_tlb_up(void)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
}
static inline void flush_tlb_all(void)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb_all();
}
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 787e1bb5aafc..3e276eb23d1b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn);
+ struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
pfn = m2p_find_override_pfn(mfn, ~0);
}
- /*
+ /*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
* entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it.
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dca96ec..dec8de4e1663 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)
return (mask >> (4 * cuid)) & 0xf;
}
-int amd_set_subcaches(int cpu, int mask)
+int amd_set_subcaches(int cpu, unsigned long mask)
{
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d3153e281d72..c67ffa686064 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
{
- tlb_flushall_shift = 5;
-
- if (c->x86 <= 0x11)
- tlb_flushall_shift = 4;
+ tlb_flushall_shift = 6;
}
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 24b6fd10625a..8e28bf2fc3ef 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);
- if (cpu_has(c, X86_FEATURE_SMAP))
+ if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
set_in_cr4(X86_CR4_SMAP);
+#else
+ clear_in_cr4(X86_CR4_SMAP);
+#endif
+ }
}
/*
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3db61c644e44..5cd9bfabd645 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
case 0x61d: /* six-core 45 nm xeon "Dunnington" */
tlb_flushall_shift = -1;
break;
+ case 0x63a: /* Ivybridge */
+ tlb_flushall_shift = 2;
+ break;
case 0x61a: /* 45 nm nehalem, "Bloomfield" */
case 0x61e: /* 45 nm nehalem, "Lynnfield" */
case 0x625: /* 32 nm nehalem, "Clarkdale" */
case 0x62c: /* 32 nm nehalem, "Gulftown" */
case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
case 0x62f: /* 32 nm Xeon E7 */
- tlb_flushall_shift = 6;
- break;
case 0x62a: /* SandyBridge */
case 0x62d: /* SandyBridge, "Romely-EP" */
- tlb_flushall_shift = 5;
- break;
- case 0x63a: /* Ivybridge */
- tlb_flushall_shift = 1;
- break;
default:
tlb_flushall_shift = 6;
}
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 8384c0fa206f..617a9e284245 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
+
+static void __init get_bsp_sig(void)
+{
+ unsigned int bsp = boot_cpu_data.cpu_index;
+ struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+ if (!uci->cpu_sig.sig)
+ smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
#else
void load_ucode_amd_ap(void)
{
@@ -337,31 +346,37 @@ void load_ucode_amd_ap(void)
int __init save_microcode_in_initrd_amd(void)
{
+ unsigned long cont;
enum ucode_state ret;
u32 eax;
-#ifdef CONFIG_X86_32
- unsigned int bsp = boot_cpu_data.cpu_index;
- struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
- if (!uci->cpu_sig.sig)
- smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+ if (!container)
+ return -EINVAL;
+#ifdef CONFIG_X86_32
+ get_bsp_sig();
+ cont = (unsigned long)container;
+#else
/*
- * Take into account the fact that the ramdisk might get relocated
- * and therefore we need to recompute the container's position in
- * virtual memory space.
+ * We need the physical address of the container for both 32- and 64-bit since
+ * boot_params.hdr.ramdisk_image is a physical address.
*/
- container = (u8 *)(__va((u32)relocated_ramdisk) +
- ((u32)container - boot_params.hdr.ramdisk_image));
+ cont = __pa(container);
#endif
+
+ /*
+ * Take into account the fact that the ramdisk might get relocated and
+ * therefore we need to recompute the container's position in virtual
+ * memory space.
+ */
+ if (relocated_ramdisk)
+ container = (u8 *)(__va(relocated_ramdisk) +
+ (cont - boot_params.hdr.ramdisk_image));
+
if (ucode_new_rev)
pr_info("microcode: updated early to new patch_level=0x%08x\n",
ucode_new_rev);
- if (!container)
- return -EINVAL;
-
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2c3e4f..0e25a1bc5ab5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
}
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Save MTRR state */
@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
static void post_set(void) __releases(set_atomicity_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index dbb60878b744..d99f31d9a750 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
+
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine(). Putting them on the stack
+ * results in a stack frame overflow. Dynamically allocating could result in a
+ * failure so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
/*
* This cpu is going to be removed and its vectors migrated to the remaining
* online cpus. Check to see if there are enough vectors in the remaining cpus.
@@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void)
unsigned int this_cpu, vector, this_count, count;
struct irq_desc *desc;
struct irq_data *data;
- struct cpumask affinity_new, online_new;
this_cpu = smp_processor_id();
cpumask_copy(&online_new, cpu_online_mask);
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2e4c02..7c6acd4b8995 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
quirk_amd_nb_node);
#endif
+
+#ifdef CONFIG_PCI
+/*
+ * Processor does not ensure DRAM scrub read/write sequence
+ * is atomic wrt accesses to CC6 save state area. Therefore
+ * if a concurrent scrub read/write access is to same address
+ * the entry may appear as if it is not written. This quirk
+ * applies to Fam16h models 00h-0Fh
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+ u32 val;
+
+ /*
+ * Suggested workaround:
+ * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+ */
+ pci_read_config_dword(dev, 0x58, &val);
+ if (val & 0x1F) {
+ val &= ~(0x1F);
+ pci_write_config_dword(dev, 0x58, val);
+ }
+
+ pci_read_config_dword(dev, 0x5C, &val);
+ if (val & BIT(0)) {
+ val &= ~BIT(0);
+ pci_write_config_dword(dev, 0x5c, val);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+ amd_disable_seq_and_redirect_scrub);
+
+#endif
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 19e5adb49a27..acb3b606613e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
* dance when it's actually needed.
*/
- preempt_disable();
+ preempt_disable_notrace();
data = this_cpu_read(cyc2ns.head);
tail = this_cpu_read(cyc2ns.tail);
@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
if (!--data->__count)
this_cpu_write(cyc2ns.tail, data);
}
- preempt_enable();
+ preempt_enable_notrace();
return ns;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d591c895803..6dea040cc3a1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
+ if (!IS_ENABLED(CONFIG_X86_SMAP))
+ return false;
+
+ if (!static_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
if (error_code & PF_USER)
return false;
@@ -1087,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- if (static_cpu_has(X86_FEATURE_SMAP)) {
- if (unlikely(smap_violation(error_code, regs))) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
+ if (unlikely(smap_violation(error_code, regs))) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 81b2750f3666..27aa0455fab3 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
struct numa_memblk *mb = &mi->blk[i];
memblock_set_node(mb->start, mb->end - mb->start,
&memblock.memory, mb->nid);
-
- /*
- * At this time, all memory regions reserved by memblock are
- * used by the kernel. Set the nid in memblock.reserved will
- * mark out all the nodes the kernel resides in.
- */
- memblock_set_node(mb->start, mb->end - mb->start,
- &memblock.reserved, mb->nid);
}
/*
@@ -565,10 +557,21 @@ static void __init numa_init_array(void)
static void __init numa_clear_kernel_node_hotplug(void)
{
int i, nid;
- nodemask_t numa_kernel_nodes;
+ nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
unsigned long start, end;
struct memblock_type *type = &memblock.reserved;
+ /*
+ * At this time, all memory regions reserved by memblock are
+ * used by the kernel. Setting the nid in memblock.reserved will
+ * mark out all the nodes the kernel resides in.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ }
+
/* Mark all kernel nodes. */
for (i = 0; i < type->cnt; i++)
node_set(type->regions[i].nid, numa_kernel_nodes);
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0342d27ca798..47b6436e41c2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
nid, start, end);
printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
printk(KERN_DEBUG " ");
+ start = round_down(start, PAGES_PER_SECTION);
+ end = round_up(end, PAGES_PER_SECTION);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
physnode_map[pfn / PAGES_PER_SECTION] = nid;
printk(KERN_CONT "%lx ", pfn);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1a25187e151e..1953e9c9391a 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
return acpi_numa < 0;
}
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
int i, j;
- for (i = 0; i < slit->locality_count; i++)
- for (j = 0; j < slit->locality_count; j++)
+ for (i = 0; i < slit->locality_count; i++) {
+ if (pxm_to_node(i) == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < slit->locality_count; j++) {
+ if (pxm_to_node(j) == NUMA_NO_NODE)
+ continue;
numa_set_distance(pxm_to_node(i), pxm_to_node(j),
slit->entry[slit->locality_count * i + j]);
+ }
+ }
}
/* Callback for Proximity Domain -> x2APIC mapping */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3bbac8..dd8dda167a24 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
- count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
if (f->flush_end == TLB_FLUSH_ALL)
local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
info.flush_start = start;
info.flush_end = end;
- count_vm_event(NR_TLB_REMOTE_FLUSH);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (is_uv_system()) {
unsigned int cpu;
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
preempt_disable();
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
}
-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr = ALIGN(start, HPAGE_SIZE);
- for (; addr < end; addr += HPAGE_SIZE) {
- pgd = pgd_offset(mm, addr);
- if (likely(!pgd_none(*pgd))) {
- pud = pud_offset(pgd, addr);
- if (likely(!pud_none(*pud))) {
- pmd = pmd_offset(pud, addr);
- if (likely(!pmd_none(*pmd)))
- if (pmd_large(*pmd))
- return addr;
- }
- }
- }
- return 0;
-}
-
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
unsigned act_entries, tlb_entries = 0;
+ unsigned long nr_base_pages;
preempt_disable();
if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
tlb_entries = tlb_lli_4k[ENTRIES];
else
tlb_entries = tlb_lld_4k[ENTRIES];
+
/* Assume all of TLB entries was occupied by this task */
- act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+ act_entries = tlb_entries >> tlb_flushall_shift;
+ act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+ nr_base_pages = (end - start) >> PAGE_SHIFT;
/* tlb_flushall_shift is on balance point, details in commit log */
- if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ if (nr_base_pages > act_entries) {
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
- if (has_large_page(mm, start, end)) {
- local_flush_tlb();
- goto flush_all;
- }
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
- count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
static void do_flush_tlb_all(void *info)
{
- count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
__flush_tlb_all();
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void)
{
- count_vm_event(NR_TLB_REMOTE_FLUSH);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
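
The rewritten range-flush policy is now a single comparison: flush page by page only while the number of 4K pages stays below tlb_entries >> tlb_flushall_shift, clamped to the task's total_vm; otherwise do one full local flush. A standalone sketch of that decision (local names only, not kernel symbols):

/* Sketch of the flush_tlb_mm_range() policy above. Returns nonzero when
 * one global flush is cheaper than per-page invlpg.
 */
static int want_full_flush(unsigned long nr_base_pages,
			   unsigned long tlb_entries,
			   int flushall_shift,
			   unsigned long total_vm)
{
	unsigned long act_entries;

	if (flushall_shift == -1)	/* CPU prefers full flushes only */
		return 1;

	act_entries = tlb_entries >> flushall_shift;
	if (total_vm < act_entries)	/* task cannot occupy more entries than its VM */
		act_entries = total_vm;

	return nr_base_pages > act_entries;
}
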
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 7145ec63c520..4df9591eadad 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -49,7 +49,8 @@ void __init efi_bgrt_init(void)
image = efi_lookup_mapped_addr(bgrt_tab->image_address);
if (!image) {
- image = ioremap(bgrt_tab->image_address, sizeof(bmp_header));
+ image = early_memremap(bgrt_tab->image_address,
+ sizeof(bmp_header));
ioremapped = true;
if (!image)
return;
@@ -57,7 +58,7 @@ void __init efi_bgrt_init(void)
memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
if (ioremapped)
- iounmap(image);
+ early_iounmap(image, sizeof(bmp_header));
bgrt_image_size = bmp_header.size;
bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL);
@@ -65,7 +66,8 @@ void __init efi_bgrt_init(void)
return;
if (ioremapped) {
- image = ioremap(bgrt_tab->image_address, bmp_header.size);
+ image = early_memremap(bgrt_tab->image_address,
+ bmp_header.size);
if (!image) {
kfree(bgrt_image);
bgrt_image = NULL;
@@ -75,5 +77,5 @@ void __init efi_bgrt_init(void)
memcpy_fromio(bgrt_image, image, bgrt_image_size);
if (ioremapped)
- iounmap(image);
+ early_iounmap(image, bmp_header.size);
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4d7b647867f..201d09a7c46b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
* X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
* (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
+
+ if (!cpu)
+ return;
+ /*
+ * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
+ * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
+ */
+ if (cpu_has_pse)
+ set_in_cr4(X86_CR4_PSE);
+
+ if (cpu_has_pge)
+ set_in_cr4(X86_CR4_PGE);
}
/*
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2423ef04ffea..256282e7888b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (val & _PAGE_PRESENT) {
+ if (pteval_present(val)) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (val & _PAGE_PRESENT) {
+ if (pteval_present(val)) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8009acbe41e4..696c694986d0 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
+ WARN_ON(PagePrivate(page));
+ SetPagePrivate(page);
+ set_page_private(page, mfn);
+ page->index = pfn_to_mfn(pfn);
+
+ if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+ return -ENOMEM;
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
}
EXPORT_SYMBOL_GPL(m2p_add_override);
int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn)
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
+ unsigned long mfn;
unsigned long pfn;
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
pfn = page_to_pfn(page);
+ mfn = get_phys_to_machine(pfn);
+ if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
+ return -EINVAL;
if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
+ WARN_ON(!PagePrivate(page));
+ ClearPagePrivate(page);
+ set_phys_to_machine(pfn, page->index);
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
diff --git a/block/blk-core.c b/block/blk-core.c
index c00e0bdeab4a..853f92749202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
+ uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!uninit_q->flush_rq)
+ goto out_cleanup_queue;
+
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- blk_cleanup_queue(uninit_q);
-
+ goto out_free_flush_rq;
return q;
+
+out_free_flush_rq:
+ kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+ blk_cleanup_queue(uninit_q);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
- return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ return blk_mq_alloc_request(q, rw, gfp_mask);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@@ -1278,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(!q))
return;
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
+ return;
+ }
+
blk_pm_put_request(req);
elv_completed_request(q, req);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index bbfc072a79c2..c68613bb4c79 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after the dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, true);
+ blk_mq_insert_request(q, rq, at_head, true);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
- rq = container_of(work, struct request, mq_flush_data);
+ rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
}
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
{
- INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
- kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+ if (rq->q->mq_ops) {
+ INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+ return false;
+ } else {
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- if (q->mq_ops)
- blk_mq_flush_data_insert(rq);
- else {
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
- }
+ queued = blk_flush_queue_rq(rq);
break;
case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
}
kicked = blk_kick_flush(q);
- /* blk_mq_run_flush will run queue */
- if (q->mq_ops)
- return queued;
return kicked | queued;
}
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
struct request *rq, *n;
unsigned long flags = 0;
- if (q->mq_ops) {
- blk_mq_free_request(flush_rq);
+ if (q->mq_ops)
spin_lock_irqsave(&q->mq_flush_lock, flags);
- }
+
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
* kblockd.
*/
if (queued || q->flush_queue_delayed) {
- if (!q->mq_ops)
- blk_run_queue_async(q);
- else
- /*
- * This can be optimized to only run queues with requests
- * queued if necessary.
- */
- blk_mq_run_queues(q, true);
+ WARN_ON(q->mq_ops);
+ blk_run_queue_async(q);
}
q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
-static void mq_flush_work(struct work_struct *work)
-{
- struct request_queue *q;
- struct request *rq;
-
- q = container_of(work, struct request_queue, mq_flush_work);
-
- /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
- rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
- __GFP_WAIT|GFP_ATOMIC, true);
- rq->cmd_type = REQ_TYPE_FS;
- rq->end_io = flush_end_io;
-
- blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
- kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
* different from running_idx, which means flush is in flight.
*/
q->flush_pending_idx ^= 1;
+
if (q->mq_ops) {
- mq_run_flush(q);
- return true;
+ struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ blk_mq_rq_init(hctx, q->flush_rq);
+ q->flush_rq->mq_ctx = ctx;
+
+ /*
+ * Reuse the tag value from the first waiting request;
+ * with blk-mq the tag is generated during request
+ * allocation and drivers can rely on it being inside
+ * the range they asked for.
+ */
+ q->flush_rq->tag = first_rq->tag;
+ } else {
+ blk_rq_init(q, q->flush_rq);
}
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+ return blk_flush_queue_rq(q->flush_rq);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
- INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2da76c999ef3..97a733cf3d5f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
blk_finish_plug(&plug);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8f8adaa95466..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (!bio)
return 0;
+ /*
+ * This should probably be returning 0, but blk_add_request_payload()
+ * (Christoph!!!!)
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
fbio = bio;
cluster = blk_queue_cluster(q);
seg_size = 0;
@@ -161,30 +171,60 @@ new_segment:
*bvprv = *bvec;
}
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
{
struct bio_vec bvec, bvprv = { NULL };
- struct req_iterator iter;
- struct scatterlist *sg;
+ struct bvec_iter iter;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
- /*
- * for each bio in rq
- */
- sg = NULL;
- rq_for_each_segment(bvec, rq, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in rq */
+ if (bio->bi_rw & REQ_DISCARD) {
+ /*
+ * This is a hack - drivers should be neither modifying the
+ * biovec, nor relying on bi_vcnt - but because of
+ * blk_add_request_payload(), a discard bio may or may not have
+ * a payload we need to set up here (thank you Christoph) and
+ * bi_vcnt is really the only way of telling if we need to.
+ */
+
+ if (bio->bi_vcnt)
+ goto single_segment;
+
+ return 0;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+ *sg = sglist;
+ bvec = bio_iovec(bio);
+ sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ return 1;
+ }
+
+ for_each_bio(bio)
+ bio_for_each_segment(bvec, bio, iter)
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+ &nsegs, &cluster);
+ return nsegs;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = NULL;
+ int nsegs = 0;
+
+ if (rq->bio)
+ nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec bvec, bvprv = { NULL };
- struct scatterlist *sg;
- int nsegs, cluster;
- struct bvec_iter iter;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- sg = NULL;
- bio_for_each_segment(bvec, bio, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in bio */
+ struct scatterlist *sg = NULL;
+ int nsegs;
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+ bio->bi_next = next;
if (sg)
sg_mark_end(sg);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5d70edc9855f..83ae96c51a27 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
- int cpu;
+ unsigned int cpu;
if (!tags)
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 57039fcd9c93..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -226,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
return rq;
}
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
- gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
struct request *rq;
if (blk_mq_queue_enter(q))
return NULL;
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
if (rq)
blk_mq_put_ctx(rq->mq_ctx);
return rq;
@@ -258,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
/*
* Re-init and set pdu, if we have it
*/
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
blk_rq_init(hctx->queue, rq);
@@ -305,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
bio_endio(bio, error);
}
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
{
struct bio *bio = rq->bio;
unsigned int bytes = 0;
@@ -330,48 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error)
else
blk_mq_free_request(rq);
}
+EXPORT_SYMBOL(blk_mq_end_io);
-void __blk_mq_end_io(struct request *rq, int error)
-{
- if (!blk_mark_rq_complete(rq))
- blk_mq_complete_request(rq, error);
-}
-
-static void blk_mq_end_io_remote(void *data)
+static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
- __blk_mq_end_io(rq, rq->errors);
+ rq->q->softirq_done_fn(rq);
}
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
int cpu;
- if (!ctx->ipi_redirect)
- return __blk_mq_end_io(rq, error);
+ if (!ctx->ipi_redirect) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
cpu = get_cpu();
if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
- rq->errors = error;
- rq->csd.func = blk_mq_end_io_remote;
+ rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
__smp_call_function_single(ctx->cpu, &rq->csd, 0);
} else {
- __blk_mq_end_io(rq, error);
+ rq->q->softirq_done_fn(rq);
}
put_cpu();
}
-EXPORT_SYMBOL(blk_mq_end_io);
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ if (unlikely(blk_should_fake_timeout(rq->q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
{
struct request_queue *q = rq->q;
@@ -384,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
*/
rq->deadline = jiffies + q->rq_timeout;
set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * Make sure space for the drain appears. We know we can do
+ * this because max_hw_segments has been adjusted to be one
+ * fewer than the device can handle.
+ */
+ rq->nr_phys_segments++;
+ }
+
+ /*
+ * Flag the last request in the series so that drivers know when IO
+ * should be kicked off, if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition on which drivers should kick off IO.
+ * If the drive is busy, the last request might not have the bit set.
+ */
+ if (last)
+ rq->cmd_flags |= REQ_END;
}
static void blk_mq_requeue_request(struct request *rq)
@@ -392,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
trace_block_rq_requeue(q, rq);
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ rq->cmd_flags &= ~REQ_END;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
}
struct blk_mq_timeout_data {
@@ -559,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_start_request(rq);
- /*
- * Last request in the series. Flag it as such, this
- * enables drivers to know when IO should be kicked off,
- * if they don't do it on a per-request basis.
- *
- * Note: the flag isn't the only condition drivers
- * should do kick off. If drive is busy, the last
- * request might not have the bit set.
- */
- if (list_empty(&rq_list))
- rq->cmd_flags |= REQ_END;
+ blk_mq_start_request(rq, list_empty(&rq_list));
ret = q->mq_ops->queue_rq(hctx, rq);
switch (ret) {
@@ -589,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
break;
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
- rq->errors = -EIO;
case BLK_MQ_RQ_QUEUE_ERROR:
+ rq->errors = -EIO;
blk_mq_end_io(rq, rq->errors);
break;
}
@@ -693,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work)
}
static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+ struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
trace_block_rq_insert(hctx->queue, rq);
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ if (at_head)
+ list_add(&rq->queuelist, &ctx->rq_list);
+ else
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
/*
@@ -709,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
}
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool run_queue)
+ bool at_head, bool run_queue)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx, *current_ctx;
@@ -728,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
rq->mq_ctx = ctx;
}
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -760,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
/* ctx->cpu might be offline */
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -798,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -888,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
return;
@@ -950,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
__blk_mq_free_request(hctx, ctx, rq);
else {
blk_mq_bio_to_request(rq, bio);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -1309,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
reg->queue_depth = BLK_MQ_MAX_DEPTH;
}
- /*
- * Set aside a tag for flush requests. It will only be used while
- * another flush request is in progress but outside the driver.
- *
- * TODO: only allocate if flushes are supported
- */
- reg->queue_depth++;
- reg->reserved_tags++;
-
if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
return ERR_PTR(-EINVAL);
@@ -1360,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
q->mq_ops = reg->ops;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ q->sg_reserved_size = INT_MAX;
+
blk_queue_make_request(q, blk_mq_make_request);
blk_queue_rq_timed_out(q, reg->ops->timeout);
if (reg->timeout)
blk_queue_rq_timeout(q, reg->timeout);
+ if (reg->ops->complete)
+ blk_queue_softirq_done(q, reg->ops->complete);
+
blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
- if (blk_mq_init_hw_queues(q, reg, driver_data))
+ q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+ cache_line_size()), GFP_KERNEL);
+ if (!q->flush_rq)
goto err_hw;
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_flush_rq;
+
blk_mq_map_swqueue(q);
mutex_lock(&all_q_mutex);
@@ -1378,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
mutex_unlock(&all_q_mutex);
return q;
+
+err_flush_rq:
+ kfree(q->flush_rq);
err_hw:
kfree(q->mq_map);
err_map:
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5c3917984b00..ed0035cd458e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,13 +22,13 @@ struct blk_mq_ctx {
struct kobject kobj;
};
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
+void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
/*
* CPU hotplug helpers
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8095c4a21fc0..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
if (q->mq_ops)
blk_mq_free_queue(q);
+ kfree(q->flush_rq);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
case BLK_EH_HANDLED:
/* Can we use req->errors here? */
if (q->mq_ops)
- blk_mq_complete_request(req, req->errors);
+ __blk_mq_complete_request(req);
else
__blk_complete_request(req);
break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8f7a2b..d23b415b8a28 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dying(q)) ||
+ if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 470e7542bf31..018a42883706 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6eb5c4a..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev,
ACPI_COMPANION_SET(dev, adev);
dev->release = acpi_container_release;
ret = device_register(dev);
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
-
+ }
adev->driver_data = dev;
return 1;
}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88faaff..e9b3081c4fe9 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(struct dock_station *ds, u32 event)
{
acpi_handle handle = ds->handle;
- struct acpi_device *ad;
+ struct acpi_device *adev = NULL;
int surprise_removal = 0;
/*
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+ acpi_bus_get_device(handle, &adev);
+ if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 50fe34ffe932..75c28eae8860 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
(device_may_wakeup(&dev->dev) ||
- (ldev && device_may_wakeup(ldev))) ?
+ device_may_wakeup(ldev)) ?
"enabled" : "disabled",
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7384158c7f87..57b053f424d1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src)
static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
{
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- struct acpi_scan_handler *handler = data;
struct acpi_device *adev;
acpi_status status;
@@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- if (!handler->hotplug.enabled) {
+ if (!adev->handler)
+ goto err_out;
+
+ if (!adev->handler->hotplug.enabled) {
acpi_handle_err(handle, "Eject disabled\n");
ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
goto err_out;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0347a37eb438..85e3b612bdc0 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0447d3daf2c..a697b77b8865 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -170,6 +170,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_vendor,
+ .ident = "HP EliteBook Revolve 810",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
.ident = "Lenovo Yoga 13",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
goto out;
}
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
master_remove_components(master);
goto out;
}
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
master->bound = false;
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
+};
+enum {
NULL_Q_BIO = 0,
NULL_Q_RQ = 1,
NULL_Q_MQ = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- if (cmd->rq) {
- if (queue_mode == NULL_Q_MQ)
- blk_mq_end_io(cmd->rq, 0);
- else {
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
- }
- } else if (cmd->bio)
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_end_io(cmd->rq, 0);
+ return;
+ case NULL_Q_RQ:
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ break;
+ case NULL_Q_BIO:
bio_endio(cmd->bio, 0);
+ break;
+ }
- if (queue_mode != NULL_Q_MQ)
- free_cmd(cmd);
+ free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
+ entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
- struct completion_queue *cq;
- struct llist_node *entry, *next;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- entry = llist_del_all(&cq->list);
-
- while (entry) {
- next = entry->next;
- cmd = llist_entry(entry, struct nullb_cmd, ll_list);
- end_cmd(cmd);
- entry = next;
- }
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
- struct call_single_data *data = &cmd->csd;
- int cpu = get_cpu();
- struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
- cmd->ll_list.next = NULL;
-
- if (llist_add(&cmd->ll_list, &cq->list)) {
- data->func = null_ipi_cmd_end_io;
- data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
- }
-
- put_cpu();
+ end_cmd(rq->special);
}
-#endif /* CONFIG_SMP */
-
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
/* Complete IO by inline, softirq or timer */
switch (irqmode) {
- case NULL_IRQ_NONE:
- end_cmd(cmd);
- break;
case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
- null_cmd_end_ipi(cmd);
-#else
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_RQ:
+ blk_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+ * XXX: no proper submitting cpu information available.
+ */
+ end_cmd(cmd);
+ break;
+ }
+ break;
+ case NULL_IRQ_NONE:
end_cmd(cmd);
-#endif
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
+ .complete = null_softirq_done_fn,
};
static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP)
- if (irqmode == NULL_IRQ_SOFTIRQ) {
- pr_warn("null_blk: softirq completions not available.\n");
- pr_warn("null_blk: using direct completions.\n");
- irqmode = NULL_IRQ_NONE;
- }
-#endif
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1f14ac403945..51824d1f23ea 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ u32 result;
+ int status;
+ void *ctx;
+};
/*
* An NVM Express queue. Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
+ char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
+ u16 qid;
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
nvme_completion_fn fn;
void *ctx;
unsigned long timeout;
+ int aborted;
};
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
info[cmdid].fn = handler;
info[cmdid].ctx = ctx;
info[cmdid].timeout = jiffies + timeout;
+ info[cmdid].aborted = 0;
return cmdid;
}
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
static void special_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
return;
if (ctx == CMD_CTX_FLUSH)
return;
+ if (ctx == CMD_CTX_ABORT) {
+ ++dev->abort_limit;
+ return;
+ }
if (ctx == CMD_CTX_COMPLETED) {
dev_warn(&dev->pci_dev->dev,
"completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}
+static void async_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
/*
* Called with local interrupts disabled and the q_lock held. May not sleep.
*/
@@ -693,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return 0;
- writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
@@ -804,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
return cmdinfo.status;
}
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd,
+ struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+ int cmdid;
+
+ cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmdinfo->status = -EINTR;
+ cmd->common.command_id = cmdid;
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
+}
+
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+ struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+ return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ ADMIN_TIMEOUT);
+}
+
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
int status;
@@ -920,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
}
/**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @nvmeq: The queue with the timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+ int a_cmdid;
+ struct nvme_command cmd;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (!nvmeq->qid || info[cmdid].aborted) {
+ if (work_busy(&dev->reset_work))
+ return;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "I/O %d QID %d timeout, reset controller\n", cmdid,
+ nvmeq->qid);
+ PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ return;
+ }
+
+ if (!dev->abort_limit)
+ return;
+
+ a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ ADMIN_TIMEOUT);
+ if (a_cmdid < 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abort.opcode = nvme_admin_abort_cmd;
+ cmd.abort.cid = cmdid;
+ cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+ cmd.abort.command_id = a_cmdid;
+
+ --dev->abort_limit;
+ info[cmdid].aborted = 1;
+ info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+ dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
+ nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
+/**
* nvme_cancel_ios - Cancel outstanding I/Os
* @queue: The queue to cancel I/Os on
* @timeout: True to only cancel I/Os which have timed out
@@ -942,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
continue;
if (info[cmdid].ctx == CMD_CTX_CANCELLED)
continue;
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ if (timeout && nvmeq->dev->initialized) {
+ nvme_abort_cmd(cmdid, nvmeq);
+ continue;
+ }
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
}
@@ -964,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--) {
+ for (i = dev->queue_count - 1; i >= lowest; i--) {
nvme_free_queue(dev->queues[i]);
dev->queue_count--;
dev->queues[i] = NULL;
}
}
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq: queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
- int vector = dev->entry[nvmeq->cq_vector].vector;
+ int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
if (nvmeq->q_suspended) {
spin_unlock_irq(&nvmeq->q_lock);
- return;
+ return 1;
}
nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -991,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
- /* Don't tell the adapter to delete the admin queue */
- if (qid) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
- }
+ return 0;
+}
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, false);
spin_unlock_irq(&nvmeq->q_lock);
}
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+
+ if (!nvmeq)
+ return;
+ if (nvme_suspend_queue(nvmeq))
+ return;
+
+ /* Don't tell the adapter to delete the admin queue.
+ * Don't tell a removed adapter to delete IO queues. */
+ if (qid && readl(&dev->bar->csts) != -1) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+ nvme_clear_queue(nvmeq);
+}
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
@@ -1025,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
+ snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+ dev->instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
@@ -1052,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
{
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED,
+ nvme_irq_check, nvme_irq, IRQF_SHARED,
name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+ IRQF_SHARED, name, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1067,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset(nvmeq->cmdid_data, 0, extra);
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
@@ -1087,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, "nvme");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result < 0)
goto release_sq;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, qid);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
@@ -1205,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
- result = queue_request_irq(dev, nvmeq, "nvme admin");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
return result;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, 0);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
}
@@ -1487,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
}
}
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case SG_IO:
+ return nvme_sg_io32(ns, arg);
+ }
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_get(&dev->kref);
+ return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_put(&dev->kref, nvme_free_dev);
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
};
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1514,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
static int nvme_kthread(void *data)
{
- struct nvme_dev *dev;
+ struct nvme_dev *dev, *next;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
+ list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+ dev->initialized) {
+ if (work_busy(&dev->reset_work))
+ continue;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "Failed status, reset controller\n");
+ PREPARE_WORK(&dev->reset_work,
+ nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ continue;
+ }
for (i = 0; i < dev->queue_count; i++) {
struct nvme_queue *nvmeq = dev->queues[i];
if (!nvmeq)
@@ -1541,33 +1721,6 @@ static int nvme_kthread(void *data)
return 0;
}
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
- int index, error;
-
- do {
- if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
- return -1;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_index_ida, &index);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- index = -1;
- return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_index_ida, index);
- spin_unlock(&dev_list_lock);
-}
-
static void nvme_config_discard(struct nvme_ns *ns)
{
u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1601,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
ns->dev = dev;
ns->queue->queuedata = ns;
- disk = alloc_disk(NVME_MINORS);
+ disk = alloc_disk(0);
if (!disk)
goto out_free_queue;
ns->ns_id = nsid;
@@ -1614,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
disk->major = nvme_major;
- disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->driverfs_dev = &dev->pci_dev->dev;
+ disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -1635,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
-static void nvme_ns_free(struct nvme_ns *ns)
-{
- int index = ns->disk->first_minor / NVME_MINORS;
- put_disk(ns->disk);
- nvme_put_ns_idx(index);
- blk_cleanup_queue(ns->queue);
- kfree(ns);
-}
-
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1659,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
- return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
+ struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = dev->pci_dev;
int result, cpu, i, vecs, nr_io_queues, size, q_depth;
@@ -1690,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
+ free_irq(dev->entry[0].vector, adminq);
vecs = nr_io_queues;
for (i = 0; i < vecs; i++)
@@ -1728,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
nr_io_queues = vecs;
- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
- dev->queues[0]->q_suspended = 1;
+ adminq->q_suspended = 1;
goto free_queues;
}
@@ -1739,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
nvme_free_queue(nvmeq);
dev->queue_count--;
@@ -1782,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return 0;
free_queues:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 1);
return result;
}
@@ -1794,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = dev->pci_dev;
int res;
unsigned nn, i;
struct nvme_ns *ns;
@@ -1803,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
if (!mem)
return -ENOMEM;
@@ -1817,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
+ dev->abort_limit = ctrl->acl + 1;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
- (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (pdev->device == 0x0953) && ctrl->vs[3])
dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
@@ -1871,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto disable;
- pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
goto disable;
-
- dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ if (readl(&dev->bar->csts) == -1) {
+ result = -ENODEV;
+ goto unmap;
+ }
+ dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
+ unmap:
+ iounmap(dev->bar);
+ dev->bar = NULL;
disable:
pci_release_regions(pdev);
disable_pci:
@@ -1898,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
if (dev->bar) {
iounmap(dev->bar);
dev->bar = NULL;
+ pci_release_regions(dev->pci_dev);
}
- pci_release_regions(dev->pci_dev);
if (pci_is_enabled(dev->pci_dev))
pci_disable_device(dev->pci_dev);
}
+struct nvme_delq_ctx {
+ struct task_struct *waiter;
+ struct kthread_worker *worker;
+ atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+ dq->waiter = current;
+ mb();
+
+ for (;;) {
+ set_current_state(TASK_KILLABLE);
+ if (!atomic_read(&dq->refcount))
+ break;
+ if (!schedule_timeout(ADMIN_TIMEOUT) ||
+ fatal_signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+
+ nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+ nvme_disable_queue(dev, 0);
+
+ send_sig(SIGKILL, dq->worker->task, 1);
+ flush_kthread_worker(dq->worker);
+ return;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_dec(&dq->refcount);
+ if (dq->waiter)
+ wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_inc(&dq->refcount);
+ return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+ struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+ nvme_clear_queue(nvmeq);
+ nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+ kthread_work_func_t fn)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+ init_kthread_work(&nvmeq->cmdinfo.work, fn);
+ return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+ nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ int status = nvmeq->cmdinfo.status;
+
+ if (!status)
+ status = nvme_delete_cq(nvmeq);
+ if (status)
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+ nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ allow_signal(SIGKILL);
+ if (nvme_delete_sq(nvmeq))
+ nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ int i;
+ DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+ struct nvme_delq_ctx dq;
+ struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+ &worker, "nvme%d", dev->instance);
+
+ if (IS_ERR(kworker_task)) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to create queue del task\n");
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ return;
+ }
+
+ dq.waiter = NULL;
+ atomic_set(&dq.refcount, 0);
+ dq.worker = &worker;
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (nvme_suspend_queue(nvmeq))
+ continue;
+ nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+ nvmeq->cmdinfo.worker = dq.worker;
+ init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+ queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+ }
+ nvme_wait_dq(&dq, dev);
+ kthread_stop(kworker_task);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_disable_queue(dev, i);
+ dev->initialized = 0;
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
- if (dev->bar)
+ if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ nvme_suspend_queue(nvmeq);
+ nvme_clear_queue(nvmeq);
+ }
+ } else {
+ nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
+ nvme_disable_queue(dev, 0);
+ }
nvme_dev_unmap(dev);
}
static void nvme_dev_remove(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- del_gendisk(ns->disk);
- nvme_ns_free(ns);
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ if (ns->disk->flags & GENHD_FL_UP)
+ del_gendisk(ns->disk);
+ if (!blk_queue_dying(ns->queue))
+ blk_cleanup_queue(ns->queue);
}
}
@@ -1985,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ put_disk(ns->disk);
+ kfree(ns);
+ }
+}
+
static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
- nvme_dev_remove(dev);
- nvme_dev_shutdown(dev);
- nvme_free_queues(dev);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
+
+ nvme_free_namespaces(dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2056,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
disable:
+ nvme_disable_queue(dev, 0);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
@@ -2064,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
}
+static int nvme_remove_dead_ctrl(void *arg)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)arg;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_get_drvdata(pdev))
+ pci_stop_and_remove_bus_device(pdev);
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+ int i;
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+ nvme_dev_remove(dev);
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+ int ret;
+
+ ret = nvme_dev_start(dev);
+ if (ret && ret != -EBUSY)
+ return ret;
+ if (ret == -EBUSY) {
+ spin_lock(&dev_list_lock);
+ PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+ queue_work(nvme_workq, &dev->reset_work);
+ spin_unlock(&dev_list_lock);
+ }
+ dev->initialized = 1;
+ return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+ nvme_dev_shutdown(dev);
+ if (nvme_dev_resume(dev)) {
+ dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+ kref_get(&dev->kref);
+ if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+ dev->instance))) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to start controller remove task\n");
+ kref_put(&dev->kref, nvme_free_dev);
+ }
+ }
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+ nvme_dev_reset(dev);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result = -ENOMEM;
@@ -2082,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
+ INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
dev->pci_dev = pdev;
-
+ pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
goto free;
@@ -2099,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
+ kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2113,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto remove;
- kref_init(&dev->kref);
+ dev->initialized = 1;
return 0;
remove:
nvme_dev_remove(dev);
+ nvme_free_namespaces(dev);
shutdown:
nvme_dev_shutdown(dev);
release_pools:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 0);
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
@@ -2132,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_shutdown(dev);
+}
+
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
+ nvme_dev_remove(dev);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev, 0);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
@@ -2159,13 +2551,12 @@ static int nvme_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- int ret;
- ret = nvme_dev_start(ndev);
- /* XXX: should remove gendisks if resume fails */
- if (ret)
- nvme_free_queues(ndev);
- return ret;
+ if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+ PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &ndev->reset_work);
+ }
+ return 0;
}
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2192,6 +2583,7 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
+ .shutdown = nvme_shutdown,
.driver = {
.pm = &nvme_dev_pm_ops,
},
@@ -2206,9 +2598,14 @@ static int __init nvme_init(void)
if (IS_ERR(nvme_thread))
return PTR_ERR(nvme_thread);
+ result = -ENOMEM;
+ nvme_workq = create_singlethread_workqueue("nvme");
+ if (!nvme_workq)
+ goto kill_kthread;
+
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
- goto kill_kthread;
+ goto kill_workq;
else if (result > 0)
nvme_major = result;
@@ -2219,6 +2616,8 @@ static int __init nvme_init(void)
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+ destroy_workqueue(nvme_workq);
kill_kthread:
kthread_stop(nvme_thread);
return result;
@@ -2228,6 +2627,7 @@ static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ destroy_workqueue(nvme_workq);
kthread_stop(nvme_thread);
}
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4eb8e23..4a0ceb64e269 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return retcode;
}
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+ compat_uint_t iov_base;
+ compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+ sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+ sg_iovec32_t __user *iov32 = dxferp;
+ int i;
+
+ for (i = 0; i < iovec_count; i++) {
+ u32 base, len;
+
+ if (get_user(base, &iov32[i].iov_base) ||
+ get_user(len, &iov32[i].iov_len) ||
+ put_user(compat_ptr(base), &iov[i].iov_base) ||
+ put_user(len, &iov[i].iov_len))
+ return -EFAULT;
+ }
+
+ if (put_user(iov, &sgio->dxferp))
+ return -EFAULT;
+ return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+ sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+ sg_io_hdr_t __user *sgio;
+ u16 iovec_count;
+ u32 data;
+ void __user *dxferp;
+ int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return -EINVAL;
+
+ if (get_user(iovec_count, &sgio32->iovec_count))
+ return -EFAULT;
+
+ {
+ void __user *top = compat_alloc_user_space(0);
+ void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+ (iovec_count * sizeof(sg_iovec_t)));
+ if (new > top)
+ return -EINVAL;
+
+ sgio = new;
+ }
+
+ /* Ok, now construct. */
+ if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+ (2 * sizeof(int)) +
+ (2 * sizeof(unsigned char)) +
+ (1 * sizeof(unsigned short)) +
+ (1 * sizeof(unsigned int))))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->dxferp))
+ return -EFAULT;
+ dxferp = compat_ptr(data);
+ if (iovec_count) {
+ if (sg_build_iovec(sgio, dxferp, iovec_count))
+ return -EFAULT;
+ } else {
+ if (put_user(dxferp, &sgio->dxferp))
+ return -EFAULT;
+ }
+
+ {
+ unsigned char __user *cmdp;
+ unsigned char __user *sbp;
+
+ if (get_user(data, &sgio32->cmdp))
+ return -EFAULT;
+ cmdp = compat_ptr(data);
+
+ if (get_user(data, &sgio32->sbp))
+ return -EFAULT;
+ sbp = compat_ptr(data);
+
+ if (put_user(cmdp, &sgio->cmdp) ||
+ put_user(sbp, &sgio->sbp))
+ return -EFAULT;
+ }
+
+ if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+ 3 * sizeof(int)))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->usr_ptr))
+ return -EFAULT;
+ if (put_user(compat_ptr(data), &sgio->usr_ptr))
+ return -EFAULT;
+
+ err = nvme_sg_io(ns, sgio);
+ if (err >= 0) {
+ void __user *datap;
+
+ if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+ sizeof(int)) ||
+ get_user(datap, &sgio->usr_ptr) ||
+ put_user((u32)(unsigned long)datap,
+ &sgio32->usr_ptr) ||
+ copy_in_user(&sgio32->status, &sgio->status,
+ (4 * sizeof(unsigned char)) +
+ (2 * sizeof(unsigned short)) +
+ (3 * sizeof(int))))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
int nvme_sg_get_version_num(int __user *ip)
{
return put_user(sg_version_num, ip);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
{
- struct request *req = vbr->req;
+ struct virtblk_req *vbr = req->special;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- virtblk_request_done(vbr);
+ blk_mq_complete_request(vbr->req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
.map_queue = blk_mq_map_queue,
.alloc_hctx = blk_mq_alloc_single_hw_queue,
.free_hctx = blk_mq_free_single_hw_queue,
+ .complete = virtblk_request_done,
};
static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index da18046d0e07..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -298,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
BUG_ON(num != 0);
}
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
}
@@ -373,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
- INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -418,7 +420,6 @@ finished:
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
- INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
schedule_work(&blkif->persistent_purge_work);
pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
return;
@@ -623,9 +624,23 @@ purge_gnt_list:
print_stats(blkif);
}
- /* Since we are shutting down remove all pages from the buffer */
- shrink_free_pagepool(blkif, 0 /* All */);
+ /* Drain pending purge work */
+ flush_work(&blkif->persistent_purge_work);
+ if (log_stats)
+ print_stats(blkif);
+
+ blkif->xenblkd = NULL;
+ xen_blkif_put(blkif);
+
+ return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -634,13 +649,8 @@ purge_gnt_list:
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
- if (log_stats)
- print_stats(blkif);
-
- blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
-
- return 0;
+ /* Since we are shutting down remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
}
/*
@@ -668,14 +678,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+ invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
invcount = 0;
}
}
if (invcount) {
- ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
}
@@ -737,7 +748,7 @@ again:
}
if (segs_to_map) {
- ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
BUG_ON(ret);
}
@@ -835,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
@@ -931,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
do {
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
+ if (atomic_read(&blkif->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
@@ -973,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req->blkif,
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
+ make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
- xen_blkif_put(pending_req->blkif);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: The fact that we might try to wake up pending_free_wq
+ * before drain_complete (in case there's a drain going on) is
+ * not a problem with our current implementation, because we
+ * can ensure there's no thread waiting on pending_free_wq
+ * while a drain is going on, but it has to be taken into
+ * account if the current model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
}
- free_req(pending_req->blkif, pending_req);
+ xen_blkif_put(blkif);
}
}
@@ -1237,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(blkif);
+ atomic_inc(&blkif->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
/* for barrier (drain) requests */
struct completion drain_complete;
atomic_t drain;
+ atomic_t inflight;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->free_pages_num = 0;
atomic_set(&blkif->persistent_gnt_in_use, 0);
+ atomic_set(&blkif->inflight, 0);
+ INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+ /* Remove all persistent grants and the cache of ballooned pages. */
+ xen_blkbk_free_caches(blkif);
+
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+ BUG_ON(blkif->free_pages_num != 0);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
+ BUG_ON(!list_empty(&blkif->free_pages));
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
- (struct blkif_request_segment_aligned) {
+ (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
depends on RAW_DRIVER
+ range 1 65536
default "256"
help
The maximum number of RAW devices that are supported.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
} else {
/* Fall back to copying a page */
struct page *page = alloc_page(GFP_KERNEL);
- char *src = buf->ops->map(pipe, buf, 1);
- char *dst;
+ char *src;
if (!page)
return -ENOMEM;
- dst = kmap(page);
offset = sd->pos & ~PAGE_MASK;
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- memcpy(dst + offset, src + buf->offset, len);
-
- kunmap(page);
+ src = buf->ops->map(pipe, buf, 1);
+ memcpy(page_address(page) + offset, src + buf->offset, len);
buf->ops->unmap(pipe, buf, src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e257b233602..c788abf1c457 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -51,12 +51,11 @@ static inline int32_t div_fp(int32_t x, int32_t y)
return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
-static u64 energy_divisor;
-
struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
+ unsigned long long tsc;
int freq;
};
@@ -96,6 +95,7 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
+ unsigned long long prev_tsc;
int sample_ptr;
struct sample samples[SAMPLE_COUNT];
};
@@ -548,30 +548,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
- core_pct = div64_u64(int_tofp(sample->aperf * 100),
- sample->mperf);
- sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+ u64 c0_pct;
+
+ core_pct = div64_u64(sample->aperf * 100, sample->mperf);
- sample->core_pct_busy = core_pct;
+ c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+ sample->freq = fp_toint(
+ mul_fp(int_tofp(cpu->pstate.max_pstate),
+ int_tofp(core_pct * 1000)));
+
+ sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+ div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long long tsc;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = native_read_tsc();
cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
cpu->samples[cpu->sample_ptr].aperf = aperf;
cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].tsc = tsc;
cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+ cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -617,12 +628,10 @@ static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
struct sample *sample;
- u64 energy;
intel_pstate_sample(cpu);
sample = &cpu->samples[cpu->sample_ptr];
- rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
intel_pstate_adjust_busy_pstate(cpu);
@@ -631,7 +640,6 @@ static void intel_pstate_timer_func(unsigned long __data)
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
- div64_u64(energy, energy_divisor),
sample->freq);
intel_pstate_set_sample_time(cpu);
@@ -913,7 +921,6 @@ static int __init intel_pstate_init(void)
int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_info;
- u64 units;
if (no_load)
return -ENODEV;
@@ -947,9 +954,6 @@ static int __init intel_pstate_init(void)
if (rc)
goto out;
- rdmsrl(MSR_RAPL_POWER_UNIT, units);
- energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
-
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
return sl->entry_nr * sizeof(struct nx842_slentry);
}
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return page_to_phys(vmalloc_to_page(addr))
+ + offset_in_page(addr);
+ else
+ return __pa(addr);
+}
+
static int nx842_build_scatterlist(unsigned long buf, int len,
struct nx842_scatterlist *sl)
{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
entry = sl->entries;
while (len) {
- entry->ptr = __pa(buf);
+ entry->ptr = nx842_get_pa((void *)buf);
nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
if (nextpage < buf + len) {
/* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_COMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
- op.out = __pa(slout.entries);
+ op.csbcpb = nx842_get_pa(csbcpb);
+ op.out = nx842_get_pa(slout.entries);
for (i = 0; i < hdr->blocks_nr; i++) {
/*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, max_sync_size, &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_DECOMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
+ op.csbcpb = nx842_get_pa(csbcpb);
/*
* max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
if (likely((inbuf & NX842_HW_PAGE_MASK) ==
((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = hdr->sizes[i];
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.out = __pa(outbuf);
+ op.out = nx842_get_pa((void *)outbuf);
op.outlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(outbuf, max_sync_size, &slout);
- op.out = __pa(slout.entries);
+ op.out = nx842_get_pa(slout.entries);
op.outlen = -nx842_get_scatterlist_size(&slout);
}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
edac_dbg(0, "\n");
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
if (mci->op_state != OP_RUNNING_POLL)
return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
* user space has updated our poll period value, need to
* reset our workq delays
*/
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
}
mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* This instance is NOW RUNNING */
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 51c0362acf5c..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- long l;
+ unsigned long l;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtol(val, 0, &l);
+ ret = kstrtoul(val, 0, &l);
if (ret)
return ret;
- if ((int)l != l)
+
+ if (l < 1000)
return -EINVAL;
- *((int *)kp->arg) = l;
+
+ *((unsigned long *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 697338772b64..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@ config GPIO_GRGPIO
config GPIO_TB10X
bool
+ select GENERIC_IRQ_CHIP
select OF_GPIO
comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
module_platform_driver(bcm_kona_gpio_driver);
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
static int intel_gpio_runtime_idle(struct device *dev)
{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
+ int err = pm_schedule_suspend(dev, 500);
+ return err ?: -EBUSY;
}
static const struct dev_pm_ops intel_gpio_pm_ops = {
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
#error GPIO32 option is not enabled for your xtensa core variant
#endif
+#if XCHAL_HAVE_CP
+
static inline unsigned long enable_cp(unsigned long *cpenable)
{
unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
local_irq_restore(flags);
}
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ *cpenable = 0; /* avoid uninitialized value warning */
+ return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
return 1; /* input only */
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3f65dd6676b2..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = ast_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2fd4a92162cb..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = cirrus_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
ret = exynos_drm_subdrv_open(dev, file);
- if (ret) {
- kfree(file_priv);
- file->driver_priv = NULL;
- }
+ if (ret)
+ goto out;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
- kfree(file_priv);
- return PTR_ERR(anon_filp);
+ ret = PTR_ERR(anon_filp);
+ goto out;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
+out:
+ kfree(file_priv);
+ file->driver_priv = NULL;
+ return ret;
}
static void exynos_drm_preclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
- };
+ }
return reg_type;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
- * quf == NULL condition means all event deletion.
+ * qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
@@ -59,19 +60,6 @@
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
-
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
}
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
+ union hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
return;
}
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
/* Output format zero hardcoded, RGB YCbCr selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
- case HDMI_PACKET_TYPE_AUI:
+ case HDMI_INFOFRAME_TYPE_AUDIO:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
+ union hdmi_infoframe infoframe;
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..fa18cf374470 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
{
uint8_t buf[PB(5) + 1];
+ memset(buf, 0, sizeof(buf));
buf[HB(0)] = 0x84;
buf[HB(1)] = 0x01;
buf[HB(2)] = 10;
- buf[PB(0)] = 0;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ reg_write(encoder, REG_ENABLE_SPACE, 0x00);
}
/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
kfree(priv);
}
@@ -1142,8 +1149,10 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
- priv->current_page = 0;
+ priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec)
+ return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
va_list tmp;
va_copy(tmp, args);
- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
return;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..9fec71175571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..2f517b85b3f4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
- bool has_aux_irq = true;
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
@@ -1869,10 +1869,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
intel_enable_dp(encoder);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#define MAX_DSLP 1500
+
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
- } else if (dslp > 500) {
+ } else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
- WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
- dslp = 500;
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index f9adc27ef32a..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b8583f275e80..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (32700 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_EH &&
+ } else if (mdev->type == G200_EH &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (37500 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_ER &&
+ } else if (mdev->type == G200_ER &&
(mga_vga_calculate_mode_bandwidth(mode,
bpp) > (55000 * 1024))) {
return MODE_BANDWIDTH;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
spinlock_t lock;
bool stale;
uint32_t width, height;
+ uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we currently hold a scanout ref to: */
+ /* the fb that we logically (from the PoV of the KMS API) hold a
+ * ref to, which we may not yet be scanning out (we may still be
+ * scanning out the previous fb in case of a page_flip while
+ * waiting for gpu rendering to complete).
+ */
struct drm_framebuffer *fb;
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void update_fb(struct drm_crtc *crtc, bool async,
- struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.fb = new_fb;
mdp4_crtc->fb = new_fb;
- if (!async) {
- /* enable vblank to pick up the old_fb */
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
- }
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * the plane, then call this. Needed to ensure we don't unref the buffer
+ * that is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into the driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
- }
- flush |= ovlp2flush(mdp4_crtc->ovlp);
-
- DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
- mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- atomic_or(pending, &mdp4_crtc->pending);
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
if (!fb)
return;
+ drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
- update_fb(crtc, false, crtc->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
return 0;
}
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
+ int ret;
- update_fb(crtc, false, crtc->fb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
- return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, true, new_fb);
+ update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -542,12 +591,15 @@ fail:
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
- MDP4_DMA_CURSOR_POS_X(x) |
- MDP4_DMA_CURSOR_POS_Y(y));
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
return 0;
}
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
- MDP4_PIPE_SRC_XY_X(crtc_x) |
- MDP4_PIPE_SRC_XY_Y(crtc_y));
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
update_fb(crtc, crtc->fb);
update_scanout(crtc, crtc->fb);
- return ret;
+ return 0;
}
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
fail:
if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably want
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr(&obj->base);
+ ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
- mutex_lock(&submit->dev->struct_mutex);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
- mutex_unlock(&submit->dev->struct_mutex);
ww_acquire_fini(&submit->ticket);
kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_drm_private *priv = dev->dev_private;
int i, ret;
- mutex_lock(&dev->struct_mutex);
-
submit->fence = ++priv->next_fence;
gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0fbd36f3d4e9..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ if (rdev->family >= CHIP_CEDAR) {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ } else {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc);
+ }
+ }
+}
+
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
int kv_dpm_late_enable(struct radeon_device *rdev)
{
- int ret;
+ int ret = 0;
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..1217fbcbdcca 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *ps = ni_get_ps(rps);
- u16 vddc;
struct rv7xx_pl *pl = &ps->performance_levels[index];
ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..cdbc4171fe73 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3991,6 +3991,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b399dc5fd54..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_008C64_SQ_VSTMP_RING_SIZE:
case R_0288C8_SQ_GS_VERT_ITEMSIZE:
/* get value to populate the IB don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
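+ /* The GS/ES/VS temp and ring base registers carry a GPU address: patch in the relocated buffer offset (>> 8) */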
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+ .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec8c388eec17..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
* 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - Allow GS ring setup on r6xx/r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..5b2ea8ac0731 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct rv7xx_ps *ps = rv770_get_ps(rps);
u32 sclk, mclk;
- u16 vddc;
struct rv7xx_pl *pl;
switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..83578324e5d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..eafb0e6bc67e 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
struct sumo_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
struct trinity_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
- return;
}
/**
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 37079859afc8..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- if (!kref_get_unless_zero(&ref->kref)) {
+ if (kref_get_unless_zero(&ref->kref)) {
rcu_read_unlock();
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9af99084b344..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
pgoff_t i;
struct page **page = ttm->pages;
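+ /* SG-backed TTMs get their pages from an external sg table (typically a dma-buf import); leave mapping/index alone */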
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ return;
+
for (i = 0; i < ttm->num_pages; ++i) {
(*page)->mapping = NULL;
(*page++)->index = 0;
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -2583,4 +2583,28 @@ typedef union {
float f;
} SVGA3dDevCapResult;
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
+ mutex_lock(&dev_priv->binding_mutex);
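+ /* Kill all context bindings under the binding mutex before issuing the context destroy command */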
+ (void) vmw_context_binding_state_kill
+ (&container_of(res, struct vmw_user_context, res)->cbs);
(void) vmw_gb_context_destroy(res);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+ mutex_unlock(&dev_priv->binding_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_kill(&uctx->cbs);
+ vmw_context_binding_state_scrub(&uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBContext body;
} *cmd;
- struct vmw_user_context *uctx =
- container_of(res, struct vmw_user_context, res);
-
- BUG_ON(!list_empty(&uctx->cbs.list));
if (likely(res->id == -1))
return 0;
@@ -528,8 +530,9 @@ out_unlock:
* vmw_context_scrub_shader - scrub a shader binding from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of a scrub command.
*/
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = SVGA3D_INVALID_ID;
+ cmd->body.shid =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
* from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of a scrub command.
*/
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = SVGA3D_INVALID_ID;
+ cmd->body.target.sid =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
* vmw_context_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of a scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them to a single command.
*/
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+ cmd->body.s1.value =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
vmw_context_binding_drop(loc);
loc->bi = *bi;
+ loc->bi.scrubbed = false;
list_add_tail(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- if (bi->res != NULL)
+ if (bi->res != NULL) {
+ loc->bi = *bi;
+ list_add_tail(&loc->ctx_list, &cbs->list);
list_add_tail(&loc->res_list, &bi->res->binding_head);
- else
- INIT_LIST_HEAD(&loc->res_list);
+ }
}
/**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
*/
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+ if (!cb->bi.scrubbed) {
+ (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+ cb->bi.scrubbed = true;
+ }
vmw_context_binding_drop(cb);
}
@@ -768,6 +781,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
}
/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
* vmw_context_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
@@ -785,6 +819,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
}
/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
* vmw_context_binding_state_transfer - Commit staged binding info
*
* @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+ struct vmw_ctx_binding *entry;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->bi.scrubbed))
+ continue;
+
+ if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
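+ /* Re-emit the bind command (rebind == true) now that the backing resource has a valid id again */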
+ ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->bi.scrubbed = false;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+ return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
+ vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
+ vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+ if (IS_ERR(vmw_fp->shman))
+ goto out_no_shman;
+
file_priv->driver_priv = vmw_fp;
dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
+out_no_shman:
+ ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
+struct vmw_compat_shader_manager;
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
+ bool gb_aware;
+ struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
+ bool scrubbed;
union {
SVGA3dShaderType shader_type;
SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the called made from the kernel */
- struct ttm_object_file *tfile;
+ struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
+ struct list_head staged_shaders;
};
struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
/*
* Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
+ if (!backoff) {
+ vmw_context_binding_state_transfer
+ (val->res, val->staged_bindings);
+ }
kfree(val->staged_bindings);
val->staged_bindings = NULL;
}
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx)
+{
+ struct list_head *binding_list;
+ struct vmw_ctx_binding *entry;
+ int ret = 0;
+ struct vmw_resource *res;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ binding_list = vmw_context_binding_list(ctx);
+
+ list_for_each_entry(entry, binding_list, ctx_list) {
+ res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ if (unlikely(res == NULL))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ mutex_unlock(&dev_priv->binding_mutex);
+ return ret;
+}
+
+/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head)
- cb[rel->offset] = rel->res->id;
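+ /* A NULL relocation resource means the command should be turned into a NOP (e.g. compat shader define/destroy) */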
+ list_for_each_entry(rel, list, head) {
+ if (likely(rel->res != NULL))
+ cb[rel->offset] = rel->res->id;
+ else
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ }
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
*/
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id,
- struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t id,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (*id == SVGA3D_INVALID_ID) {
+ if (id == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
* resource
*/
- if (likely(rcache->valid && *id == rcache->handle)) {
+ if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->tfile,
- *id,
+ sw_context->fp->tfile,
+ id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id);
+ (unsigned) id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = *id;
+ rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
if (p_val)
*p_val = node;
- if (node->first_usage && res_type == vmw_res_context) {
+ if (dev_priv->has_mob && node->first_usage &&
+ res_type == vmw_res_context) {
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
}
/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
+{
+ return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+ converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ if (likely(!val->staged_bindings))
+ continue;
+
+ ret = vmw_context_rebind_all(val->res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to rebind context.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+ header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
&cmd->body.sid, NULL);
}
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_define_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineShader body;
+ } *cmd;
+ int ret;
+ size_t size;
+
+ cmd = container_of(header, struct vmw_shader_define_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ size = cmd->header.size - sizeof(cmd->body);
+ ret = vmw_compat_shader_add(sw_context->fp->shman,
+ cmd->body.shid, cmd + 1,
+ cmd->body.type, size,
+ sw_context->fp->tfile,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_destroy_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_shader_destroy_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_resource_val_node *res_node;
-
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res_node);
+ u32 shid = cmd->body.shid;
+
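+ /* Translate a legacy shader id into the handle of its compat guest-backed shader, if one was created */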
+ if (shid != SVGA3D_INVALID_ID)
+ (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+ cmd->body.type,
+ &shid);
+
+ ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ shid,
+ &cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_const_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShaderConst body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_const_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob)
+ header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+ return 0;
+}
+
+/**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command
*
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
- true, true, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
- true, true, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
- true, true, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else
sw_context->kernel = true;
- sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
+ INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
}
+ if (dev_priv->has_mob) {
+ ret = vmw_rebind_contexts(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
+ vmw_compat_shaders_commit(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
+ vmw_compat_shaders_revert(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+struct svga_3d_compat_cap {
+ SVGA3dCapsRecordHeader header;
+ SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+ param->value = SVGA3D_HWVERSION_WS8_B1;
+ break;
+ }
+
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
- param->value = dev_priv->memory_size;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ !vmw_fp->gb_aware)
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+ else
+ param->value = dev_priv->memory_size;
break;
case DRM_VMW_PARAM_3D_CAPS_SIZE:
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
- param->value = SVGA3D_DEVCAP_MAX;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ vmw_fp->gb_aware)
+ param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ param->value = sizeof(struct svga_3d_compat_cap) +
+ sizeof(uint32_t);
else
param->value = (SVGA_FIFO_3D_CAPS_LAST -
- SVGA_FIFO_3D_CAPS + 1);
- param->value *= sizeof(uint32_t);
+ SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
break;
case DRM_VMW_PARAM_MAX_MOB_MEMORY:
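+ /* Querying MOB memory marks this client as guest-backed-object aware, which affects 3D caps reporting */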
+ vmw_fp->gb_aware = true;
param->value = dev_priv->max_mob_pages * PAGE_SIZE;
break;
default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+ size_t size)
+{
+ struct svga_3d_compat_cap *compat_cap =
+ (struct svga_3d_compat_cap *) bounce;
+ unsigned int i;
+ size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+ unsigned int max_size;
+
+ if (size < pair_offset)
+ return -EINVAL;
+
+ max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+ if (max_size > SVGA3D_DEVCAP_MAX)
+ max_size = SVGA3D_DEVCAP_MAX;
+
+ compat_cap->header.length =
+ (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+ compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < max_size; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ compat_cap->pairs[i][0] = i;
+ compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return 0;
+}
+
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void *bounce;
int ret;
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
- if (gb_objects)
- size = SVGA3D_DEVCAP_MAX;
+ if (gb_objects && vmw_fp->gb_aware)
+ size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (gb_objects)
+ size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
else
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
- size *= sizeof(uint32_t);
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
if (arg->max_size < size)
size = arg->max_size;
- bounce = vmalloc(size);
+ bounce = vzalloc(size);
if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM;
}
- if (gb_objects) {
- int i;
+ if (gb_objects && vmw_fp->gb_aware) {
+ int i, num;
uint32_t *bounce32 = (uint32_t *) bounce;
+ num = size / sizeof(uint32_t);
+ if (num > SVGA3D_DEVCAP_MAX)
+ num = SVGA3D_DEVCAP_MAX;
+
mutex_lock(&dev_priv->hw_mutex);
- for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+ for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
mutex_unlock(&dev_priv->hw_mutex);
-
+ } else if (gb_objects) {
+ ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+ if (unlikely(ret != 0))
+ goto out_err;
} else {
-
fifo_mem = dev_priv->mmio_virt;
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
}
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
+out_err:
vfree(bounce);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..d4a5a19cb8c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+ ret = -ENOMEM;
goto out_no_fifo;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
/**
* vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup);
}
- if (likely(res->hw_destroy != NULL))
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
id = res->id;
if (res->res_free != NULL)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..217d941b8176 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+ VMW_COMPAT_COMMITED,
+ VMW_COMPAT_ADD,
+ VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+ u32 handle;
+ struct ttm_object_file *tfile;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+ struct drm_open_hash shaders;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
+{
+ struct vmw_user_shader *ushader;
+ struct vmw_resource *res, *tmp;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_user_shader_size == 0))
+ vmw_user_shader_size =
+ ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out;
+ }
+
+ ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+ if (unlikely(ushader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ res = &ushader->shader.res;
+ ushader->base.shareable = false;
+ ushader->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_user_shader_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &ushader->base, false,
+ VMW_RES_SHADER,
+ &vmw_user_shader_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ if (handle)
+ *handle = ushader->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out:
+ return ret;
+}
+
+
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_shader *ushader;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
goto out_bad_arg;
}
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_bad_arg;
- if (unlikely(vmw_user_shader_size == 0))
- vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
- + 128;
+ ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
- ret = ttm_read_lock(&vmaster->lock, true);
+ ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+ vmw_dmabuf_unreference(&buffer);
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = *user_key | (shader_type << 24);
+
+ ret = drm_ht_find_item(&man->shaders, key, &hash);
if (unlikely(ret != 0))
return ret;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_shader_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader"
- " creation.\n");
- goto out_unlock;
+ *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+ hash)->handle;
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+ struct vmw_compat_shader *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+ WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE));
+ kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ entry->state = VMW_COMPAT_COMMITED;
+ list_add_tail(&entry->head, &man->list);
+ break;
+ case VMW_COMPAT_DEL:
+ ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
}
+}
- ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
- ret = -ENOMEM;
- goto out_unlock;
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_DEL:
+ (void) drm_ht_insert_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &man->list);
+ entry->state = VMW_COMPAT_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
}
+}
- res = &ushader->shader.res;
- ushader->base.shareable = false;
- ushader->base.tfile = NULL;
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry;
+ struct drm_hash_item *hash;
+ int ret;
- /*
- * From here on, the destructor takes over resource freeing.
- */
+ ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+ &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
- ret = vmw_gb_shader_init(dev_priv, res, arg->size,
- arg->offset, shader_type, buffer,
- vmw_user_shader_free);
+ entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_COMMITED:
+ (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_COMPAT_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list)
+{
+ struct vmw_dma_buffer *buf;
+ struct ttm_bo_kmap_obj map;
+ bool is_iomem;
+ struct vmw_compat_shader *compat;
+ u32 handle;
+ int ret;
+
+ if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ return -EINVAL;
+
+ /* Allocate and pin a DMA buffer */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out;
- tmp = vmw_resource_reference(res);
- ret = ttm_base_object_init(tfile, &ushader->base, false,
- VMW_RES_SHADER,
- &vmw_user_shader_base_release, NULL);
+ ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /* Map and copy shader bytecode. */
+ ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ &map);
if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
+ ttm_bo_unreserve(&buf->base);
+ goto no_reserve;
}
- arg->shader_handle = ushader->base.hash.key;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
- vmw_dmabuf_unreference(&buffer);
+ memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+ WARN_ON(is_iomem);
+
+ ttm_bo_kunmap(&map);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ WARN_ON(ret != 0);
+ ttm_bo_unreserve(&buf->base);
+
+ /* Create a guest-backed shader container backed by the dma buffer */
+ ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+ tfile, &handle);
+ vmw_dmabuf_unreference(&buf);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /*
+ * Create a compat shader structure and stage it for insertion
+ * in the manager
+ */
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+ if (compat == NULL)
+ goto no_compat;
+
+ compat->hash.key = user_key | (shader_type << 24);
+ ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ compat->state = VMW_COMPAT_ADD;
+ compat->handle = handle;
+ compat->tfile = tfile;
+ list_add_tail(&compat->head, list);
+ return 0;
+
+out_invalid_key:
+ kfree(compat);
+no_compat:
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful, returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_compat_shader_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ mutex_lock(&man->dev_priv->cmdbuf_mutex);
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_compat_shader_free(man, entry);
+ mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+ kfree(man);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ ret = copy_to_user(user_sizes, &srf->base_size,
+ sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- int t;
init_completion(&msginfo->waitevent);
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
* Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- return -ETIMEDOUT;
- }
+ wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecabc4380..73b3865f1207 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
if (hwmon_irq < 0)
return hwmon_irq;
- hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
- if (hwmon_irq < 0)
- return hwmon_irq;
-
ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
NULL, da9055_auxadc_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
- unsigned int result;
+ s64 result;
int val, ret;
ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
/* unit: mV */
- result = pdata->pullup_uv * val;
+ result = pdata->pullup_uv * (s64) val;
result >>= 12;
- return result;
+ return (int)result;
}
static const struct of_device_id ntc_match[] = {
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e9d861..291d11fe93e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@ struct pmbus_data {
u32 flags; /* from platform data */
- int exponent; /* linear mode: exponent for output voltages */
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
const struct pmbus_driver_info *info;
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
long val;
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
- exponent = data->exponent;
+ exponent = data->exponent[sensor->page];
mantissa = (u16) sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
#define MIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
s16 exponent = 0, mantissa;
bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
if (val == 0)
return 0;
- if (class == PSC_VOLTAGE_OUT) {
+ if (sensor->class == PSC_VOLTAGE_OUT) {
/* LINEAR16 does not support negative voltages */
if (val < 0)
return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
* For a static exponent, we don't have a choice
* but to adjust the value to it.
*/
- if (data->exponent < 0)
- val <<= -data->exponent;
+ if (data->exponent[sensor->page] < 0)
+ val <<= -data->exponent[sensor->page];
else
- val >>= data->exponent;
+ val >>= data->exponent[sensor->page];
val = DIV_ROUND_CLOSEST(val, 1000);
return val & 0xffff;
}
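For reference, a minimal user-space sketch of the LINEAR16 encoding performed above (illustrative, not part of the patch). It assumes a fixed exponent of -13, a common VOUT_MODE value; the driver itself now looks the exponent up per page in data->exponent[sensor->page].

#include <stdio.h>
#include <stdint.h>

/* Mirror the driver's LINEAR16 path for positive voltages: shift the
 * millivolt value by -exponent, round to the nearest unit (equivalent to
 * DIV_ROUND_CLOSEST for positive values) and keep the low 16 bits. */
static uint16_t linear16_encode(long val_mv, int exponent)
{
	if (exponent < 0)
		val_mv <<= -exponent;
	else
		val_mv >>= exponent;
	return (uint16_t)((val_mv + 500) / 1000);
}

int main(void)
{
	/* 1200 mV with exponent -13: round(1.2 * 8192) = 9830 = 0x2666 */
	printf("0x%04x\n", linear16_encode(1200, -13));
	return 0;
}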
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
/* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
+ if (sensor->class == PSC_POWER)
val = DIV_ROUND_CLOSEST(val, 1000L);
/*
* For simplicity, convert fan data to milli-units
* before calculating the exponent.
*/
- if (class == PSC_FAN)
+ if (sensor->class == PSC_FAN)
val = val * 1000;
/* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
long m, b, R;
- m = data->info->m[class];
- b = data->info->b[class];
- R = data->info->R[class];
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
/* Power is in uW. Adjust R and b. */
- if (class == PSC_POWER) {
+ if (sensor->class == PSC_POWER) {
R -= 3;
b *= 1000;
}
/* Calculate Y = (m * X + b) * 10^R */
- if (class != PSC_FAN) {
+ if (sensor->class != PSC_FAN) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
val = clamp_val(val, 500, 1600);
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
}
static u16 pmbus_data2reg(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
u16 regval;
- switch (data->info->format[class]) {
+ switch (data->info->format[sensor->class]) {
case direct:
- regval = pmbus_data2reg_direct(data, class, val);
+ regval = pmbus_data2reg_direct(data, sensor, val);
break;
case vid:
- regval = pmbus_data2reg_vid(data, class, val);
+ regval = pmbus_data2reg_vid(data, sensor, val);
break;
case linear:
default:
- regval = pmbus_data2reg_linear(data, class, val);
+ regval = pmbus_data2reg_linear(data, sensor, val);
break;
}
return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
return -EINVAL;
mutex_lock(&data->update_lock);
- regval = pmbus_data2reg(data, sensor->class, val);
+ regval = pmbus_data2reg(data, sensor, val);
ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
* This function is called for all chips.
*/
static int pmbus_identify_common(struct i2c_client *client,
- struct pmbus_data *data)
+ struct pmbus_data *data, int page)
{
int vout_mode = -1;
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
- vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+ vout_mode = _pmbus_read_byte_data(client, page,
+ PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
/*
* Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
- data->exponent = ((s8)(vout_mode << 3)) >> 3;
+ data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
break;
case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- pmbus_clear_fault_page(client, 0);
+ pmbus_clear_fault_page(client, page);
return 0;
}
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
- int ret;
+ int page, ret;
/*
* Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return -ENODEV;
}
- ret = pmbus_identify_common(client, data);
- if (ret < 0) {
- dev_err(dev, "Failed to identify chip capabilities\n");
- return ret;
+ for (page = 0; page < info->pages; page++) {
+ ret = pmbus_identify_common(client, data, page);
+ if (ret < 0) {
+ dev_err(dev, "Failed to identify chip capabilities\n");
+ return ret;
+ }
}
return 0;
}
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec9220df04..bfec313492b3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ },
};
-#define BMA180_CHANNEL(_index) { \
+#define BMA180_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
- .indexed = 1, \
- .channel = (_index), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = (_index), \
+ .scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
.realbits = 14, \
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}
static const struct iio_chan_spec bma180_channels[] = {
- BMA180_CHANNEL(AXIS_X),
- BMA180_CHANNEL(AXIS_Y),
- BMA180_CHANNEL(AXIS_Z),
- IIO_CHAN_SOFT_TIMESTAMP(4),
+ BMA180_CHANNEL(X),
+ BMA180_CHANNEL(Y),
+ BMA180_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f2ee2f..360259266d4f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client,
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get(&client->dev, "vref");
+ vref = devm_regulator_get_optional(&client->dev, "vref");
if (!IS_ERR(vref)) {
int vref_uv;
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
ADIS16300_SCAN_INCLI_X,
ADIS16300_SCAN_INCLI_Y,
ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660dfe135..7c582f7ae34e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
- IIO_CHAN_SOFT_TIMESTAMP(12)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
},
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
- IIO_CHAN_SOFT_TIMESTAMP(14)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(8)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d8110157f2d..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- if (chan->channel == IIO_MOD_LIGHT_BOTH)
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
chip->calib0 = calib_from_sysfs(val);
- else
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
return 0;
}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
- long m)
+ long mask)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
- switch (m) {
+ switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = chip->data0;
else
*val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..05423543f89d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
/*
* Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
*
* Since 1uT = 0.01 gauss, our final scale factor becomes:
*
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
*
* Since ASA doesn't change, we cache the resultant scale factor into the
* device context in ak8975_setup().
*/
- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
return 0;
}
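For reference, a quick check of the arithmetic above (illustrative, not part of the patch): with the neutral adjustment value ASA = 128, RAW_TO_GAUSS gives ((128 + 128) * 3000) / 256 = 3000; reported as IIO_VAL_INT_PLUS_MICRO in read_raw below, that is 0.003000 gauss (0.3 uT) per LSB, while the old integer expression ((128 + 128) * 30) >> 8 evaluated to 30.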
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
return ak8975_read_axis(indio_dev, chan->address, val);
case IIO_CHAN_INFO_SCALE:
- *val = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d3bdb1..f66955fb3509 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
while (n-- > 0)
len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", vals[n][0], vals[n][1]);
+ "%d.%06d ", vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
struct mag3110_data *data = iio_priv(indio_dev);
int rate;
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Initialize network device */
if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
iounmap(mmio_regs);
goto bail4;
}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto bail10;
}
- if (c2_register_device(c2dev))
+ ret = c2_register_device(c2dev);
+ if (ret)
goto bail10;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
goto bail4;
/* Initialize the cached adapter limits */
- if (c2_rnic_query(c2dev, &c2dev->props))
+ err = c2_rnic_query(c2dev, &c2dev->props);
+ if (err)
goto bail5;
/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto free_dst;
}
+ neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f549f10..e81c5547e647 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
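For reference, the helper above is the usual MAC-to-modified-EUI-64 mapping with an optional embedded VLAN id; a self-contained user-space sketch (the function name and test address are illustrative only; the in-kernel code operates on dev->dev_addr):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void ifid_eui48(uint8_t *eui, uint16_t vlan_id, const uint8_t *mac)
{
	memcpy(eui, mac, 3);		/* upper half of the MAC */
	memcpy(eui + 5, mac + 3, 3);	/* lower half of the MAC */
	if (vlan_id < 0x1000) {		/* valid VLAN id: embed it */
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {			/* no VLAN: standard ff:fe filler */
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;			/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t eui[8];
	int i;

	/* No VLAN (0xffff): prints 02:11:22:ff:fe:33:44:55 */
	ifid_eui48(eui, 0xffff, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%c", eui[i], i == 7 ? '\n' : ':');
	return 0;
}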
static void update_gids_task(struct work_struct *work)
{
struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
struct mlx4_cmd_mailbox *mailbox;
union ib_gid *gids;
int err;
- int i;
struct mlx4_dev *dev = gw->dev->dev;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
gids = mailbox->buf;
memcpy(gids, gw->gids, sizeof(gw->gids));
- for (i = 1; i < gw->dev->num_ports + 1; i++) {
- if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
- IB_LINK_LAYER_ETHERNET) {
- err = mlx4_cmd(dev, mailbox->dma,
- MLX4_SET_PORT_GID_TABLE << 8 | i,
- 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
- if (err)
- pr_warn(KERN_WARNING
- "set port %d command failed\n", i);
- }
+ if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ pr_warn(KERN_WARNING
+ "set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
}
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
- union ib_gid *gid, int clear)
+ union ib_gid *gid, int clear,
+ int default_gid)
{
struct update_gid_work *work;
int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
int found = -1;
int max_gids;
- max_gids = dev->dev->caps.gid_table_len[port];
- for (i = 0; i < max_gids; ++i) {
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
- sizeof(*gid)))
- found = i;
-
- if (clear) {
- if (found >= 0) {
- need_update = 1;
- dev->iboe.gid_table[port - 1][found] = zgid;
- break;
- }
- } else {
- if (found >= 0)
- break;
-
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+ if (default_gid) {
+ free = 0;
+ } else {
+ max_gids = dev->dev->caps.gid_table_len[port];
+ for (i = 1; i < max_gids; ++i) {
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
sizeof(*gid)))
- free = i;
+ found = i;
+
+ if (clear) {
+ if (found >= 0) {
+ need_update = 1;
+ dev->iboe.gid_table[port - 1][found] =
+ zgid;
+ break;
+ }
+ } else {
+ if (found >= 0)
+ break;
+
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i],
+ &zgid, sizeof(*gid)))
+ free = i;
+ }
}
}
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
return 0;
}
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
- struct update_gid_work *work;
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
+{
+ struct update_gid_work *work;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return -ENOMEM;
- memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+ memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
memset(work->gids, 0, sizeof(work->gids));
INIT_WORK(&work->work, reset_gids_task);
work->dev = dev;
+ work->port = port;
queue_work(wq, &work->work);
return 0;
}
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
rdma_vlan_dev_real_dev(event_netdev) :
event_netdev;
+ union ib_gid default_gid;
+
+ mlx4_make_default_gid(real_dev, &default_gid);
+
+ if (!memcmp(gid, &default_gid, sizeof(*gid)))
+ return 0;
if (event != NETDEV_DOWN && event != NETDEV_UP)
return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
(!netif_is_bond_master(real_dev) &&
(real_dev == iboe->netdevs[port - 1])))
update_gid_table(ibdev, port, gid,
- event == NETDEV_DOWN);
+ event == NETDEV_DOWN, 0);
spin_unlock(&iboe->lock);
return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
rdma_vlan_dev_real_dev(dev) : dev;
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
for (port = 1; port <= MLX4_MAX_PORTS; ++port)
if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
(real_dev == iboe->netdevs[port - 1])))
break;
- spin_unlock(&iboe->lock);
-
if ((port == 0) || (port > MLX4_MAX_PORTS))
return 0;
else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
/*ifa->ifa_address;*/
ipv6_addr_set_v4mapped(ifa->ifa_address,
(struct in6_addr *)&gid);
- update_gid_table(ibdev, port, &gid, 0);
+ update_gid_table(ibdev, port, &gid, 0, 0);
}
endfor_ifa(in_dev);
in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
- update_gid_table(ibdev, port, pgid, 0);
+ update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
#endif
}
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev, u8 port)
+{
+ union ib_gid gid;
+ mlx4_make_default_gid(dev, &gid);
+ update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
struct net_device *dev;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ int i;
- if (reset_gid_table(ibdev))
- return -1;
+ for (i = 1; i <= ibdev->num_ports; ++i)
+ if (reset_gid_table(ibdev, i))
+ return -1;
read_lock(&dev_base_lock);
+ spin_lock(&iboe->lock);
for_each_netdev(&init_net, dev) {
u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
mlx4_ib_get_dev_addr(dev, ibdev, port);
}
+ spin_unlock(&iboe->lock);
read_unlock(&dev_base_lock);
return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
spin_lock(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+ enum ib_port_state port_state = IB_PORT_NOP;
struct net_device *old_master = iboe->masters[port - 1];
+ struct net_device *curr_netdev;
struct net_device *curr_master;
+
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+ if (iboe->netdevs[port - 1])
+ mlx4_ib_set_default_gid(ibdev,
+ iboe->netdevs[port - 1], port);
+ curr_netdev = iboe->netdevs[port - 1];
if (iboe->netdevs[port - 1] &&
netif_is_bond_slave(iboe->netdevs[port - 1])) {
- rtnl_lock();
iboe->masters[port - 1] = netdev_master_upper_dev_get(
iboe->netdevs[port - 1]);
- rtnl_unlock();
+ } else {
+ iboe->masters[port - 1] = NULL;
}
curr_master = iboe->masters[port - 1];
+ if (curr_netdev) {
+ port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ } else {
+ reset_gid_table(ibdev, port);
+ }
+ /* if using bonding/team and a slave port is down, we don't want the bond IP
+ * based gids in the table since flows that select port by gid may get
+ * the down port.
+ */
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ }
/* if bonding is used it is possible that we add it to masters
- only after IP address is assigned to the net bonding
- interface */
- if (curr_master && (old_master != curr_master))
+ * only after an IP address is assigned to the net bonding
+ * interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+ }
+
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
}
spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int i, j;
int err;
struct mlx4_ib_iboe *iboe;
+ int ib_num_ports = 0;
pr_info_once("%s", mlx4_ib_version);
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ ib_num_ports) {
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
}
#endif
+ for (i = 1 ; i <= ibdev->num_ports ; ++i)
+ reset_gid_table(ibdev, i);
+ rtnl_lock();
mlx4_ib_scan_netdevs(ibdev);
+ rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI && X86
+ depends on NETDEVICES && ETHERNET && PCI
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..aa03e732b6a8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
flags = dev->mdev.caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req req;
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
int num_uars;
+ int ver;
int uuarn;
int err;
int i;
+ int reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- err = ib_copy_from_udata(&req, udata, sizeof(req));
+ memset(&req, 0, sizeof(req));
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
if (err)
return ERR_PTR(err);
+ if (req.flags || req.reserved)
+ return ERR_PTR(-EINVAL);
+
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_uars;
+ uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9bf262..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
break;
case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- uuarn = alloc_med_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- uuarn = alloc_high_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (init_attr->create_flags)
+ return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5dfc523..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
__u32 num_low_latency_uuars;
};
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ netdev = nes_netdev_init(nesdev, mmio_regs);
+ if (netdev == NULL) {
+ ret = -ENOMEM;
goto bail7;
+ }
/* Register network device */
ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca818bd..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
if (is_vlan)
- netdev = vlan_dev_real_dev(netdev);
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40c9d50..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc6061f1f4..f8dfd76be89f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
{
enum usnic_transport_type trans_type = qp_flow->trans_type;
int err;
+ uint16_t port_num = 0;
switch (trans_type) {
case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
case USNIC_TRANSPORT_IPV4_UDP:
err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
NULL, NULL,
- (uint16_t *) id);
+ &port_num);
if (err)
return err;
+ /*
+ * Copy port_num to the stack first and then assign it to *id,
+ * so that the 16-bit to int conversion works on both little
+ * and big endian systems.
+ */
+ *id = port_num;
break;
default:
usnic_err("Unsupported transport %u\n", trans_type);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ ISER_CONN_TERMINATING)){
+ if (ib_conn->iser_conn)
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 86b484cb3ec2..5194afb39e78 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 9300bc32784e..540956465ed2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& PCI_MSI_DOORBELL_MASK;
- writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK;
- writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~ipimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
/* Handle all pending doorbells */
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000000..8ed04c4a43ee
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
+/*
+ * linux/drivers/irqchip/irq-zevio.c
+ *
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS 0x000
+#define IO_RAW_STATUS 0x004
+#define IO_ENABLE 0x008
+#define IO_DISABLE 0x00C
+#define IO_CURRENT 0x020
+#define IO_RESET 0x028
+#define IO_MAX_PRIOTY 0x02C
+
+#define IO_IRQ_BASE 0x000
+#define IO_FIQ_BASE 0x100
+
+#define IO_INVERT_SEL 0x200
+#define IO_STICKY_SEL 0x204
+#define IO_PRIORITY_SEL 0x300
+
+#define MAX_INTRS 32
+#define FIQ_START MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+ struct irq_chip_regs *regs =
+ &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+ readl(gc->reg_base + regs->ack);
+}
+
+static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+ int irqnr;
+
+ while (readl(zevio_irq_io + IO_STATUS)) {
+ irqnr = readl(zevio_irq_io + IO_CURRENT);
+ irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ };
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+ /* Disable all interrupts */
+ writel(~0, base + IO_DISABLE);
+
+ /* Accept interrupts of all priorities */
+ writel(0xF, base + IO_MAX_PRIOTY);
+
+ /* Reset existing interrupts */
+ readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_chip_generic *gc;
+ int ret;
+
+ if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+ return -EBUSY;
+
+ zevio_irq_io = of_iomap(node, 0);
+ BUG_ON(!zevio_irq_io);
+
+ /* Do not invert interrupt status bits */
+ writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+ /* Disable sticky interrupts */
+ writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+ /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+ memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+ /* Init IRQ and FIQ */
+ zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+ zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+ zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
+ BUG_ON(!zevio_irq_domain);
+
+ ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+ "zevio_intc", handle_level_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ BUG_ON(ret);
+
+ gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+ gc->reg_base = zevio_irq_io;
+ gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
+ gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
+
+ set_handle_irq(zevio_handle_irq);
+
+ pr_info("TI-NSPIRE classic IRQ controller\n");
+ return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
dp += sprintf(dp, " octet 3 ");
dp += prbits(dp, *p, 8, 8);
*dp++ = '\n';
- if (!(*p++ & 80)) {
+ if (!(*p++ & 0x80)) {
dp += sprintf(dp, " octet 4 ");
dp += prbits(dp, *p++, 8, 8);
*dp++ = '\n';
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0c707e4f4eaf..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
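For reference (illustrative, not part of the patch): a 13-bit field holds at most ~(~0ULL << 13) = 0x1fff = 8191, so the earlier clamp of (1 << 14) - 1 = 16383 used in __bch_btree_mark_key could overflow GC_SECTORS_USED; defining MAX_GC_SECTORS_USED from GC_SECTORS_USED_SIZE keeps the clamp in step with the field width.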
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4f6b5940e609..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
- printk(KERN_ERR "block %u key %zi/%u: ", set,
+ printk(KERN_ERR "block %u key %li/%u: ", set,
(uint64_t *) k - i->d, i->keys);
if (b->ops->key_dump)
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order);
if (!out) {
+ struct page *outp;
+
BUG_ON(order > state->page_order);
- out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+ outp = mempool_alloc(state->pool, GFP_NOIO);
+ out = page_address(outp);
used_mempool = true;
order = state->page_order;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 98cc0a810a36..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k),
- (1 << 14) - 1));
+ MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g));
}
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
static size_t insert_u64s_remaining(struct btree *b)
{
- ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
/*
* Might land in the middle of an existing extent and have to split it
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c3ead586dc27..416d1a3e028e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,7 +194,7 @@ err:
mutex_unlock(&b->c->bucket_lock);
bch_extent_to_text(buf, sizeof(buf), k);
btree_bug(b,
-"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72cd213f213f..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (op->bypass)
- return bch_data_invalidate(cl);
-
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
wake_up_gc(op->c);
}
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6ab69333a6d..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
return MAP_CONTINUE;
}
-int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
struct bset_stats_op op;
int ret;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
+ int uptodate;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse */
+ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
+ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
bio_reset(b);
+ if (!uptodate)
+ clear_bit(BIO_UPTODATE, &b->bi_flags);
b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
+ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the BIO_UPTODATE flag */
+ set_bit(BIO_UPTODATE, &sbio->bi_flags);
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ if (uptodate) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && uptodate)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 68f768a5422d..a6c3c9e2e897 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
switch (demod) {
case 0:
- dev_err(&state->priv->i2c->dev,
+ dev_err(&i2c->dev,
"%s: Error attaching frontend %d\n",
KBUILD_MODNAME, demod);
goto error1;
@@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->demod = demod - 1;
state->priv = priv;
- /* test i2c bus for ack */
- if (demod == 0) {
- if (cx24117_readreg(state, 0x00) < 0)
- goto error3;
- }
-
dev_info(&state->priv->i2c->dev,
"%s: Attaching frontend %d\n",
KBUILD_MODNAME, state->demod);
@@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
-error3:
- kfree(state);
error2:
cx24117_release_priv(priv);
error1:
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 4bf057544607..8a8e1ecb762d 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
* Support for NXT2002 and NXT2004 - VSB/QAM
*
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
*
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 1effc21e1cdd..9bbd6656fb8f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2554,7 +2554,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
(pdata->sdp_free_run_cbar_en << 1) |
(pdata->sdp_free_run_man_col_en << 2) |
- (pdata->sdp_free_run_force << 3));
+ (pdata->sdp_free_run_auto << 3));
/* TODO from platform data */
cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 4b8381111cbd..77e10e0fd8d6 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
u16 count, const u16 *seq)
{
struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
- __be16 buf[count + 1];
- int ret, n;
+ __be16 buf[65];
s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
if (state->error)
return;
+ v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+ min(2 * count, 64), seq);
+
buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
- for (n = 1; n <= count; ++n)
- buf[n] = cpu_to_be16(*seq++);
- n *= 2;
- ret = i2c_master_send(c, (char *)buf, n);
- v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
- min(2 * count, 64), seq - count);
+ while (count > 0) {
+ int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+ int ret, i;
- if (ret != n) {
- v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
- state->error = ret;
+ for (i = 1; i <= n; ++i)
+ buf[i] = cpu_to_be16(*seq++);
+
+ i *= 2;
+ ret = i2c_master_send(c, (char *)buf, i);
+ if (ret != i) {
+ v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+ state->error = ret;
+ break;
+ }
+
+ count -= n;
}
}
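
The s5k5baf_write_arr_seq() rewrite above replaces a variable-length array on the kernel stack with a fixed 65-word bounce buffer and transmits the register sequence in bursts of at most 64 values, aborting on the first short transfer. The chunking itself is independent of the driver; the stand-alone sketch below walks the same loop with a dummy transfer function in place of i2c_master_send() (the 0x0f12 command word is an arbitrary placeholder for REG_CMD_BUF):

    #include <stdio.h>
    #include <stdint.h>

    #define CHUNK 64    /* payload words per burst, matching the 65-entry buf[] */

    /* stand-in for i2c_master_send(): pretend every byte was accepted */
    static int xfer(const uint8_t *buf, int len)
    {
        printf("burst of %d bytes\n", len);
        return len;
    }

    int main(void)
    {
        uint16_t seq[150];
        uint8_t buf[2 * (CHUNK + 1)];
        const uint16_t *p = seq;
        int count = 150, i;

        for (i = 0; i < count; i++)
            seq[i] = i;

        while (count > 0) {
            int n = count < CHUNK ? count : CHUNK;

            buf[0] = 0x0f;    /* arbitrary 2-byte command word, */
            buf[1] = 0x12;    /* standing in for REG_CMD_BUF    */
            for (i = 0; i < n; i++) {    /* big-endian payload, like cpu_to_be16() */
                buf[2 + 2 * i] = p[i] >> 8;
                buf[3 + 2 * i] = p[i] & 0xff;
            }
            if (xfer(buf, 2 * (n + 1)) != 2 * (n + 1))
                return 1;    /* stop on short transfer, as the driver does */
            p += n;
            count -= n;
        }
        return 0;
    }
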
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0ace4dc..6662b495b22c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
},
/* ---- card 0x87---------------------------------- */
[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
- /* Michael Krufky <mkrufky@m1k.net> */
+ /* Michael Krufky <mkrufky@linuxtv.org> */
.name = "DViCO FusionHDTV 5 Lite",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.tuner_addr = ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e8233fd0b..3f364b7062b9 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
err = device_register(&sub->dev);
if (0 != err) {
- kfree(sub);
+ put_device(&sub->dev);
return err;
}
pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6ff332..c9b2350e92c8 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
}},
},
[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
- /* Michael Krufky <mkrufky@m1k.net>
+ /* Michael Krufky <mkrufky@linuxtv.org>
* Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
* AFAIK, there is no analog demod, thus,
* no support for analog television.
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index a7dfd07e8389..da2fc86cc524 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev)
return 0;
err_gclk:
- clk_disable(fimc->clock[CLK_GATE]);
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock[CLK_GATE]);
err_sd:
fimc_unregister_capture_subdev(fimc);
err_sclk:
@@ -1036,6 +1037,7 @@ err_sclk:
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_resume(struct device *dev)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 1234734bccf4..779ec3cd259d 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
if (!pm_runtime_enabled(dev)) {
ret = clk_enable(fimc->clock);
if (ret < 0)
- goto err_clk_put;
+ goto err_sd;
}
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
@@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev)
return 0;
err_clk_dis:
- clk_disable(fimc->clock);
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock);
err_sd:
fimc_lite_unregister_capture_subdev(fimc);
err_clk_put:
@@ -1587,6 +1588,7 @@ err_clk_put:
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_lite_runtime_resume(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
clk_disable(fimc->clock);
return 0;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index a1c78c870b68..7d68d0b9966a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
- .depth = 16,
+ .depth = 12,
.colplanes = 2,
.h_align = 1,
.v_align = 1,
@@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
- .depth = 16,
- .colplanes = 4,
+ .depth = 12,
+ .colplanes = 2,
.h_align = 4,
- .v_align = 1,
+ .v_align = 4,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_S5P |
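
The sjpeg_formats[] fixes above bring the NV12 entries in line with what 4:2:0 actually stores: two planes (a full-resolution 8-bit Y plane plus an interleaved CbCr plane subsampled 2x2) and an average of 12 bits per pixel rather than 16. The arithmetic is easy to verify:

    #include <stdio.h>

    int main(void)
    {
        unsigned w = 640, h = 480;
        unsigned y_bytes = w * h;                   /* plane 1: 8 bits per pixel */
        unsigned c_bytes = (w / 2) * (h / 2) * 2;   /* plane 2: interleaved Cb/Cr at quarter resolution */
        unsigned total = y_bytes + c_bytes;

        /* prints "NV12 640x480: 460800 bytes, 12 bits/pixel" */
        printf("NV12 %ux%u: %u bytes, %u bits/pixel\n",
               w, h, total, total * 8 / (w * h));
        return 0;
    }
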
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8f9b2cea88f0..8ede8ea762e6 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+ { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
+ &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4bb72d3..0a98d04c53e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bfd190b..2d4530f5be54 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb8f5ef..a619410adde4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54299a5..b85a5772d771 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 34434557ef65..a101d06eb143 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45ffb9e4..465762145ad2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a7a325..f6b348024bec 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f0756071d347..0643738de7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0fb9db..89bf115e927e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-reg.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529640f7..a8d2c7053674 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e5d6a6..2046db22519e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
#else
static inline
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state
+ struct mxl111sf_state *mxl_state,
struct mxl111sf_tuner_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e498451..c7304fa8ab73 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
ret = -EINVAL;
}
- pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+ pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
fail:
return ret;
}
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
module_usb_driver(mxl111sf_usb_driver);
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de86e48c..8516c011b7cc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89cbac76..c5638964c3f2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
print_buf);
- kfree(print_buf);
#endif
msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
retval = ret != 8;
unlock:
mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+ kfree(print_buf);
+#endif
return retval;
}
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ee52b9f4a944..f7902fe8a526 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
aspect.denominator = 9;
}
image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
/* Horizontal */
if (default_gtf)
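
The one-line v4l2_detect_gtf() fix rounds the computed image width to the nearest multiple of the GTF character cell (GTF_CELL_GRAN, 8 pixels in the VESA GTF formula): adding half the granularity and masking off the low bits is the standard round-to-nearest idiom for a power-of-two step. A few sample widths:

    #include <stdio.h>

    #define GTF_CELL_GRAN 8

    int main(void)
    {
        unsigned widths[] = { 1361, 1364, 1366, 1368 };
        unsigned i;

        for (i = 0; i < 4; i++) {
            unsigned w = widths[i];
            unsigned r = (w + GTF_CELL_GRAN / 2) & ~(GTF_CELL_GRAN - 1);

            /* 1361 -> 1360, 1364 -> 1368 (ties round up), 1366 -> 1368, 1368 -> 1368 */
            printf("%u -> %u\n", w, r);
        }
        return 0;
    }
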
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adcd0ea..7e6b209b7002 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
}
static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674ccdc68..828e7c10bd70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
+ videobuf_queue_unlock(q);
kfree(map);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c651c177..2ff7fcc77b11 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5a5fb7f09b7b..a127925c9d61 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1776,6 +1776,11 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
return 0;
}
+ if (!q->num_buffers) {
+ dprintk(1, "streamon: no buffers have been allocated\n");
+ return -EINVAL;
+ }
+
/*
* If any buffers were queued before streamon,
* we can now pass them to driver for processing.
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret;
- mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
+ mutex_lock(&i2o_cfg_mutex);
ret = i2o_cfg_passthru32(file, cmd, arg);
+ mutex_unlock(&i2o_cfg_mutex);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&i2o_cfg_mutex);
return ret;
}
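
The i2o_cfg_compat_ioctl() change stops holding i2o_cfg_mutex around the I2OGETIOPS case, which simply forwards to i2o_cfg_ioctl(); if that forwarded handler takes the same non-recursive mutex itself, the old code would self-deadlock on this path. The pattern is easy to reproduce in user space with an error-checking mutex (compile with -pthread; a default mutex would simply hang instead of reporting the error):

    #include <stdio.h>
    #include <string.h>
    #include <pthread.h>

    int main(void)
    {
        pthread_mutex_t m;
        pthread_mutexattr_t a;
        int err;

        pthread_mutexattr_init(&a);
        pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &a);

        pthread_mutex_lock(&m);          /* outer lock, like the old compat ioctl */
        err = pthread_mutex_lock(&m);    /* inner handler locking it again */
        printf("second lock: %s\n", strerror(err));  /* "Resource deadlock avoided" */

        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&a);
        return 0;
    }
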
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b327cdb..2c2c9cc75231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+ kfree(dma_map);
return rc;
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b9492a82..9b809cfc2899 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -908,7 +908,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
- cl->read_cb = NULL;
cl->timer_count = 0;
}
}
@@ -942,8 +941,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
+ struct list_head *list;
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list = &dev->write_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
+
+ list = &dev->write_waiting_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff873f891..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
static int _mic_virtio_copy(struct mic_vdev *mvdev,
struct mic_copy_desc *copy)
{
- int ret = 0, iovcnt = copy->iovcnt;
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
struct iovec iov;
struct iovec __user *u_iov = copy->iov;
void __user *ubuf = NULL;
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ubuf += sizeof(hdr);
ubufcch = ubuf;
- if (gru_user_copy_handle(&ubuf, cch))
- goto fail;
+ if (gru_user_copy_handle(&ubuf, cch)) {
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ return -EFAULT;
+ }
if (cch_locked)
ubufcch->delresp = 0;
bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFAULT;
return ret ? ret : bytes;
-
-fail:
- unlock_cch_handle(cch);
- return -EFAULT;
}
int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4c08018d7333..71ba18efa15b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1270,9 +1270,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
- bond_dev->name);
- bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ bond_dev->name);
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ bond_dev->name);
+ }
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
bond_dev->name);
@@ -1315,7 +1319,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1505,7 +1510,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- read_unlock(&bond->lock);
pr_info("Error, %s: master_dev is using netpoll, "
"but new slave device does not support netpoll.\n",
bond_dev->name);
@@ -1579,7 +1583,8 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1672,7 +1677,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- if (!all && !bond->params.fail_over_mac) {
+ if (!all && (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -1769,7 +1775,8 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+ if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
addr.sa_family = slave_dev->type;
@@ -3431,7 +3438,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
*/
- if (bond->params.fail_over_mac)
+ if (bond->params.fail_over_mac &&
+ bond->params.mode == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
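
All of the bond_main.c hunks above attach the same qualifier to the existing fail_over_mac tests: the option should only influence MAC handling when the bond runs in active-backup mode. Conceptually the repeated condition is a single predicate; a hypothetical helper (illustrative only, the patch open-codes the test at every call site) would read:

    /* hypothetical: "does fail_over_mac apply to this bond?" */
    static inline bool bond_fom_in_effect(const struct bonding *bond)
    {
        return bond->params.fail_over_mac &&
               bond->params.mode == BOND_MODE_ACTIVEBACKUP;
    }
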
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d447b881bbde..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
+ depends on ARM || PPC
---help---
Say Y here if you want support for Freescale FlexCAN.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 13a909822e25..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -323,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
if (!priv->echo_skb[idx]) {
- struct sock *srcsk = skb->sk;
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else
- skb_orphan(skb);
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* make settings for echo to reduce code in irq context */
skb->protocol = htons(ETH_P_CAN);
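
Three drivers in this series (can/dev.c here, janz-ican3.c and vcan.c further down) drop the same open-coded clone-or-orphan sequence in favour of a shared can_create_echo_skb() helper from <linux/can/skb.h>. Going by the removed lines, the helper behaves roughly like the sketch below (an inference from the deleted code, not the actual header contents):

    /* give the echo path a private skb that still points at the sending socket */
    static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
    {
        struct sock *srcsk = skb->sk;

        if (atomic_read(&skb->users) != 1) {
            struct sk_buff *old_skb = skb;

            skb = skb_clone(old_skb, GFP_ATOMIC);
            kfree_skb(old_skb);
            if (!skb)
                return NULL;
        } else {
            skb_orphan(skb);
        }

        skb->sk = srcsk;
        return skb;
    }
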
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..320bef2dba42 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -235,9 +235,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
};
/*
- * Abstract off the read/write for arm versus ppc.
+ * Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
*/
-#if defined(__BIG_ENDIAN)
+#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
return in_be32(addr);
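
With the Kconfig hunk above allowing big-endian ARM again, the flexcan accessor selection now keys off CONFIG_PPC rather than __BIG_ENDIAN, so register endianness follows the controller integration instead of the CPU byte order. The PPC branch is visible in the hunk; the non-PPC branch presumably keeps the plain little-endian MMIO helpers, roughly:

    #else /* !CONFIG_PPC: registers are little-endian regardless of CPU endianness */
    static inline u32 flexcan_read(void __iomem *addr)
    {
        return readl(addr);
    }

    static inline void flexcan_write(u32 val, void __iomem *addr)
    {
        writel(val, addr);
    }
    #endif
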
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index e24e6690d672..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/error.h>
#include <linux/mfd/janz.h>
@@ -1133,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
*/
static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
{
- struct sock *srcsk = skb->sk;
-
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else {
- skb_orphan(skb);
- }
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
@@ -1322,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg msg;
+ struct ican3_msg uninitialized_var(msg);
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cfd->len;
}
- kfree_skb(skb);
+ consume_skb(skb);
return NETDEV_TX_OK;
}
/* perform standard echo handling for CAN network interfaces */
if (loop) {
- struct sock *srcsk = skb->sk;
- skb = skb_share_check(skb, GFP_ATOMIC);
+ skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
/* receive with packet counting */
- skb->sk = srcsk;
vcan_rx(skb, dev);
} else {
/* no looped packets => no counting */
- kfree_skb(skb);
+ consume_skb(skb);
}
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 0f4241c6e97e..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3294,7 +3294,6 @@ static int __init vortex_init(void)
static void __exit vortex_eisa_cleanup(void)
{
- struct vortex_private *vp;
void __iomem *ioaddr;
#ifdef CONFIG_EISA
@@ -3303,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
#endif
if (compaq_net_device) {
- vp = netdev_priv(compaq_net_device);
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0cc21437478c..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -929,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
}
static const struct of_device_id emac_of_match[] = {
+ {.compatible = "allwinner,sun4i-a10-emac",},
+
+ /* Deprecated */
{.compatible = "allwinner,sun4i-emac",},
{},
};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e92ffd6e1c15..2e45f6ec1bf0 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1292,6 +1292,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
alx = netdev_priv(netdev);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
+ spin_lock_init(&alx->stats_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9d2dedadf2df..cda25ac45b47 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -85,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, 0);
+module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 17d1689aec6b..bfc58d488bb5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -936,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
return bnx2x_func_state_change(bp, &func_params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9c445e7b4a5..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -95,29 +95,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, 0);
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
-module_param(int_mode, int, 0);
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
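
The bnx2 and bnx2x hunks above change the module_param() permission argument from 0 to S_IRUGO. A mode of 0 means the parameter is accepted at load time but gets no sysfs entry; S_IRUGO (octal 0444) exposes it read-only under /sys/module/<module>/parameters/, e.g. /sys/module/bnx2/parameters/disable_msi, while still rejecting writes. The two spellings are equivalent:

    module_param(disable_msi, int, S_IRUGO);    /* S_IRUGO == 0444: world-readable, not writable */
    /* equivalently: module_param(disable_msi, int, 0444); */
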
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index aec5ef2ed7ce..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1446,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
- val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
DP(BNX2X_MSG_IOV,
- "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e2ca03e23dc1..3167ed6593b0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2609,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
- reg32 &= ~0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
- } else if (!err)
- err = -EBUSY;
+ err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+ if (err)
+ return err;
- return err;
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
@@ -14113,12 +14114,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_stop(tp);
+ tg3_set_mtu(dev, tp, new_mtu);
+
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_set_mtu(dev, tp, new_mtu);
-
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
+#define ETH_END 0x54
/* mode register */
#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
* @io_region_size: I/O memory region size
+ * @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
int dma_alloc;
resource_size_t io_region_size;
+ unsigned int num_bd;
unsigned int num_tx;
unsigned int cur_tx;
unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
struct phy_device *phy;
struct mii_bus *mdio;
+ struct clk *clk;
s8 phy_id;
};
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
}
priv->phy = phy;
+ phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half);
+ phy->supported &= ~(SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+
return 0;
}
@@ -890,6 +900,102 @@ out:
return NETDEV_TX_OK;
}
+static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int ethoc_get_regs_len(struct net_device *netdev)
+{
+ return ETH_END;
+}
+
+static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 *regs_buff = p;
+ unsigned i;
+
+ regs->version = 0;
+ for (i = 0; i < ETH_END / sizeof(u32); ++i)
+ regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
+}
+
+static void ethoc_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = priv->num_bd - 1;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = priv->num_bd - 1;
+
+ ring->rx_pending = priv->num_rx;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = priv->num_tx;
+}
+
+static int ethoc_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
+ ring->tx_pending + ring->rx_pending > priv->num_bd)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ netif_tx_disable(dev);
+ ethoc_disable_rx_and_tx(priv);
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ synchronize_irq(dev->irq);
+ }
+
+ priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
+ priv->num_rx = ring->rx_pending;
+ ethoc_init_ring(priv, dev->mem_start);
+
+ if (netif_running(dev)) {
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ ethoc_enable_rx_and_tx(priv);
+ netif_wake_queue(dev);
+ }
+ return 0;
+}
+
+const struct ethtool_ops ethoc_ethtool_ops = {
+ .get_settings = ethoc_get_settings,
+ .set_settings = ethoc_set_settings,
+ .get_regs_len = ethoc_get_regs_len,
+ .get_regs = ethoc_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ethoc_get_ringparam,
+ .set_ringparam = ethoc_set_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
int num_bd;
int ret = 0;
bool random_mac = false;
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = -ENODEV;
goto error;
}
+ priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (dev_get_platdata(&pdev->dev)) {
- struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
if (random_mac)
netdev->addr_assign_type = NET_ADDR_RANDOM;
+ /* Allow the platform setup code to adjust MII management bus clock. */
+ if (!eth_clkfreq) {
+ struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (!IS_ERR(clk)) {
+ priv->clk = clk;
+ clk_prepare_enable(clk);
+ eth_clkfreq = clk_get_rate(clk);
+ }
+ }
+ if (eth_clkfreq) {
+ u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
+
+ if (!clkdiv)
+ clkdiv = 2;
+ dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
+ ethoc_write(priv, MIIMODER,
+ (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
+ clkdiv);
+ }
+
/* register MII bus */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->netdev_ops = &ethoc_netdev_ops;
netdev->watchdog_timeo = ETHOC_TIMEOUT;
netdev->features |= 0;
+ netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
free_netdev(netdev);
out:
return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
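
The ethoc probe hunks above optionally take the bus clock from the common clock framework and derive an MDIO clock divider from its rate, clkdiv = MIIMODER_CLKDIV(rate / 2500000 + 1) with a fallback of 2 if the masked field ends up zero, before merging the value into MIIMODER alongside the preserved no-preamble bit. A quick sanity check of the arithmetic, assuming the divider field divides the bus clock directly (the MIIMODER_CLKDIV() masking is omitted here):

    #include <stdio.h>

    int main(void)
    {
        unsigned long rates[] = { 25000000, 50000000, 100000000 };
        int i;

        for (i = 0; i < 3; i++) {
            unsigned clkdiv = rates[i] / 2500000 + 1;

            if (!clkdiv)
                clkdiv = 2;    /* same fallback as the driver */
            /* assumed MDC = bus clock / clkdiv; stays below the usual 2.5 MHz limit */
            printf("%lu Hz -> clkdiv %u -> MDC %.2f MHz\n",
                   rates[i], clkdiv, rates[i] / (double)clkdiv / 1e6);
        }
        return 0;
    }
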
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
config MLX5_CORE
tristate
- depends on PCI && X86
+ depends on PCI
default n
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded50ca1600..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -726,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
int vpath_idx = 0;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath = NULL;
- struct __vxge_hw_device *hldev;
-
- hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -2443,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = pci_get_drvdata(vdev->pdev);
-
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
finish_packet:
+#endif
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index bde63e3af96f..1d860ce914ed 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1878,8 +1878,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+
+ if (strncmp(mdio->name, "gpio", 4) == 0) {
+ /* GPIO bitbang MDIO driver attached */
+ struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
+
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, bus->id, phyid);
+ } else {
+ /* davinci MDIO driver attached */
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ }
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2dc82f1d2e70..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
-config EP7211_DONGLE
- tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
- help
- Say Y here if you want to build support for the Cirrus logic
- EP7211 chipset's infrared module.
-
config KSDAZZLE_DONGLE
tristate "KingSun Dazzle IrDA-USB dongle"
depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
-obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * IR port driver for the Cirrus Logic CLPS711X processors
- *
- * Copyright 2001, Blue Mug Inc. All rights reserved.
- * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-#include "sir-dev.h"
-
-static int clps711x_dongle_open(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn on the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon |= SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static int clps711x_dongle_close(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn off the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon &= ~SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static struct dongle_driver clps711x_dongle = {
- .owner = THIS_MODULE,
- .driver_name = "EP7211 IR driver",
- .type = IRDA_EP7211_DONGLE,
- .open = clps711x_dongle_open,
- .close = clps711x_dongle_close,
-};
-
-static int clps711x_sir_probe(struct platform_device *pdev)
-{
- return irda_register_dongle(&clps711x_dongle);
-}
-
-static int clps711x_sir_remove(struct platform_device *pdev)
-{
- return irda_unregister_dongle(&clps711x_dongle);
-}
-
-static struct platform_driver clps711x_sir_driver = {
- .driver = {
- .name = "sir-clps711x",
- .owner = THIS_MODULE,
- },
- .probe = clps711x_sir_probe,
- .remove = clps711x_sir_remove,
-};
-module_platform_driver(clps711x_sir_driver);
-
-MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
-MODULE_DESCRIPTION("EP7211 IR dongle driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 547725fa8671..9414fa272160 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
if (on) {
gpio_num = gpio_tab[EXTTS0_GPIO + index];
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
- evnt |= EVNT_RISE;
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ evnt |= EVNT_FALL;
+ else
+ evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
@@ -1058,6 +1061,13 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_config_init(struct phy_device *phydev)
+{
+ enable_status_frames(phydev, true);
+ ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+ return 0;
+}
+
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1205,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
- enable_status_frames(phydev, true);
- ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
- }
-
ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
@@ -1281,6 +1286,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
/* fall through */
case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&dp83640->tx_queue, skb);
schedule_work(&dp83640->ts_work);
break;
@@ -1330,6 +1336,7 @@ static struct phy_driver dp83640_driver = {
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .config_init = dp83640_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index bb88bc7d81fb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -170,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_mdio_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-mdio" },
+
+ /* Deprecated */
{ .compatible = "allwinner,sun4i-mdio" },
{ }
};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b03e63639b7..82514e72b3d8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -719,7 +719,7 @@ int phy_resume(struct phy_device *phydev)
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
- int oldadv, adv;
+ int oldadv, adv, bmsr;
int err, changed = 0;
/* Only allow advertising what this PHY supports */
@@ -744,26 +744,36 @@ static int genphy_config_advert(struct phy_device *phydev)
changed = 1;
}
+ bmsr = phy_read(phydev, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ /* Per 802.3-2008, Section 22.2.4.2.16 (Extended status), all
+ * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
+ * logical 1.
+ */
+ if (!(bmsr & BMSR_ESTATEN))
+ return changed;
+
/* Configure gigabit if it's supported */
+ adv = phy_read(phydev, MII_CTRL1000);
+ if (adv < 0)
+ return adv;
+
+ oldadv = adv;
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
if (phydev->supported & (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full)) {
- adv = phy_read(phydev, MII_CTRL1000);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_CTRL1000, adv);
-
- if (err < 0)
- return err;
+ if (adv != oldadv)
changed = 1;
- }
}
+ err = phy_write(phydev, MII_CTRL1000, adv);
+ if (err < 0)
+ return err;
+
return changed;
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6b638a066c1d..409499fdb157 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -292,6 +292,22 @@ config USB_NET_SR9700
This option adds support for CoreChip-sz SR9700 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9800
+ tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ default y
+ ---help---
+ Say Y if you want to use one of the following 100Mbps USB Ethernet
+ devices based on the CoreChip-sz SR9800 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on eth0, if it is the only ethernet device, or perhaps on
+ eth1, if you have a PCI or ISA ethernet card installed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sr9800.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..433f0a00c683 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
+obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a482344b3f5..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
int status = urb->status;
+ D4("\n--- Got serial_read_bulk callback %02x ---", status);
+
/* sanity check */
if (!serial) {
D1("serial == NULL");
return;
- } else if (status) {
+ }
+ if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
}
- D4("\n--- Got serial_read_bulk callback %02x ---", status);
D1("Actual length = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
@@ -1218,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->port.count == 0)
return;
- if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
- fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
- /* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
- put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
- } else if (status == -ENOENT || status == -ECONNRESET) {
- /* Unlinked - check for throttled port. */
- D2("Port %d, successfully unlinked urb", serial->minor);
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- hso_resubmit_rx_bulk_urb(serial, urb);
- spin_unlock(&serial->serial_lock);
- } else {
- D2("Port %d, status = %d for read urb", serial->minor, status);
- return;
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
+ /* Valid data, handle RX data */
+ spin_lock(&serial->serial_lock);
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+ put_rxbuf_data_and_resubmit_bulk_urb(serial);
+ spin_unlock(&serial->serial_lock);
}
/*
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..ff5c87128ffe 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8fac732c6f1..d89dbe395ad2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2273,22 +2273,21 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
+ netif_start_queue(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
- return res;
}
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
- tp->speed = 0;
- netif_carrier_off(netdev);
- netif_start_queue(netdev);
- set_bit(WORK_ENABLE, &tp->flags);
return res;
}
@@ -2298,8 +2297,8 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
tasklet_disable(&tp->tl);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..4175eb9fdeca
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,870 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on asix_common.c, asix_devices.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include "sr9800.h"
+
+static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void
+sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
+ size);
+}
+
+static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *sr_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "%s : Bad Header Length\n",
+ __func__);
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "%s : Bad RX Length %d\n",
+ __func__, size);
+ return 0;
+ }
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!sr_skb)
+ return 0;
+
+ skb_put(sr_skb, size);
+ memcpy(sr_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, sr_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
+ skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 padbytes = 0xffff0000;
+ u32 packet_len;
+ int padlen;
+
+ padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
+ if ((headroom < 4) || (tailroom < padlen)) {
+ skb->data = memmove(skb->head + 4, skb->data,
+ skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ skb_push(skb, 4);
+ packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+
+ return skb;
+}
+
+static void sr_status(struct usbnet *dev, struct urb *urb)
+{
+ struct sr9800_int_data *event;
+ int link;
+
+ if (urb->actual_length < 8)
+ return;
+
+ event = urb->transfer_buffer;
+ link = event->link & 0x01;
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+
+ return;
+}
+
+static inline int sr_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static inline int sr_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+static inline int sr_get_phy_addr(struct usbnet *dev)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
+ __func__, ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
+ *((__le16 *)buf));
+
+ ret = buf[1];
+
+out:
+ return ret;
+}
+
+static int sr_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset:%02x\n",
+ ret);
+
+ return ret;
+}
+
+static u16 sr_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
+ ret);
+ goto out;
+ }
+
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write RX_CTL mode to 0x%04x:%02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+static u16 sr_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net,
+ "Error reading Medium Status register:%02x\n", ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+}
+
+static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write Medium Mode mode to 0x%04x:%02x\n",
+ mode, ret);
+ return ret;
+}
+
+static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
+ value, ret);
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/* SR9800 has a 16-bit RX_CTL value */
+static void sr_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 rx_ctl = SR_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= SR_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > SR_MAX_MCAST) {
+ rx_ctl |= SR_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
+ SR_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= SR_RX_CTL_AM;
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
+
+static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+static void
+sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
+static u32 sr_get_phyid(struct usbnet *dev)
+{
+ int phy_reg;
+ u32 phy_id;
+ int i;
+
+ /* Poll for the rare case the FW or phy isn't ready yet. */
+ for (i = 0; i < 100; i++) {
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg != 0 && phy_reg != 0xFFFF)
+ break;
+ mdelay(1);
+ }
+
+ if (phy_reg <= 0 || phy_reg == 0xFFFF)
+ return 0;
+
+ phy_id = (phy_reg & 0xffff) << 16;
+
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
+ if (phy_reg < 0)
+ return 0;
+
+ phy_id |= (phy_reg & 0xffff);
+
+ return phy_id;
+}
+
+static void
+sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & SR_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & SR_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= SR_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= SR_MONITOR_MAGIC;
+
+ if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sr_get_eeprom_len(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ return data->eeprom_len;
+}
+
+static int sr_get_eeprom(struct net_device *net,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 *ebuf = (__le16 *)data;
+ int ret;
+ int i;
+
+ /* Crude hack to ensure that we don't overwrite memory
+ * if an odd length is supplied
+ */
+ if (eeprom->len % 2)
+ return -EINVAL;
+
+ eeprom->magic = SR_EEPROM_MAGIC;
+
+ /* sr9800 returns 2 bytes from eeprom on read */
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
+ 0, 2, &ebuf[i]);
+ if (ret < 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void sr_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ info->eedump_len = data->eeprom_len;
+}
+
+static u32 sr_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static int sr_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
+static const struct ethtool_ops sr9800_ethtool_ops = {
+ .get_drvinfo = sr_get_drvinfo,
+ .get_link = sr_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = sr_get_wol,
+ .set_wol = sr_set_wol,
+ .get_eeprom_len = sr_get_eeprom_len,
+ .get_eeprom = sr_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int sr9800_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ u16 mode;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+ mode = SR9800_MEDIUM_DEFAULT;
+
+ if (ethtool_cmd_speed(&ecmd) != SPEED_100)
+ mode &= ~SR_MEDIUM_PS;
+
+ if (ecmd.duplex != DUPLEX_FULL)
+ mode &= ~SR_MEDIUM_FD;
+
+ netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
+ __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
+
+ sr_write_medium_mode(dev, mode);
+
+ return 0;
+}
+
+
+static int sr9800_set_default_mode(struct usbnet *dev)
+{
+ u16 rx_ctl;
+ int ret;
+
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ mii_nway_restart(&dev->mii);
+
+ ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
+ if (ret < 0)
+ goto out;
+
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
+ SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
+ SR9800_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = sr_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
+ rx_ctl);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int sr9800_reset(struct usbnet *dev)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl;
+
+ ret = sr_write_gpio(dev,
+ SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
+ if (ret < 0)
+ goto out;
+
+ embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ if (embd_phy) {
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
+ if (ret < 0)
+ goto out;
+ }
+
+ msleep(150);
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct net_device_ops sr9800_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = sr_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr_ioctl,
+ .ndo_set_rx_mode = sr_set_multicast,
+};
+
+static int sr9800_phy_powerup(struct usbnet *dev)
+{
+ int ret;
+
+ /* set the embedded Ethernet PHY in power-down state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(600);
+
+ /* set the embedded Ethernet PHY in reset state */
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 led01_mux, led23_mux;
+ int ret, embd_phy;
+ u32 phyid;
+ u16 rx_ctl;
+
+ data->eeprom_len = SR9800_EEPROM_LEN;
+
+ usbnet_get_endpoints(dev, intf);
+
+ /* LED Setting Rule :
+ * AABB:CCDD
+ * AA : MFA0(LED0)
+ * BB : MFA1(LED1)
+ * CC : MFA2(LED2), Reserved for SR9800
+ * DD : MFA3(LED3), Reserved for SR9800
+ */
+ led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
+ led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
+ ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
+ goto out;
+ }
+
+ /* Get the MAC address */
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+ dev->net->dev_addr);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+ return ret;
+ }
+ netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = sr_mdio_read;
+ dev->mii.mdio_write = sr_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = sr_get_phy_addr(dev);
+
+ dev->net->netdev_ops = &sr9800_netdev_ops;
+ dev->net->ethtool_ops = &sr9800_ethtool_ops;
+
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+ /* Reset the PHY to normal operation mode */
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init PHY routine */
+ ret = sr9800_phy_powerup(dev);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = sr_get_phyid(dev);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
+
+ /* medium mode setting */
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ if (dev->udev->speed == USB_SPEED_HIGH) {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
+ } else {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
+ }
+ netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__,
+ dev->rx_urb_size);
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct driver_info sr9800_driver_info = {
+ .description = "CoreChip SR9800 USB 2.0 Ethernet",
+ .bind = sr9800_bind,
+ .status = sr_status,
+ .link_reset = sr9800_link_reset,
+ .reset = sr9800_reset,
+ .flags = DRIVER_FLAG,
+ .rx_fixup = sr_rx_fixup,
+ .tx_fixup = sr_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
+ .driver_info = (unsigned long) &sr9800_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disconnect = usbnet_disconnect,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(sr_driver);
+
+MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _SR9800_H
+#define _SR9800_H
+
+/* SR9800 spec. command table on Linux Platform */
+
+/* command : Software Station Management Control Reg */
+#define SR_CMD_SET_SW_MII 0x06
+/* command : PHY Read Reg */
+#define SR_CMD_READ_MII_REG 0x07
+/* command : PHY Write Reg */
+#define SR_CMD_WRITE_MII_REG 0x08
+/* command : Hardware Station Management Control Reg */
+#define SR_CMD_SET_HW_MII 0x0a
+/* command : SROM Read Reg */
+#define SR_CMD_READ_EEPROM 0x0b
+/* command : SROM Write Reg */
+#define SR_CMD_WRITE_EEPROM 0x0c
+/* command : SROM Write Enable Reg */
+#define SR_CMD_WRITE_ENABLE 0x0d
+/* command : SROM Write Disable Reg */
+#define SR_CMD_WRITE_DISABLE 0x0e
+/* command : RX Control Read Reg */
+#define SR_CMD_READ_RX_CTL 0x0f
+#define SR_RX_CTL_PRO (1 << 0)
+#define SR_RX_CTL_AMALL (1 << 1)
+#define SR_RX_CTL_SEP (1 << 2)
+#define SR_RX_CTL_AB (1 << 3)
+#define SR_RX_CTL_AM (1 << 4)
+#define SR_RX_CTL_AP (1 << 5)
+#define SR_RX_CTL_ARP (1 << 6)
+#define SR_RX_CTL_SO (1 << 7)
+#define SR_RX_CTL_RH1M (1 << 8)
+#define SR_RX_CTL_RH2M (1 << 9)
+#define SR_RX_CTL_RH3M (1 << 10)
+/* command : RX Control Write Reg */
+#define SR_CMD_WRITE_RX_CTL 0x10
+/* command : IPG0/IPG1/IPG2 Control Read Reg */
+#define SR_CMD_READ_IPG012 0x11
+/* command : IPG0/IPG1/IPG2 Control Write Reg */
+#define SR_CMD_WRITE_IPG012 0x12
+/* command : Node ID Read Reg */
+#define SR_CMD_READ_NODE_ID 0x13
+/* command : Node ID Write Reg */
+#define SR_CMD_WRITE_NODE_ID 0x14
+/* command : Multicast Filter Array Read Reg */
+#define SR_CMD_READ_MULTI_FILTER 0x15
+/* command : Multicast Filter Array Write Reg */
+#define SR_CMD_WRITE_MULTI_FILTER 0x16
+/* command : Eth/HomePNA PHY Address Reg */
+#define SR_CMD_READ_PHY_ID 0x19
+/* command : Medium Status Read Reg */
+#define SR_CMD_READ_MEDIUM_STATUS 0x1a
+#define SR_MONITOR_LINK (1 << 1)
+#define SR_MONITOR_MAGIC (1 << 2)
+#define SR_MONITOR_HSFS (1 << 4)
+/* command : Medium Status Write Reg */
+#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
+#define SR_MEDIUM_GM (1 << 0)
+#define SR_MEDIUM_FD (1 << 1)
+#define SR_MEDIUM_AC (1 << 2)
+#define SR_MEDIUM_ENCK (1 << 3)
+#define SR_MEDIUM_RFC (1 << 4)
+#define SR_MEDIUM_TFC (1 << 5)
+#define SR_MEDIUM_JFE (1 << 6)
+#define SR_MEDIUM_PF (1 << 7)
+#define SR_MEDIUM_RE (1 << 8)
+#define SR_MEDIUM_PS (1 << 9)
+#define SR_MEDIUM_RSV (1 << 10)
+#define SR_MEDIUM_SBP (1 << 11)
+#define SR_MEDIUM_SM (1 << 12)
+/* command : Monitor Mode Status Read Reg */
+#define SR_CMD_READ_MONITOR_MODE 0x1c
+/* command : Monitor Mode Status Write Reg */
+#define SR_CMD_WRITE_MONITOR_MODE 0x1d
+/* command : GPIO Status Read Reg */
+#define SR_CMD_READ_GPIOS 0x1e
+#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
+#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
+#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
+#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
+#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
+#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
+#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
+#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
+/* command : GPIO Status Write Reg */
+#define SR_CMD_WRITE_GPIOS 0x1f
+/* command : Eth PHY Power and Reset Control Reg */
+#define SR_CMD_SW_RESET 0x20
+#define SR_SWRESET_CLEAR 0x00
+#define SR_SWRESET_RR (1 << 0)
+#define SR_SWRESET_RT (1 << 1)
+#define SR_SWRESET_PRTE (1 << 2)
+#define SR_SWRESET_PRL (1 << 3)
+#define SR_SWRESET_BZ (1 << 4)
+#define SR_SWRESET_IPRL (1 << 5)
+#define SR_SWRESET_IPPD (1 << 6)
+/* command : Software Interface Selection Status Read Reg */
+#define SR_CMD_SW_PHY_STATUS 0x21
+/* command : Software Interface Selection Status Write Reg */
+#define SR_CMD_SW_PHY_SELECT 0x22
+/* command : BULK in Buffer Size Reg */
+#define SR_CMD_BULKIN_SIZE 0x2A
+/* command : LED_MUX Control Reg */
+#define SR_CMD_LED_MUX 0x70
+#define SR_LED_MUX_TX_ACTIVE (1 << 0)
+#define SR_LED_MUX_RX_ACTIVE (1 << 1)
+#define SR_LED_MUX_COLLISION (1 << 2)
+#define SR_LED_MUX_DUP_COL (1 << 3)
+#define SR_LED_MUX_DUP (1 << 4)
+#define SR_LED_MUX_SPEED (1 << 5)
+#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
+#define SR_LED_MUX_LINK (1 << 7)
+
+/* Register Access Flags */
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* Multicast Filter Array size & Max Number */
+#define SR_MCAST_FILTER_SIZE 8
+#define SR_MAX_MCAST 64
+
+/* IPG0/1/2 Default Value */
+#define SR9800_IPG0_DEFAULT 0x15
+#define SR9800_IPG1_DEFAULT 0x0c
+#define SR9800_IPG2_DEFAULT 0x12
+
+/* Medium Status Default Mode */
+#define SR9800_MEDIUM_DEFAULT \
+ (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
+ SR_MEDIUM_TFC | SR_MEDIUM_PS | \
+ SR_MEDIUM_AC | SR_MEDIUM_RE)
+
+/* RX Control Default Setting */
+#define SR_DEFAULT_RX_CTL \
+ (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
+
+/* EEPROM Magic Number & EEPROM Size */
+#define SR_EEPROM_MAGIC 0xdeadbeef
+#define SR9800_EEPROM_LEN 0xff
+
+/* SR9800 Driver Version and Driver Name */
+#define DRIVER_VERSION "11-Nov-2013"
+#define DRIVER_NAME "CoreChips"
+#define DRIVER_FLAG \
+ (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
+
+/* SR9800 BULKIN Buffer Size */
+#define SR9800_MAX_BULKIN_2K 0
+#define SR9800_MAX_BULKIN_4K 1
+#define SR9800_MAX_BULKIN_6K 2
+#define SR9800_MAX_BULKIN_8K 3
+#define SR9800_MAX_BULKIN_16K 4
+#define SR9800_MAX_BULKIN_20K 5
+#define SR9800_MAX_BULKIN_24K 6
+#define SR9800_MAX_BULKIN_32K 7
+
+struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+ /* 2k */
+ {2048, 0x8000, 0x8001},
+ /* 4k */
+ {4096, 0x8100, 0x8147},
+ /* 6k */
+ {6144, 0x8200, 0x81EB},
+ /* 8k */
+ {8192, 0x8300, 0x83D7},
+ /* 16k */
+ {16384, 0x8400, 0x851E},
+ /* 20k */
+ {20480, 0x8500, 0x8666},
+ /* 24k */
+ {24576, 0x8600, 0x87AE},
+ /* 32k */
+ {32768, 0x8700, 0x8A3D},
+};
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct sr_data {
+ u8 multi_filter[SR_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 eeprom_len;
+};
+
+struct sr9800_int_data {
+ __le16 res1;
+ u8 link;
+ __le16 res2;
+ u8 status;
+ __le16 res3;
+} __packed;
+
+#endif /* _SR9800_H */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 026a313c2d2d..b0f705c2378f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -469,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
-
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
@@ -596,10 +595,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
- goto found;
}
-found:
type = eh->h_proto;
rcu_read_lock();
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned len)
{
struct frhdr hdr;
- struct dlci_local *dlp;
unsigned int hlen;
char *dest;
- dlp = netdev_priv(dev);
-
hdr.control = FRAD_I_UI;
switch (type)
{
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
{
- struct dlci_local *dlp;
struct frhdr *hdr;
int process, header;
- dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
netdev_notice(dev, "invalid data no header\n");
dev->stats.rx_errors++;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aa20df55e50..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1764,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
- AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
(CyberTAN Technology) */
AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25243cbc07f0..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5065,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
break;
}
}
+
+ if (is2GHz && !twiceMaxEdgePower)
+ twiceMaxEdgePower = 60;
+
return twiceMaxEdgePower;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 58da3468d1f0..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
struct ath9k_htc_sta {
u8 index;
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+ struct work_struct rc_update_work;
+ struct ath9k_htc_priv *htc_priv;
};
#define ATH9K_HTC_RXBUF 256
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index f4e1de20d99c..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739d1378..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+ struct ath9k_htc_sta *ista =
+ container_of(work, struct ath9k_htc_sta, rc_update_work);
+ struct ieee80211_sta *sta =
+ container_of((void *)ista, struct ieee80211_sta, drv_priv);
+ struct ath9k_htc_priv *priv = ista->htc_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
ret = ath9k_htc_add_station(priv, vif, sta);
- if (!ret)
+ if (!ret) {
+ INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+ ista->htc_priv = priv;
ath9k_htc_init_rate(priv, sta);
+ }
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1293,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_sta *ista;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
+ cancel_work_sync(&ista->rc_update_work);
+
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
- ista = (struct ath9k_htc_sta *) sta->drv_priv;
htc_sta_drain(priv->htc, ista->index);
ret = ath9k_htc_remove_station(priv, vif, sta);
ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u32 changed)
{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_htc_target_rate trate;
-
- mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
- ath9k_htc_setup_rate(priv, sta, &trate);
- if (!ath9k_htc_send_rate_cmd(priv, &trate))
- ath_dbg(common, CONFIG,
- "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
- sta->addr, be32_to_cpu(trate.capflags));
- else
- ath_dbg(common, CONFIG,
- "Unable to update supported rates for sta: %pM\n",
- sta->addr);
- }
+ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+ return;
- ath9k_htc_ps_restore(priv);
- mutex_unlock(&priv->mutex);
+ schedule_work(&ista->rc_update_work);
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fbf43c05713f..11eab9f01fd8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1316,7 +1316,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (AR_SREV_9300_20_OR_LATER(ah))
udelay(50);
else if (AR_SREV_9100(ah))
- udelay(10000);
+ mdelay(10);
else
udelay(100);
@@ -2051,9 +2051,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
-
if (AR_SREV_9100(ah))
- udelay(10000);
+ mdelay(10);
else
udelay(50);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c36de303c8f3..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -903,13 +907,15 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index f06f4cbe1317..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+ if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ !data->sku_cap_band_52GHz_enable)
+ ch_flags &= ~NVM_CHANNEL_VALID;
+
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 73cbba7424f2..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
* @match_notify: clients waiting for match found notification
* @pass_match: clients waiting for the results
* @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
*/
struct iwl_scan_offload_profile_cfg {
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
u8 match_notify;
u8 pass_match;
u8 active_clients;
- u8 reserved[3];
+ u8 any_beacon_notify;
+ u8 reserved[2];
} __packed;
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c49b5073c251..6bf9766e5982 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 0e0007960612..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -344,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags =
@@ -807,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
profile = &profile_cfg->profiles[i];
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index ec1812133235..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -652,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- static const u8 *baddr = _baddr;
+ const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 90378c217bc7..4df12fa9d336 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -659,8 +659,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+ * sta can't be NULL otherwise it'd mean that the sta has been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
- if (!IS_ERR_OR_NULL(sta)) {
+ if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (tid != IWL_TID_NON_QOS) {
@@ -675,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->lock);
}
} else {
- sta = NULL;
mvmsta = NULL;
}
@@ -683,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
- atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
- if (mvmsta) {
- /*
- * If there are no pending frames for this STA, notify
- * mac80211 that this station can go to sleep in its
- * STA table.
- */
- if (mvmsta->vif->type == NL80211_IFTYPE_AP)
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- /*
- * We might very well have taken mvmsta pointer while
- * the station was being removed. The remove flow might
- * have seen a pending_frame (because we didn't take
- * the lock) even if now the queues are drained. So make
- * really sure now that this the station is not being
- * removed. If it is, run the drain worker to remove it.
- */
- spin_lock_bh(&mvmsta->lock);
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (!sta || PTR_ERR(sta) == -EBUSY) {
- /*
- * Station disappeared in the meantime:
- * so we are draining.
- */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
- spin_unlock_bh(&mvmsta->lock);
- } else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
- /* Tx response without STA, so we are draining */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
+ if (txq_id >= mvm->first_agg_queue)
+ goto out;
+
+ /* We can't free more than one frame at once on a shared queue */
+ WARN_ON(skb_freed > 1);
+
+ /* If we have still frames from this STA nothing to do here */
+ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+ goto out;
+
+ if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ * If mvmsta is not NULL, sta is valid.
+ */
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ }
+
+ if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
+ /*
+ * We are draining and this was the last packet - pre_rcu_remove
+ * has been called already. We might be after the
+ * synchronize_net already.
+ * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
}
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a4a5e25623c3..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3040924f5f3c..f47bcbe2945a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,20 +359,25 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index abc5f56f29fe..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1877,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9f16824cd1bc..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1706,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
+ /*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b8f5b06006c4..7f8b5d156c8c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7458,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u32 reg;
/*
- * Disable powersaving as default on PCI devices.
+ * Disable powersaving as default.
*/
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 8ec17aad0e52..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -107,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
unsigned int count = 32;
u8 signal, agc, sq;
+ dma_addr_t mapping;
while (count--) {
struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -128,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
+ mapping = pci_map_single(priv->pdev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(new_skb);
+ dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+ goto done;
+ }
+
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -158,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
- *((dma_addr_t *) skb->cb) =
- pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *((dma_addr_t *) skb->cb) = mapping;
}
done:
@@ -266,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
mapping = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+
+ }
+
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
RTL818X_TX_DESC_FLAG_LS |
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb9a879..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
- bool rx_queue_stopped;
- /* Set when the RX interrupt is triggered by the frontend.
- * The worker thread may need to wake the queue.
- */
- bool rx_event;
+ RING_IDX rx_last_skb_slots;
/* This array is allocated seperately as it is large */
struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31ea7fc4..7669d49a67e2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- vif->rx_event = true;
xenvif_kick_thread(vif);
return IRQ_HANDLED;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..e5284bca2d90 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
unsigned long offset;
struct skb_cb_overlay *sco;
bool need_to_notify = false;
- bool ring_full = false;
struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -486,7 +485,7 @@ static void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- int max_slots_needed;
+ RING_IDX max_slots_needed;
int i;
/* We need a cheap worse case estimate for the number of
@@ -509,9 +508,10 @@ static void xenvif_rx_action(struct xenvif *vif)
if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
skb_queue_head(&vif->rx_queue, skb);
need_to_notify = true;
- ring_full = true;
+ vif->rx_last_skb_slots = max_slots_needed;
break;
- }
+ } else
+ vif->rx_last_skb_slots = 0;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@ static void xenvif_rx_action(struct xenvif *vif)
BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
- vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
if (!npo.copy_prod)
goto done;
@@ -1473,8 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
static inline int rx_work_todo(struct xenvif *vif)
{
- return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
- vif->rx_event;
+ return !skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}
static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@ int xenvif_kthread(void *data)
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
- vif->rx_event = false;
-
if (skb_queue_empty(&vif->rx_queue) &&
netif_queue_stopped(vif->dev))
xenvif_start_queue(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff04d4f95baa..f9daa9e183f2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1832,7 +1832,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
@@ -1847,6 +1846,10 @@ static void netback_changed(struct xenbus_device *dev,
netdev_notify_peers(netdev);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c840f1..1a54f1ffaadb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
/*
+ * "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*/
- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
- !strcmp(np->type, "ht");
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}
static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cd929aed3613..7c7a388c85ab 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
}
}
+static void dock_event(acpi_handle handle, u32 type, void *data)
+{
+ struct acpiphp_context *context;
+
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away) {
+ mutex_unlock(&acpiphp_context_lock);
+ return;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ mutex_unlock(&acpiphp_context_lock);
+
+ hotplug_event(handle, type, data);
+
+ put_bridge(context->func.parent);
+}
static const struct acpi_dock_ops acpiphp_dock_ops = {
.fixup = post_dock_fixups,
- .handler = hotplug_event,
+ .handler = dock_event,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -441,7 +460,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
list_del(&bridge->list);
mutex_unlock(&bridge_mutex);
+ mutex_lock(&acpiphp_context_lock);
bridge->is_going_away = true;
+ mutex_unlock(&acpiphp_context_lock);
}
/**
@@ -709,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
/**
* trim_stale_devices - remove PCI devices that are not responding.
* @dev: PCI device to start walking the hierarchy from.
@@ -724,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
|| acpiphp_no_hotplug(handle);
}
if (!alive) {
@@ -742,7 +774,7 @@ static void trim_stale_devices(struct pci_dev *dev)
/* The device is a bridge. so check the bus below it. */
pm_runtime_get_sync(&dev->dev);
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
trim_stale_devices(child);
pm_runtime_put(&dev->dev);
@@ -771,10 +803,10 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
mutex_lock(&slot->crit_sect);
if (slot_no_hotplug(slot)) {
; /* do nothing */
- } else if (get_slot_status(slot) == ACPI_STA_ALL) {
+ } else if (device_status_valid(get_slot_status(slot))) {
/* remove stale devices if any */
- list_for_each_entry_safe(dev, tmp, &bus->devices,
- bus_list)
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->device)
trim_stale_devices(dev);
@@ -805,7 +837,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -829,7 +861,11 @@ void acpiphp_check_host_bridge(acpi_handle handle)
bridge = acpiphp_handle_to_bridge(handle);
if (bridge) {
+ pci_lock_rescan_remove();
+
acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
put_bridge(bridge);
}
}
@@ -852,6 +888,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_unlock(&acpiphp_context_lock);
+ pci_lock_rescan_remove();
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@@ -905,6 +942,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
break;
}
+ pci_unlock_rescan_remove();
if (bridge)
put_bridge(bridge);
}
@@ -915,11 +953,9 @@ static void hotplug_event_work(void *data, u32 type)
acpi_handle handle = context->handle;
acpi_scan_lock_acquire();
- pci_lock_rescan_remove();
hotplug_event(handle, type, context);
- pci_unlock_rescan_remove();
acpi_scan_lock_release();
acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
put_bridge(context->func.parent);
@@ -937,6 +973,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
{
struct acpiphp_context *context;
u32 ost_code = ACPI_OST_SC_SUCCESS;
+ acpi_status status;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -972,13 +1009,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_lock(&acpiphp_context_lock);
context = acpiphp_get_context(handle);
- if (context && !WARN_ON(context->handle != handle)) {
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away)
+ goto err_out;
+
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ status = acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (ACPI_SUCCESS(status)) {
mutex_unlock(&acpiphp_context_lock);
return;
}
+ put_bridge(context->func.parent);
+
+ err_out:
mutex_unlock(&acpiphp_context_lock);
ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a470016..c0fe6091566a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
kref_init(&p->users);
/* Add the pinctrl handle to the global list */
+ mutex_lock(&pinctrl_list_mutex);
list_add_tail(&p->node, &pinctrl_list);
+ mutex_unlock(&pinctrl_list_mutex);
return p;
}
@@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
device_root, pctldev, &pinctrl_groups_ops);
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
- pinmux_init_device_debugfs(device_root, pctldev);
- pinconf_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->pmxops)
+ pinmux_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->confops)
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 38c6f8b9790e..d990e33d8aa7 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1286,22 +1286,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1310,7 +1310,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 17aecde1b51d..815384b377b5 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@ struct imx1_pinctrl {
#define MX1_DDIR 0x00
#define MX1_OCR 0x04
#define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
#define MX1_GIUS 0x20
#define MX1_GPR 0x38
#define MX1_PUEN 0x40
@@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 old_val;
u32 new_val;
- dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
- reg, offset, value);
-
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
reg += 0x04;
+ dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+ reg, offset, value);
+
/* Get current state of pins */
old_val = readl(reg);
old_val &= mask;
@@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 reg_offset)
{
void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
- int offset = pin_id % 16;
+ int offset = (pin_id % 16) * 2;
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2b5ff4..e767355ab0ad 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -645,7 +645,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
- return -ENODEV;
+ return -ENOMEM;
}
for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 37b42651d76a..dde0285544d6 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -413,7 +413,7 @@ static const struct sirfsoc_padmux ac97_padmux = {
.funcval = 0,
};
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask spi1_muxmask[] = {
{
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index b28d1af9c232..9802b67040cc 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
if (!configs)
return -ENOMEM;
- configs[0] = pull;
+ switch (pull) {
+ case 0:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ break;
+ case 1:
+ configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+ }
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = data->groups[group];
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
/*
* Voltage is measured in units of 1.22mV. The voltage is stored as
- * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ * a 12-bit number plus sign, in the upper bits of a 16-bit register
*/
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d8cb54..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(isp->phy);
goto fail0;
}
- if (!isp->phy)
- goto fail0;
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0b37a6..e10febe9ec34 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct ab3100_platform_data *plfdata,
struct regulator_init_data *init_data,
struct device_node *np,
- int id)
+ unsigned long id)
{
struct regulator_desc *desc;
struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
err = ab3100_regulator_register(
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
- (int) ab3100_regulator_matches[i].driver_data);
+ (unsigned long)ab3100_regulator_matches[i].driver_data);
if (err) {
ab3100_regulators_remove(pdev);
return err;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b38a6b669e8c..16a309e5c024 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1272,6 +1272,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (r->dev.parent &&
node == r->dev.of_node)
return r;
+ *ret = -EPROBE_DEFER;
+ return NULL;
} else {
/*
* If we couldn't even get the node then it's
@@ -1312,7 +1314,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = -EPROBE_DEFER;
+ int ret;
if (id == NULL) {
pr_err("get() with no identifier\n");
@@ -1322,6 +1324,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (dev)
devname = dev_name(dev);
+ if (have_full_constraints())
+ ret = -ENODEV;
+ else
+ ret = -EPROBE_DEFER;
+
mutex_lock(&regulator_list_mutex);
rdev = regulator_dev_lookup(dev, id, &ret);
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..b14ebdad5dd2 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
/* Only LDO 5 and 6 has got the over current interrupt */
if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
irq = platform_get_irq_byname(pdev, "REGULATOR");
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
da9055_ldo5_6_oc_irq,
IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index b1078ba3f393..186df8785a91 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -168,10 +168,11 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
MAX14577_REG_MAX);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
}
- return 0;
+ of_node_put(np);
+
+ return ret;
}
static inline struct regulator_init_data *match_init_data(int index)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d9e557990577..cd0b9e35a56d 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -441,6 +441,7 @@ common_reg:
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].reg_node;
} else {
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..8ee88c4ebd83 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
#define need_siga_sync_out_after_pci(q) \
(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a085c059..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(q))
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ba9310bc9acb..581ee2a8856b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
+ tristate "Renesas RSPI/QSPI controller"
depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 50406306bc20..bae97ffec4b9 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
init_completion(&hw->done);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..d0b28bba38be 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work)
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
- "failed to transfer one message from queue: %d\n", ret);
- master->cur_msg->status = ret;
- spi_finalize_current_message(master);
+ "failed to transfer one message from queue\n");
return;
}
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -498,6 +506,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd370b30f..ee3a7380e53b 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@ struct compat_ion_custom_data {
compat_ulong_t arg;
};
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data(
return err;
}
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
case COMPAT_ION_IOC_FREE:
{
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (data == NULL)
return -EFAULT;
- err = compat_get_ion_allocation_data(data32, data);
+ err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002753f2..01cdc8aee898 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,9 +17,11 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include "ion.h"
#include "ion_priv.h"
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = {
};
struct ion_platform_data dummy_ion_pdata = {
- .nr = 4,
+ .nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void)
heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
GFP_KERNEL);
if (!heaps)
- return PTR_ERR(heaps);
+ return -ENOMEM;
/* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@ err:
}
return err;
}
+device_initcall(ion_dummy_init);
static void __exit ion_dummy_exit(void)
{
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void)
return;
}
-
-module_init(ion_dummy_init);
-module_exit(ion_dummy_exit);
-
+__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f98dc0..37e64d51394c 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
return PTR_RET(heap->task);
}
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d98673981cc4..fc2e4fccf69d 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f0729130d65..9849f3963e75 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
info->page = page;
info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
return info;
}
kfree(info);
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
if (align > PAGE_SIZE)
return -EINVAL;
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
info = alloc_largest_available(sys_heap, buffer, size_remaining,
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..5aaf71d6974b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* __KERNEL__ */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 246080316c90..5b15033a94bf 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device,
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- if (ret < 0)
- comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
if (ret < 0) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676cf706a..d9ad2c0fdda2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, chan, range, ofs;
chan = CR_CHAN(insn->chanspec);
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
ofs = PCI171x_DA1;
}
+ val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++)
- outw(data[n], dev->iobase + ofs);
+ for (n = 0; n < insn->n; n++) {
+ val = data[n];
+ outw(val, dev->iobase + ofs);
+ }
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, rangereg, chan;
chan = CR_CHAN(insn->chanspec);
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
outb(rangereg, dev->iobase + PCI1720_RANGE);
devpriv->da_ranges = rangereg;
}
+ val = devpriv->ao_data[chan];
for (n = 0; n < insn->n; n++) {
- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+ val = data[n];
+ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb1254152..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
+#include <asm/unaligned.h>
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(get_unaligned((uint32_t
+ *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89eca44..33ac7fb88cbd 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@ done:
return rtn;
}
-/*
- * Common Packet Handling code
- */
-
-static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
- long dlen, long plen, int n1, u8 *dbuf)
-{
- char *error;
- long n;
- long remain;
- u8 *buf;
- u8 *b;
-
- remain = nd->nd_remain;
- nd->nd_tx_work = 1;
-
- /*
- * Otherwise data should appear only when we are
- * in the CS_READY state.
- */
-
- if (ch->ch_state < CS_READY) {
- error = "Data received before RWIN established";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * Assure that the data received is within the
- * allowable window.
- */
-
- n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
- if (dlen > n) {
- error = "Receive data overrun";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * If we received 3 or less characters,
- * assume it is a human typing, and set RTIME
- * to 10 milliseconds.
- *
- * If we receive 10 or more characters,
- * assume its not a human typing, and set RTIME
- * to 100 milliseconds.
- */
-
- if (ch->ch_edelay != DGRP_RTIME) {
- if (ch->ch_rtime != ch->ch_edelay) {
- ch->ch_rtime = ch->ch_edelay;
- ch->ch_flag |= CH_PARAM;
- }
- } else if (dlen <= 3) {
- if (ch->ch_rtime != 10) {
- ch->ch_rtime = 10;
- ch->ch_flag |= CH_PARAM;
- }
- } else {
- if (ch->ch_rtime != DGRP_RTIME) {
- ch->ch_rtime = DGRP_RTIME;
- ch->ch_flag |= CH_PARAM;
- }
- }
-
- /*
- * If a portion of the packet is outside the
- * buffer, shorten the effective length of the
- * data packet to be the amount of data received.
- */
-
- if (remain < plen)
- dlen -= plen - remain;
-
- /*
- * Detect if receive flush is now complete.
- */
-
- if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
- ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
- ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
- ch->ch_flag &= ~CH_RX_FLUSH;
- }
-
- /*
- * If we are ready to receive, move the data into
- * the receive buffer.
- */
-
- ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
- if (ch->ch_state == CS_READY &&
- (ch->ch_tun.un_open_count != 0) &&
- (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
- (ch->ch_cflag & CF_CREAD) != 0 &&
- (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
- (ch->ch_send & RR_RX_FLUSH) == 0) {
-
- if (ch->ch_rin + dlen >= RBUF_MAX) {
- n = RBUF_MAX - ch->ch_rin;
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
- ch->ch_rin = 0;
- dbuf += n;
- dlen -= n;
- }
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
- ch->ch_rin += dlen;
-
-
- /*
- * If we are not in fastcook mode, or
- * if there is a fastcook thread
- * waiting for data, send the data to
- * the line discipline.
- */
-
- if ((ch->ch_flag & CH_FAST_READ) == 0 ||
- ch->ch_inwait != 0) {
- dgrp_input(ch);
- }
-
- /*
- * If there is a read thread waiting
- * in select, and we are in fastcook
- * mode, wake him up.
- */
-
- if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
- (ch->ch_flag & CH_FAST_READ) != 0)
- wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
- /*
- * Wake any thread waiting in the
- * fastcook loop.
- */
-
- if ((ch->ch_flag & CH_INPUT) != 0) {
- ch->ch_flag &= ~CH_INPUT;
- wake_up_interruptible(&ch->ch_flag_wait);
- }
- }
-
- /*
- * Fabricate and insert a data packet header to
- * preced the remaining data when it comes in.
- */
-
- if (remain < plen) {
- dlen = plen - remain;
- b = buf;
-
- b[0] = 0x90 + n1;
- put_unaligned_be16(dlen, b + 1);
-
- remain = 3;
- if (remain > 0 && b != buf)
- memcpy(buf, b, remain);
-
- nd->nd_remain = remain;
- return;
- }
-}
-
/**
* dgrp_receive() -- decode data packets received from the remote PortServer.
* @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 1;
dbuf = b + 1;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 2;
dbuf = b + 2;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd)
dbuf = b + 3;
+ /*
+ * Common packet handling code.
+ */
+
+data:
+ nd->nd_tx_work = 1;
+
+ /*
+ * Otherwise data should appear only when we are
+ * in the CS_READY state.
+ */
+
+ if (ch->ch_state < CS_READY) {
+ error = "Data received before RWIN established";
+ goto prot_error;
+ }
+
+ /*
+ * Assure that the data received is within the
+ * allowable window.
+ */
+
+ n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+ if (dlen > n) {
+ error = "Receive data overrun";
+ goto prot_error;
+ }
+
+ /*
+ * If we received 3 or less characters,
+ * assume it is a human typing, and set RTIME
+ * to 10 milliseconds.
+ *
+ * If we receive 10 or more characters,
+ * assume its not a human typing, and set RTIME
+ * to 100 milliseconds.
+ */
+
+ if (ch->ch_edelay != DGRP_RTIME) {
+ if (ch->ch_rtime != ch->ch_edelay) {
+ ch->ch_rtime = ch->ch_edelay;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else if (dlen <= 3) {
+ if (ch->ch_rtime != 10) {
+ ch->ch_rtime = 10;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else {
+ if (ch->ch_rtime != DGRP_RTIME) {
+ ch->ch_rtime = DGRP_RTIME;
+ ch->ch_flag |= CH_PARAM;
+ }
+ }
+
+ /*
+ * If a portion of the packet is outside the
+ * buffer, shorten the effective length of the
+ * data packet to be the amount of data received.
+ */
+
+ if (remain < plen)
+ dlen -= plen - remain;
+
+ /*
+ * Detect if receive flush is now complete.
+ */
+
+ if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+ ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+ ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
+ ch->ch_flag &= ~CH_RX_FLUSH;
+ }
+
+ /*
+ * If we are ready to receive, move the data into
+ * the receive buffer.
+ */
+
+ ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+ if (ch->ch_state == CS_READY &&
+ (ch->ch_tun.un_open_count != 0) &&
+ (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+ (ch->ch_cflag & CF_CREAD) != 0 &&
+ (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+ (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+ if (ch->ch_rin + dlen >= RBUF_MAX) {
+ n = RBUF_MAX - ch->ch_rin;
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+ ch->ch_rin = 0;
+ dbuf += n;
+ dlen -= n;
+ }
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+ ch->ch_rin += dlen;
+
+
+ /*
+ * If we are not in fastcook mode, or
+ * if there is a fastcook thread
+ * waiting for data, send the data to
+ * the line discipline.
+ */
+
+ if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+ ch->ch_inwait != 0) {
+ dgrp_input(ch);
+ }
+
+ /*
+ * If there is a read thread waiting
+ * in select, and we are in fastcook
+ * mode, wake him up.
+ */
+
+ if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+ (ch->ch_flag & CH_FAST_READ) != 0)
+ wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+ /*
+ * Wake any thread waiting in the
+ * fastcook loop.
+ */
+
+ if ((ch->ch_flag & CH_INPUT) != 0) {
+ ch->ch_flag &= ~CH_INPUT;
+
+ wake_up_interruptible(&ch->ch_flag_wait);
+ }
+ }
+
+ /*
+ * Fabricate and insert a data packet header to
+ * precede the remaining data when it comes in.
+ */
+
+ if (remain < plen) {
+ dlen = plen - remain;
+ b = buf;
+
+ b[0] = 0x90 + n1;
+ put_unaligned_be16(dlen, b + 1);
+
+ remain = 3;
+ goto done;
+ }
break;
/*
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf0a7d3..cdeffe75496b 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
ret = register_wimax_device(phy_dev, &intf->dev);
+ if (ret)
+ release_usb(udev);
out:
if (ret) {
kfree(phy_dev);
kfree(udev);
+ usb_put_dev(usbdev);
} else {
usb_set_intfdata(intf, phy_dev);
}
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..c9fedb79e3a2 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
uint64_t mask;
unsigned be;
unsigned is_signed;
- unsigned enabled;
unsigned location;
};
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
while (ent = readdir(dp), ent != NULL) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
+ int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
ret = -errno;
goto error_cleanup_array;
}
- fscanf(sysfsfp, "%u", &current->enabled);
+ fscanf(sysfsfp, "%u", &current_enabled);
fclose(sysfsfp);
- if (!current->enabled) {
+ if (!current_enabled) {
free(filename);
count--;
continue;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea36410f716..5708ffc62aec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = {
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ .shift = 12 - (_realbits), \
+ .endianness = IIO_BE, \
+ }, \
.event_spec = _ev_spec, \
.num_event_specs = _num_ev_spec, \
}
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669bb60e..7fc66a6a6e36 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1035,8 +1035,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4);
SHOW_SCALE_AVAILABLE_ATTR(5);
SHOW_SCALE_AVAILABLE_ATTR(6);
SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(8);
-SHOW_SCALE_AVAILABLE_ATTR(9);
SHOW_SCALE_AVAILABLE_ATTR(10);
SHOW_SCALE_AVAILABLE_ATTR(11);
SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1051,6 @@ static struct attribute *mxs_lradc_attributes[] = {
&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
* of the array.
*/
scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
- (iio->channels[i].scan_type.realbits - s);
+ (LRADC_RESOLUTION - s);
lradc->scale_avail[i][s].nano =
do_div(scale_uv, 100000000) * 10;
lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate(indio_dev);
- if (buffer)
+ if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8bae6..236ed66f116a 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_vblank_cleanup(imxdrm->drm);
- drm_kms_helper_poll_fini(imxdrm->drm);
- drm_mode_config_cleanup(imxdrm->drm);
+ drm_vblank_cleanup(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
return 0;
}
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister(
}
/*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- int ret;
-
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- return ret;
-
- drm_crtc_helper_add(imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
* Called by the CRTC driver when all CRTCs are registered. This
* puts all the pieces together and initializes the driver.
* Once this is called no more CRTCs can be registered since
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
mutex_lock(&imxdrm->mutex);
- drm_kms_helper_poll_init(imxdrm->drm);
+ drm_kms_helper_poll_init(drm);
/* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(imxdrm->drm,
- &imxdrm->drm->primary->mode_group);
+ ret = drm_mode_group_init_legacy_group(drm,
+ &drm->primary->mode_group);
if (ret)
goto err_kms;
- ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+ ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
goto err_kms;
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = true;
+ drm->vblank_disable_allowed = true;
if (!imx_drm_device_get()) {
ret = -EINVAL;
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = imx_drm_crtc_register(imx_drm_crtc);
+ ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
goto err_register;
+ drm_crtc_helper_add(crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+
+ drm_crtc_init(imxdrm->drm, crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
+ drm_mode_group_reinit(imxdrm->drm);
+
imx_drm_update_possible_crtcs();
mutex_unlock(&imxdrm->mutex);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e2e492..62ce0e86f14b 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/hdmi.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +53,6 @@ enum hdmi_datamap {
YCbCr422_12B = 0x12,
};
-enum hdmi_colorimetry {
- ITU601,
- ITU709,
-};
-
enum imx_hdmi_devtype {
IMX6Q_HDMI,
IMX6DL_HDMI,
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
if (is_color_space_conversion(hdmi)) {
if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
} else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
/* Set up colorimetry */
if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
} else if (hdmi->hdmi_data.enc_out_format != RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
} else { /* Carries no data */
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
(hdmi->vic == 21) || (hdmi->vic == 22) ||
(hdmi->vic == 2) || (hdmi->vic == 3) ||
(hdmi->vic == 17) || (hdmi->vic == 18))
- hdmi->hdmi_data.colorimetry = ITU601;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
else
- hdmi->hdmi_data.colorimetry = ITU709;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
(hdmi->vic == 12) || (hdmi->vic == 13) ||
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
* Other minor misc cleanups...
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
__u16 kuc_msglen; /* Including header */
} __attribute__((aligned(sizeof(__u64))));
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
#define KUC_MAGIC 0x191C /*Lustre9etLinC */
#define KUC_FL_BLOCK 0x01 /* Wait for send */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
do { \
LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & GFP_ATOMIC)) != 0); \
+ ((mask) & __GFP_WAIT) == 0)); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 93648632ba26..6f58ead20393 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (is_vmalloc_addr(vaddr)) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..b7b53b579c85 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- int mpflag = 0;
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
- if (lntmsg->msg_vmflush)
+ if (!mpflag)
cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240e824..7893d83e131f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+ sizeof(struct changelog_ext_rec))
struct changelog_rec {
__u16 cr_namelen;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc95bc5..52b7731bcc38 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
break;
case Q_GETQUOTA:
if (((type == USRQUOTA &&
- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
(!cfs_capable(CFS_CAP_SYS_ADMIN) ||
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c34ddc..83013927e131 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
- LASSERT(len <= CR_MAXSIZE);
+ LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata)
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
@@ -1540,7 +1540,7 @@ out:
if (ctxt)
llog_ctxt_put(ctxt);
if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
OBD_FREE_PTR(cs);
return rc;
}
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index 10bb41c2fb6d..eecb1f2a5574 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
if (usbdev->descriptor.bNumConfigurations != 1) {
dev_err(&interface->dev, "can't handle multiple config\n");
- return -ENODEV;
+ goto failed2;
}
vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
return 0;
failed2:
+ usb_put_dev(usbdev);
dev_err(&interface->dev, "probe failed\n");
return -ENODEV;
}
@@ -115,6 +116,7 @@ failed2:
static void go7007_loader_disconnect(struct usb_interface *interface)
{
dev_info(&interface->dev, "disconnect\n");
+ usb_put_dev(interface_to_usbdev(interface));
usb_set_intfdata(interface, NULL);
}
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed17e39..d8ea25486a33 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (priv->mii_bus->irq == NULL) {
+ pr_err("irq alloc failed\n");
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
/* Scan only the enabled address */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91238a1..5a001d9b4252 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
*/
#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-static int octeon_usb_get_clock_type(void)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
-}
-
/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void)
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
- int usb_port_number)
+ int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- enum cvmx_usb_initialize_flags flags = 0;
int i;
/* At first allow 0-1 for the usb port number */
if ((usb_port_number < 0) || (usb_port_number > 1))
return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
- /* Only 12 MHZ crystals are supported */
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- } else {
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return 0;
}
-
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = {
.hub_control = octeon_usb_hub_control,
};
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
- int usb_num = to_platform_device(dev)->id;
- int irq = platform_get_irq(to_platform_device(dev), 0);
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
struct octeon_hcd *priv;
struct usb_hcd *hcd;
unsigned long flags;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (dev->of_node == NULL) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate);
+ return -ENXIO;
+
+ }
+
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+ irq = irq_create_mapping(NULL, hwirq);
+ }
/*
* Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev)
dev->coherent_dma_mask = ~0;
dev->dma_mask = &dev->coherent_dma_mask;
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev)
tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num);
+ status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev)
cvmx_usb_poll(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
- status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
kfree(hcd);
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev)
}
device_wakeup_enable(hcd->self.controller);
- dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
-static int octeon_usb_driver_remove(struct device *dev)
+static int octeon_usb_remove(struct platform_device *pdev)
{
int status;
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev)
return 0;
}
-static struct device_driver octeon_usb_driver = {
- .name = "OcteonUSB",
- .bus = &platform_bus_type,
- .probe = octeon_usb_driver_probe,
- .remove = octeon_usb_driver_remove,
+static struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
};
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "OcteonUSB",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
-#define MAX_USB_PORTS 10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
+static int __init octeon_usb_driver_init(void)
{
- int num_devices = cvmx_usb_get_num_ports();
- int device;
-
- if (usb_disabled() || num_devices == 0)
- return -ENODEV;
-
- if (driver_register(&octeon_usb_driver))
- return -ENOMEM;
-
- octeon_usb_registered = 1;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- for (device = 0; device < num_devices; device++) {
- struct resource irq_resource;
- struct platform_device *pdev;
- memset(&irq_resource, 0, sizeof(irq_resource));
- irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
- irq_resource.end = irq_resource.start;
- irq_resource.flags = IORESOURCE_IRQ;
- pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1);
- if (IS_ERR(pdev)) {
- driver_unregister(&octeon_usb_driver);
- octeon_usb_registered = 0;
- return PTR_ERR(pdev);
- }
- if (device < MAX_USB_PORTS)
- pdev_glob[device] = pdev;
+ if (usb_disabled())
+ return 0;
- }
- return 0;
+ return platform_driver_register(&octeon_usb_driver);
}
+module_init(octeon_usb_driver_init);
-static void __exit octeon_usb_module_cleanup(void)
+static void __exit octeon_usb_driver_exit(void)
{
- int i;
+ if (usb_disabled())
+ return;
- for (i = 0; i < MAX_USB_PORTS; i++)
- if (pdev_glob[i]) {
- platform_device_unregister(pdev_glob[i]);
- pdev_glob[i] = NULL;
- }
- if (octeon_usb_registered)
- driver_unregister(&octeon_usb_driver);
+ platform_driver_unregister(&octeon_usb_driver);
}
+module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb060364dfe7..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
if (binding) {
binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
binding->ptype.func = oz_pkt_recv;
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
}
} else {
oz_dbg(ON, "Binding to all netcards\n");
+ memset(binding->name, 0, OZ_MAX_BINDING_LEN);
binding->ptype.dev = NULL;
}
if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
ret = -EINVAL;
goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
/* Commented by Albert 20121209 */
/* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
else
- sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
return -EFAULT;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6ec51f..a70dcef1419e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
/*=== Customer ID ===*/
/****** 8188EUS ********/
- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
index 2aa5dac2f1df..abccc9dabd65 100644
--- a/drivers/staging/rtl8821ae/Kconfig
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -1,6 +1,6 @@
config R8821AE
tristate "RealTek RTL8821AE Wireless LAN NIC driver"
- depends on PCI && WLAN
+ depends on PCI && WLAN && MAC80211
depends on m
select WIRELESS_EXT
select WEXT_PRIV
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
index cfe88a1efd55..76bef93ad70a 100644
--- a/drivers/staging/rtl8821ae/wifi.h
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -1414,7 +1414,7 @@ struct rtl_dm {
/*88e tx power tracking*/
- u8 bb_swing_idx_ofdm[2];
+ u8 bb_swing_idx_ofdm[MAX_RF_PATH];
u8 bb_swing_idx_ofdm_current;
u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
bool bb_swing_flag_Ofdm;
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
struct pool *p;
p = calloc(1, sizeof(struct pool));
- if (!p) {
- free(p);
+ if (!p)
return NULL;
- }
p->mem = calloc(1, size);
- if (!p->mem)
+ if (!p->mem) {
+ free(p);
return NULL;
+ }
p->next = pool_head;
pool_head = p;
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..0141bc34d5cc 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_WIRELESS:
break;
default:
- pr_err("speed %d\n", speed);
+ pr_err("Failed attach request for unsupported USB speed: %s\n",
+ usb_speed_string(speed));
return -EINVAL;
}
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf5e00f..187fc060de26 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
goto out;
}
- if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+ if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22262a3a0e2d..dade5b7699bc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+ depends on FB && ARCH_MXC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
config EXYNOS_LCD_S6E8AX0
bool "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+ depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+ depends on (LCD_CLASS_DEVICE = y)
default n
help
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index bbeb8dd7f108..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
- u16 in_height = DIV_ROUND_UP(height, *decim_y);
+ u16 in_height = height / *decim_y;
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
do {
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (r)
return r;
- in_width = DIV_ROUND_UP(in_width, x_predecim);
- in_height = DIV_ROUND_UP(in_height, y_predecim);
+ in_width = in_width / x_predecim;
+ in_height = in_height / y_predecim;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 7411f2674e16..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
struct dsi_clock_info dsi_cinfo;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index efb9ee9e3c96..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
struct sdi_clk_calc_ctx {
unsigned long pck_min, pck_max;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbfa95ca..1b5d48c578e1 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8b777c..9911cd5fddb5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4c4c566c52a3..79d25894343a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,7 @@ config SA1100_WATCHDOG
config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
+ depends on HAS_IOMEM
help
Say Y here if to include support for the Synopsys DesignWare
watchdog timer found in many chips.
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d75c811bfa56..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 4672e003c0ad..f4a9e3311297 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = ret;
goto out;
}
+ /* New interdomain events are bound to VCPU 0. */
+ bind_evtchn_to_cpu(evtchn, 0);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 34a2704fbc88..073b4a19a8b0 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
}
pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs_userspace(map->map_ops,
- use_ptemod ? map->kmap_ops : NULL,
- map->pages,
- map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
if (err)
return err;
@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
- use_ptemod ? map->kmap_ops + offset : NULL,
- map->pages + offset,
- pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset,
+ use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
+ pages);
if (err)
return err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8ee13e2e45e2..b84e3ab839aa 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
-int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count,
- bool m2p_override)
+ struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
pte_t *pte;
- unsigned long mfn, pfn;
+ unsigned long mfn;
- BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
return ret;
@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT);
}
- return 0;
+ return ret;
}
- if (m2p_override &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
- pfn = page_to_pfn(pages[i]);
-
- WARN_ON(PagePrivate(pages[i]));
- SetPagePrivate(pages[i]);
- set_page_private(pages[i], mfn);
-
- pages[i]->index = pfn_to_mfn(pfn);
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
- }
- if (m2p_override)
- ret = m2p_add_override(mfn, pages[i], kmap_ops ?
- &kmap_ops[i] : NULL);
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
goto out;
}
@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
}
-
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_map_refs(map_ops, NULL, pages, count, false);
-}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
-
-int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count,
- bool m2p_override)
+ struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
- unsigned long pfn, mfn;
- BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
return ret;
@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
- return 0;
+ return ret;
}
- if (m2p_override &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
- pfn = page_to_pfn(pages[i]);
- mfn = get_phys_to_machine(pfn);
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
- ret = -EINVAL;
- goto out;
- }
-
- set_page_private(pages[i], INVALID_P2M_ENTRY);
- WARN_ON(!PagePrivate(pages[i]));
- ClearPagePrivate(pages[i]);
- set_phys_to_machine(pfn, pages[i]->index);
- if (m2p_override)
- ret = m2p_remove_override(pages[i],
- kmap_ops ?
- &kmap_ops[i] : NULL,
- mfn);
+ ret = m2p_remove_override(pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
goto out;
}
@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
}
-
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
-}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
-
static unsigned nr_status_frames(unsigned nr_grant_frames)
{
BUG_ON(grefs_per_grant_frame == 0);
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
- void *buffer, unsigned long bytes)
-{
- unsigned long recorded = 0;
- int i = 0;
-
- while ((recorded < bytes) && (i < desc->nr_addrs)) {
- unsigned long vaddr = (unsigned long)buffer + recorded;
- unsigned long paddr;
- int offset;
- int chunksz;
-
- offset = vaddr % PAGE_SIZE; /* handle partial pages */
- chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
- paddr = xencomm_vtop(vaddr);
- if (paddr == ~0UL) {
- printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
- __func__, vaddr);
- return -EINVAL;
- }
-
- desc->address[i++] = paddr;
- recorded += chunksz;
- }
-
- if (recorded < bytes) {
- printk(KERN_DEBUG
- "%s: could only translate %ld of %ld bytes\n",
- __func__, recorded, bytes);
- return -ENOSPC;
- }
-
- /* mark remaining addresses invalid (just for safety) */
- while (i < desc->nr_addrs)
- desc->address[i++] = XENCOMM_INVALID;
-
- desc->magic = XENCOMM_MAGIC;
-
- return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
- void *buffer, unsigned long bytes)
-{
- struct xencomm_desc *desc;
- unsigned long buffer_ulong = (unsigned long)buffer;
- unsigned long start = buffer_ulong & PAGE_MASK;
- unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
- unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
- unsigned long size = sizeof(*desc) +
- sizeof(desc->address[0]) * nr_addrs;
-
- /*
- * slab allocator returns at least sizeof(void*) aligned pointer.
- * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
- * cross page boundary.
- */
- if (sizeof(*desc) > sizeof(void *)) {
- unsigned long order = get_order(size);
- desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
- order);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs =
- ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
- sizeof(*desc->address);
- } else {
- desc = kmalloc(size, gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = nr_addrs;
- }
- return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
- struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
- if (sizeof(*desc__) > sizeof(void *)) {
- unsigned long size = sizeof(*desc__) +
- sizeof(desc__->address[0]) * desc__->nr_addrs;
- unsigned long order = get_order(size);
- free_pages((unsigned long)__va(desc), order);
- } else
- kfree(__va(desc));
- }
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
- struct xencomm_desc **ret, gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
- int rc;
-
- pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
- if (bytes == 0) {
- /* don't create a descriptor; Xen recognizes NULL. */
- BUG_ON(buffer != NULL);
- *ret = NULL;
- return 0;
- }
-
- BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
- desc = xencomm_alloc(gfp_mask, buffer, bytes);
- if (!desc) {
- printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
- return -ENOMEM;
- }
-
- rc = xencomm_init(desc, buffer, bytes);
- if (rc) {
- printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((struct xencomm_handle *)__pa(desc));
- return rc;
- }
-
- *ret = desc;
- return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
- unsigned long paddr;
-
- BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
- paddr = (unsigned long)xencomm_pa(ptr);
- BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
- unsigned long bytes, struct xencomm_mini *xc_desc,
- struct xencomm_desc **ret)
-{
- int rc = 0;
- struct xencomm_desc *desc;
- BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
- desc = (void *)xc_desc;
-
- desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
- rc = xencomm_init(desc, buffer, bytes);
- if (!rc)
- *ret = desc;
-
- return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
- int rc;
- struct xencomm_desc *desc;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
- if (rc || desc == NULL)
- return NULL;
-
- return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
- struct xencomm_mini *xc_desc)
-{
- int rc;
- struct xencomm_desc *desc = NULL;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create_mini(ptr, bytes, xc_desc,
- &desc);
-
- if (rc)
- return NULL;
-
- return xencomm_pa(desc);
-}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0bad24ddc2e7..0129b78a6908 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio)
}
EXPORT_SYMBOL(bio_integrity_free);
+static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
+{
+ if (bip->bip_slab == BIO_POOL_NONE)
+ return BIP_INLINE_VECS;
+
+ return bvec_nr_vecs(bip->bip_slab);
+}
+
/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
@@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
+ if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
@@ -226,7 +234,8 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
}
EXPORT_SYMBOL(bio_integrity_tag_size);
-int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
+static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
+ int set)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
diff --git a/fs/bio.c b/fs/bio.c
index 75c49a382239..8754e7b6eb49 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -611,7 +611,6 @@ EXPORT_SYMBOL(bio_clone_fast);
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
struct bio_set *bs)
{
- unsigned nr_iovecs = 0;
struct bvec_iter iter;
struct bio_vec bv;
struct bio *bio;
@@ -638,10 +637,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
* __bio_clone_fast() anyways.
*/
- bio_for_each_segment(bv, bio_src, iter)
- nr_iovecs++;
-
- bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
+ bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
@@ -650,9 +646,18 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+ if (bio->bi_rw & REQ_DISCARD)
+ goto integrity_clone;
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+ bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+ goto integrity_clone;
+ }
+
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
+integrity_clone:
if (bio_integrity(bio_src)) {
int ret;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 49a62b4dda3b..0e8388e72d8d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -92,11 +92,11 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
-#include <linux/crc32c.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
+#include "hash.h"
#include "transaction.h"
#include "extent_io.h"
#include "volumes.h"
@@ -1823,7 +1823,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
size_t sublen = i ? PAGE_CACHE_SIZE :
(PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
- crc = crc32c(crc, data, sublen);
+ crc = btrfs_crc32c(crc, data, sublen);
}
btrfs_csum_final(crc, csum);
if (memcmp(csum, h->csum, state->csum_size))
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index e2600cdb6c25..b01fb6c527e3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1010,6 +1010,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+ if (*pg_index == (vcnt - 1) && *pg_offset == 0)
+ memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
kunmap_atomic(kaddr);
flush_dcache_page(page_out);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0e69295d0031..5215f04260b2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -26,7 +26,6 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
@@ -35,6 +34,7 @@
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
+#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
@@ -244,7 +244,7 @@ out:
u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
- return crc32c(seed, data, len);
+ return btrfs_crc32c(seed, data, len);
}
void btrfs_csum_final(u32 crc, char *result)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9c9ecc93ae2c..32312e09f0f5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2385,6 +2385,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
locked_ref = NULL;
cond_resched();
+ count++;
continue;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5c4ab9c18940..184e9cb39647 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2629,7 +2629,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
EXTENT_DEFRAG, 1, cached_state);
if (ret) {
u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
- if (last_snapshot >= BTRFS_I(inode)->generation)
+ if (0 && last_snapshot >= BTRFS_I(inode)->generation)
/* the inode is shared */
new = record_old_file_extents(inode, ordered_extent);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b0134892dc70..383ab455bfa7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4525,7 +4525,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
spin_lock(&root->fs_info->super_lock);
strcpy(super_block->label, label);
spin_unlock(&root->fs_info->super_lock);
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
out_unlock:
mnt_drop_write_file(file);
@@ -4668,7 +4668,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
if (ret)
return ret;
- trans = btrfs_start_transaction(root, 1);
+ trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
@@ -4689,7 +4689,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
btrfs_set_super_incompat_flags(super_block, newflags);
spin_unlock(&root->fs_info->super_lock);
- return btrfs_end_transaction(trans, root);
+ return btrfs_commit_transaction(trans, root);
}
long btrfs_ioctl(struct file *file, unsigned int
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 730dce395858..9c8d1a3fdc3a 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -24,12 +24,12 @@
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
-#include <linux/crc32c.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include "send.h"
#include "backref.h"
+#include "hash.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
@@ -620,7 +620,7 @@ static int send_cmd(struct send_ctx *sctx)
hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
hdr->crc = 0;
- crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
hdr->crc = cpu_to_le32(crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
@@ -2774,8 +2774,6 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
return 0;
}
-#ifdef CONFIG_BTRFS_ASSERT
-
static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
struct rb_node *n = sctx->waiting_dir_moves.rb_node;
@@ -2796,8 +2794,6 @@ static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
return -ENOENT;
}
-#endif
-
static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
{
struct rb_node **p = &sctx->pending_dir_moves.rb_node;
@@ -2902,7 +2898,9 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
}
sctx->send_progress = sctx->cur_ino + 1;
- ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0);
+ ret = del_waiting_dir_move(sctx, pm->ino);
+ ASSERT(ret == 0);
+
ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c02f63356895..97cc24198554 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1996,7 +1996,7 @@ static void __exit exit_btrfs_fs(void)
btrfs_hash_exit();
}
-module_init(init_btrfs_fs)
+late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)
MODULE_LICENSE("GPL");
diff --git a/fs/buffer.c b/fs/buffer.c
index 651dba10b9c2..27265a8b43c1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
static void __set_page_dirty(struct page *page,
struct address_space *mapping, int warn)
{
- spin_lock_irq(&mapping->tree_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 8f9b4f710d4a..c819b0bd491a 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1043,15 +1043,30 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
__u32 secdesclen = 0;
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct cifs_tcon *tcon;
+
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
/* Get the security descriptor */
- pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
+
+ if (tcon->ses->server->ops->get_acl == NULL) {
+ cifs_put_tlink(tlink);
+ return -EOPNOTSUPP;
+ }
+
+ pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
+ &secdesclen);
if (IS_ERR(pntsd)) {
rc = PTR_ERR(pntsd);
cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
- goto out;
+ cifs_put_tlink(tlink);
+ return rc;
}
/*
@@ -1064,6 +1079,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
pnntsd = kmalloc(secdesclen, GFP_KERNEL);
if (!pnntsd) {
kfree(pntsd);
+ cifs_put_tlink(tlink);
return -ENOMEM;
}
@@ -1072,14 +1088,18 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+ if (tcon->ses->server->ops->set_acl == NULL)
+ rc = -EOPNOTSUPP;
+
if (!rc) {
/* Set the security descriptor */
- rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag);
+ rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
+ path, aclflag);
cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
}
+ cifs_put_tlink(tlink);
kfree(pnntsd);
kfree(pntsd);
-out:
return rc;
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a245d1809ed8..86dc28c7aa5c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -323,7 +323,8 @@ struct smb_version_operations {
/* async read from the server */
int (*async_readv)(struct cifs_readdata *);
/* async write to the server */
- int (*async_writev)(struct cifs_writedata *);
+ int (*async_writev)(struct cifs_writedata *,
+ void (*release)(struct kref *));
/* sync read from the server */
int (*sync_read)(const unsigned int, struct cifsFileInfo *,
struct cifs_io_parms *, unsigned int *, char **,
@@ -395,6 +396,10 @@ struct smb_version_operations {
int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
const char *, const void *, const __u16,
const struct nls_table *, int);
+ struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
+ const char *, u32 *);
+ int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+ int);
};
struct smb_version_values {
@@ -1064,7 +1069,7 @@ struct cifs_writedata {
unsigned int pagesz;
unsigned int tailsz;
unsigned int nr_pages;
- struct page *pages[1];
+ struct page *pages[];
};
/*
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 79e6e9a93a8c..d00e09dfc452 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -488,7 +488,8 @@ void cifs_readdata_release(struct kref *refcount);
int cifs_async_readv(struct cifs_readdata *rdata);
int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
-int cifs_async_writev(struct cifs_writedata *wdata);
+int cifs_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref));
void cifs_writev_complete(struct work_struct *work);
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
work_func_t complete);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4d881c35eeca..f3264bd7a83d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1910,7 +1910,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
do {
server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata);
+ rc = server->ops->async_writev(wdata, cifs_writedata_release);
} while (rc == -EAGAIN);
for (i = 0; i < wdata->nr_pages; i++) {
@@ -1962,15 +1962,9 @@ cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
{
struct cifs_writedata *wdata;
- /* this would overflow */
- if (nr_pages == 0) {
- cifs_dbg(VFS, "%s: called with nr_pages == 0!\n", __func__);
- return NULL;
- }
-
/* writedata + number of page pointers */
wdata = kzalloc(sizeof(*wdata) +
- sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
+ sizeof(struct page *) * nr_pages, GFP_NOFS);
if (wdata != NULL) {
kref_init(&wdata->refcount);
INIT_LIST_HEAD(&wdata->list);
@@ -2031,7 +2025,8 @@ cifs_writev_callback(struct mid_q_entry *mid)
/* cifs_async_writev - send an async write, and set up mid to handle result */
int
-cifs_async_writev(struct cifs_writedata *wdata)
+cifs_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref))
{
int rc = -EACCES;
WRITE_REQ *smb = NULL;
@@ -2105,7 +2100,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
else
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, release);
async_writev_out:
cifs_small_buf_release(smb);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 853d6d1cc822..755584684f6c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2043,7 +2043,8 @@ retry:
}
wdata->pid = wdata->cfile->pid;
server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata);
+ rc = server->ops->async_writev(wdata,
+ cifs_writedata_release);
} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
for (i = 0; i < nr_pages; ++i)
@@ -2331,9 +2332,20 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
}
static void
-cifs_uncached_writev_complete(struct work_struct *work)
+cifs_uncached_writedata_release(struct kref *refcount)
{
int i;
+ struct cifs_writedata *wdata = container_of(refcount,
+ struct cifs_writedata, refcount);
+
+ for (i = 0; i < wdata->nr_pages; i++)
+ put_page(wdata->pages[i]);
+ cifs_writedata_release(refcount);
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
struct inode *inode = wdata->cfile->dentry->d_inode;
@@ -2347,12 +2359,7 @@ cifs_uncached_writev_complete(struct work_struct *work)
complete(&wdata->done);
- if (wdata->result != -EAGAIN) {
- for (i = 0; i < wdata->nr_pages; i++)
- put_page(wdata->pages[i]);
- }
-
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
/* attempt to send write to server, retry on any -EAGAIN errors */
@@ -2370,7 +2377,8 @@ cifs_uncached_retry_writev(struct cifs_writedata *wdata)
if (rc != 0)
continue;
}
- rc = server->ops->async_writev(wdata);
+ rc = server->ops->async_writev(wdata,
+ cifs_uncached_writedata_release);
} while (rc == -EAGAIN);
return rc;
@@ -2454,7 +2462,8 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
rc = cifs_uncached_retry_writev(wdata);
if (rc) {
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount,
+ cifs_uncached_writedata_release);
break;
}
@@ -2496,7 +2505,7 @@ restart_loop:
}
}
list_del_init(&wdata->list);
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
if (total_written > 0)
@@ -2559,8 +2568,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
if (rc > 0) {
ssize_t err;
- err = generic_write_sync(file, pos, rc);
- if (err < 0 && rc > 0)
+ err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+ if (err < 0)
rc = err;
}
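
The new cifs_uncached_writedata_release() above layers extra cleanup on top of the common destructor: it drops the per-page references taken for the uncached write and then chains to cifs_writedata_release(). A hedged, userspace-only sketch of that chaining (flat stand-in structures, not the kernel's):

#include <stdio.h>

struct kref { int count; };

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
        if (--k->count == 0)
                release(k);
}

#define NR_PAGES 4

struct writedata {
        struct kref refcount;
        int nr_pages;
        int page_refs[NR_PAGES];        /* stand-in for struct page counts */
};

static void writedata_release(struct kref *k)
{
        printf("free the writedata itself\n");
}

/* container_of() in the kernel; a direct cast works for this flat layout */
static void uncached_writedata_release(struct kref *k)
{
        struct writedata *wdata = (struct writedata *)k;
        int i;

        for (i = 0; i < wdata->nr_pages; i++)
                wdata->page_refs[i]--;          /* put_page() stand-in */
        writedata_release(k);                   /* then the common teardown */
}

int main(void)
{
        struct writedata wdata = { .refcount = { 1 }, .nr_pages = NR_PAGES,
                                   .page_refs = { 1, 1, 1, 1 } };

        kref_put(&wdata.refcount, uncached_writedata_release);
        return 0;
}
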
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9cb9679d7357..be58b8fcdb3c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -527,10 +527,15 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
- rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
- ea_value, 4 /* size of buf */, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (tcon->ses->server->ops->query_all_EAs == NULL) {
+ cifs_put_tlink(tlink);
+ return -EOPNOTSUPP;
+ }
+
+ rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
+ "SETFILEBITS", ea_value, 4 /* size of buf */,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
cifs_put_tlink(tlink);
if (rc < 0)
return (int)rc;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 9ac5bfc9cc56..bfd66d84831e 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1067,6 +1067,14 @@ struct smb_version_operations smb1_operations = {
.query_mf_symlink = cifs_query_mf_symlink,
.create_mf_symlink = cifs_create_mf_symlink,
.is_read_op = cifs_is_read_op,
+#ifdef CONFIG_CIFS_XATTR
+ .query_all_EAs = CIFSSMBQAllEAs,
+ .set_EA = CIFSSMBSetEA,
+#endif /* CIFS_XATTR */
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_cifs_acl,
+ .set_acl = set_cifs_acl,
+#endif /* CIFS_ACL */
};
struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2013234b73ad..a3f7a9c3cc69 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1890,7 +1890,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
/* smb2_async_writev - send an async write, and set up mid to handle result */
int
-smb2_async_writev(struct cifs_writedata *wdata)
+smb2_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref))
{
int rc = -EACCES;
struct smb2_write_req *req = NULL;
@@ -1938,7 +1939,7 @@ smb2_async_writev(struct cifs_writedata *wdata)
smb2_writev_callback, wdata, 0);
if (rc) {
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, release);
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
}
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 93adc64666f3..0ce48db20a65 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -123,7 +123,8 @@ extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
extern int smb2_async_readv(struct cifs_readdata *rdata);
extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type);
-extern int smb2_async_writev(struct cifs_writedata *wdata);
+extern int smb2_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref));
extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec);
extern int SMB2_echo(struct TCP_Server_Info *server);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 95c43bb20335..5ac836a86b18 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -176,8 +176,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
rc = -ENOMEM;
} else {
memcpy(pacl, ea_value, value_size);
- rc = set_cifs_acl(pacl, value_size,
- direntry->d_inode, full_path, CIFS_ACL_DACL);
+ if (pTcon->ses->server->ops->set_acl)
+ rc = pTcon->ses->server->ops->set_acl(pacl,
+ value_size, direntry->d_inode,
+ full_path, CIFS_ACL_DACL);
+ else
+ rc = -EOPNOTSUPP;
if (rc == 0) /* force revalidate of the inode */
CIFS_I(direntry->d_inode)->time = 0;
kfree(pacl);
@@ -323,8 +327,11 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
u32 acllen;
struct cifs_ntsd *pacl;
- pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
- full_path, &acllen);
+ if (pTcon->ses->server->ops->get_acl == NULL)
+ goto get_ea_exit; /* rc already EOPNOTSUPP */
+
+ pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
+ direntry->d_inode, full_path, &acllen);
if (IS_ERR(pacl)) {
rc = PTR_ERR(pacl);
cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
diff --git a/fs/exec.c b/fs/exec.c
index e1529b4c79b1..3d78fccdd723 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -748,11 +748,10 @@ EXPORT_SYMBOL(setup_arg_pages);
#endif /* CONFIG_MMU */
-struct file *open_exec(const char *name)
+static struct file *do_open_exec(struct filename *name)
{
struct file *file;
int err;
- struct filename tmp = { .name = name };
static const struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC | MAY_OPEN,
@@ -760,7 +759,7 @@ struct file *open_exec(const char *name)
.lookup_flags = LOOKUP_FOLLOW,
};
- file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags);
+ file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
if (IS_ERR(file))
goto out;
@@ -784,6 +783,12 @@ exit:
fput(file);
return ERR_PTR(err);
}
+
+struct file *open_exec(const char *name)
+{
+ struct filename tmp = { .name = name };
+ return do_open_exec(&tmp);
+}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
@@ -1162,7 +1167,7 @@ int prepare_bprm_creds(struct linux_binprm *bprm)
return -ENOMEM;
}
-void free_bprm(struct linux_binprm *bprm)
+static void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
@@ -1432,7 +1437,7 @@ static int exec_binprm(struct linux_binprm *bprm)
/*
* sys_execve() executes a new program.
*/
-static int do_execve_common(const char *filename,
+static int do_execve_common(struct filename *filename,
struct user_arg_ptr argv,
struct user_arg_ptr envp)
{
@@ -1441,6 +1446,9 @@ static int do_execve_common(const char *filename,
struct files_struct *displaced;
int retval;
+ if (IS_ERR(filename))
+ return PTR_ERR(filename);
+
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
@@ -1473,7 +1481,7 @@ static int do_execve_common(const char *filename,
check_unsafe_exec(bprm);
current->in_execve = 1;
- file = open_exec(filename);
+ file = do_open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
goto out_unmark;
@@ -1481,8 +1489,7 @@ static int do_execve_common(const char *filename,
sched_exec();
bprm->file = file;
- bprm->filename = filename;
- bprm->interp = filename;
+ bprm->filename = bprm->interp = filename->name;
retval = bprm_mm_init(bprm);
if (retval)
@@ -1523,6 +1530,7 @@ static int do_execve_common(const char *filename,
acct_update_integrals(current);
task_numa_free(current);
free_bprm(bprm);
+ putname(filename);
if (displaced)
put_files_struct(displaced);
return retval;
@@ -1544,10 +1552,11 @@ out_files:
if (displaced)
reset_files_struct(displaced);
out_ret:
+ putname(filename);
return retval;
}
-int do_execve(const char *filename,
+int do_execve(struct filename *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
@@ -1557,7 +1566,7 @@ int do_execve(const char *filename,
}
#ifdef CONFIG_COMPAT
-static int compat_do_execve(const char *filename,
+static int compat_do_execve(struct filename *filename,
const compat_uptr_t __user *__argv,
const compat_uptr_t __user *__envp)
{
@@ -1607,25 +1616,13 @@ SYSCALL_DEFINE3(execve,
const char __user *const __user *, argv,
const char __user *const __user *, envp)
{
- struct filename *path = getname(filename);
- int error = PTR_ERR(path);
- if (!IS_ERR(path)) {
- error = do_execve(path->name, argv, envp);
- putname(path);
- }
- return error;
+ return do_execve(getname(filename), argv, envp);
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_execve(const char __user * filename,
const compat_uptr_t __user * argv,
const compat_uptr_t __user * envp)
{
- struct filename *path = getname(filename);
- int error = PTR_ERR(path);
- if (!IS_ERR(path)) {
- error = compat_do_execve(path->name, argv, envp);
- putname(path);
- }
- return error;
+ return compat_do_execve(getname(filename), argv, envp);
}
#endif
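
With the exec.c changes above, sys_execve() passes the result of getname() straight into do_execve(), which now owns the struct filename: it checks for an error pointer up front and calls putname() on every exit path. A compilable model of that ownership-transfer idiom, using simplified ERR_PTR/IS_ERR helpers rather than the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct filename { char *name; };

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-4095;        /* last page = errno */
}

static struct filename *getname(const char *user)
{
        struct filename *f;

        if (!user)
                return ERR_PTR(-EFAULT);
        f = malloc(sizeof(*f));
        if (!f)
                return ERR_PTR(-ENOMEM);
        f->name = malloc(strlen(user) + 1);
        if (!f->name) {
                free(f);
                return ERR_PTR(-ENOMEM);
        }
        strcpy(f->name, user);
        return f;
}

static void putname(struct filename *f)
{
        free(f->name);
        free(f);
}

/* consumer owns the filename from here on, even when it is an error */
static int do_execve_model(struct filename *filename)
{
        int retval;

        if (IS_ERR(filename))
                return PTR_ERR(filename);

        retval = 0;             /* ... open the file, load the binary ... */
        putname(filename);      /* every exit path drops the name */
        return retval;
}

int main(void)
{
        printf("%d\n", do_execve_model(getname("/bin/true")));
        printf("%d\n", do_execve_model(getname(NULL)));
        return 0;
}
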
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 43e64f6022eb..1a5073959f32 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
if (ret > 0) {
ssize_t err;
- err = generic_write_sync(file, pos, ret);
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
if (err < 0 && ret > 0)
ret = err;
}
diff --git a/fs/file.c b/fs/file.c
index 771578b33fb6..db25c2bdfe46 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -34,7 +34,7 @@ static void *alloc_fdmem(size_t size)
* vmalloc() if the allocation size will be considered "large" by the VM.
*/
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
- void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
+ void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
if (data != NULL)
return data;
}
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 3bd5ee45f7b3..46325d5c34fc 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -854,9 +854,6 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
int rc;
tid_t tid;
- if ((rc = can_set_xattr(inode, name, value, value_len)))
- return rc;
-
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
@@ -865,6 +862,9 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, value_len, flags);
+ if ((rc = can_set_xattr(inode, name, value, value_len)))
+ return rc;
+
if (value == NULL) { /* empty EA, do not remove */
value = "";
value_len = 0;
@@ -1034,9 +1034,6 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
int rc;
tid_t tid;
- if ((rc = can_set_xattr(inode, name, NULL, 0)))
- return rc;
-
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
@@ -1045,6 +1042,9 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_removexattr(dentry, name);
+ if ((rc = can_set_xattr(inode, name, NULL, 0)))
+ return rc;
+
tid = txBegin(inode->i_sb, 0);
mutex_lock(&ji->commit_mutex);
rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
@@ -1061,7 +1061,7 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
* attributes are handled directly.
*/
const struct xattr_handler *jfs_xattr_handlers[] = {
-#ifdef JFS_POSIX_ACL
+#ifdef CONFIG_JFS_POSIX_ACL
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
#endif
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 5104cf5d25c5..bd6e18be6e1a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -187,19 +187,23 @@ static void kernfs_deactivate(struct kernfs_node *kn)
kn->u.completion = (void *)&wait;
- rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+ if (kn->flags & KERNFS_LOCKDEP)
+ rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
/* atomic_add_return() is a mb(), put_active() will always see
* the updated kn->u.completion.
*/
v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);
if (v != KN_DEACTIVATED_BIAS) {
- lock_contended(&kn->dep_map, _RET_IP_);
+ if (kn->flags & KERNFS_LOCKDEP)
+ lock_contended(&kn->dep_map, _RET_IP_);
wait_for_completion(&wait);
}
- lock_acquired(&kn->dep_map, _RET_IP_);
- rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ if (kn->flags & KERNFS_LOCKDEP) {
+ lock_acquired(&kn->dep_map, _RET_IP_);
+ rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ }
}
/**
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e066a3902973..ab798a88ec1d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
+ loff_t fl_start, fl_end;
dprintk("lockd: grant blocked lock %p\n", block);
@@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
}
/* Try the lock operation again */
+ /* vfs_lock_file() can mangle fl_start and fl_end, but we need
+ * them unchanged for the GRANT_MSG
+ */
lock->fl.fl_flags |= FL_SLEEP;
+ fl_start = lock->fl.fl_start;
+ fl_end = lock->fl.fl_end;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.fl_start = fl_start;
+ lock->fl.fl_end = fl_end;
switch (error) {
case 0:
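
The lockd fix above saves fl_start/fl_end around vfs_lock_file() because the lock operation may rewrite them, while the GRANT_MSG sent afterwards must carry the client's original range. The same guard pattern in miniature (stand-in types, not the kernel's struct file_lock):

#include <stdio.h>

struct file_lock { long long fl_start, fl_end; };

/* stand-in for vfs_lock_file(): may clobber the range it was handed */
static int try_lock(struct file_lock *fl)
{
        fl->fl_start = 0;
        fl->fl_end = -1;        /* "whole file", for example */
        return 0;
}

int main(void)
{
        struct file_lock lock = { 100, 199 };
        long long fl_start = lock.fl_start;     /* save */
        long long fl_end = lock.fl_end;

        try_lock(&lock);

        lock.fl_start = fl_start;               /* restore for the grant msg */
        lock.fl_end = fl_end;
        printf("grant range [%lld, %lld]\n", lock.fl_start, lock.fl_end);
        return 0;
}
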
diff --git a/fs/namei.c b/fs/namei.c
index d580df2e6804..385f7817bfcc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -196,6 +196,7 @@ recopy:
goto error;
result->uptr = filename;
+ result->aname = NULL;
audit_getname(result);
return result;
@@ -210,6 +211,35 @@ getname(const char __user * filename)
return getname_flags(filename, 0, NULL);
}
+/*
+ * The "getname_kernel()" interface doesn't do pathnames longer
+ * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user.
+ */
+struct filename *
+getname_kernel(const char * filename)
+{
+ struct filename *result;
+ char *kname;
+ int len;
+
+ len = strlen(filename);
+ if (len >= EMBEDDED_NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ result = __getname();
+ if (unlikely(!result))
+ return ERR_PTR(-ENOMEM);
+
+ kname = (char *)result + sizeof(*result);
+ result->name = kname;
+ result->uptr = NULL;
+ result->aname = NULL;
+ result->separate = false;
+
+ strlcpy(kname, filename, EMBEDDED_NAME_MAX);
+ return result;
+}
+
#ifdef CONFIG_AUDITSYSCALL
void putname(struct filename *name)
{
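
getname_kernel(), added above, packs the struct and the copied string into a single allocation and refuses names that would not fit the embedded buffer. A small model of that layout; EMBEDDED_NAME_MAX here is an illustrative constant, not the kernel's value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EMBEDDED_NAME_MAX 64

struct kname {
        const char *name;       /* points just past the struct */
};

static struct kname *getname_kernel_model(const char *filename)
{
        size_t len = strlen(filename);
        struct kname *result;
        char *kname;

        if (len >= EMBEDDED_NAME_MAX)
                return NULL;    /* caller maps this to -ENAMETOOLONG */

        /* one allocation: struct header followed by the embedded name */
        result = malloc(sizeof(*result) + EMBEDDED_NAME_MAX);
        if (!result)
                return NULL;

        kname = (char *)result + sizeof(*result);
        memcpy(kname, filename, len + 1);
        result->name = kname;
        return result;
}

int main(void)
{
        struct kname *n = getname_kernel_model("/etc/hosts");

        if (n) {
                printf("%s\n", n->name);
                free(n);
        }
        return 0;
}
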
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index be38b573495a..4a48fe4b84b6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1846,6 +1846,11 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
GFP_KERNEL)) {
SetPageUptodate(page);
unlock_page(page);
+ /*
+ * add_to_page_cache_lru() grabs an extra page refcount.
+ * Drop it here to avoid leaking this page later.
+ */
+ page_cache_release(page);
} else
__free_page(page);
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9a5ca03fa539..871d6eda8dba 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -80,7 +80,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
}
if (res.acl_access != NULL) {
- if (posix_acl_equiv_mode(res.acl_access, NULL) ||
+ if ((posix_acl_equiv_mode(res.acl_access, NULL) == 0) ||
res.acl_access->a_count == 0) {
posix_acl_release(res.acl_access);
res.acl_access = NULL;
@@ -113,7 +113,7 @@ getout:
return ERR_PTR(status);
}
-int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
{
struct nfs_server *server = NFS_SERVER(inode);
@@ -198,6 +198,15 @@ out:
return status;
}
+int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+ struct posix_acl *dfacl)
+{
+ int ret;
+ ret = __nfs3_proc_setacls(inode, acl, dfacl);
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+
+}
+
int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
struct posix_acl *alloc = NULL, *dfacl = NULL;
@@ -225,7 +234,7 @@ int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (IS_ERR(alloc))
goto fail;
}
- status = nfs3_proc_setacls(inode, acl, dfacl);
+ status = __nfs3_proc_setacls(inode, acl, dfacl);
posix_acl_release(alloc);
return status;
@@ -233,25 +242,6 @@ fail:
return PTR_ERR(alloc);
}
-int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
- umode_t mode)
-{
- struct posix_acl *default_acl, *acl;
- int error;
-
- error = posix_acl_create(dir, &mode, &default_acl, &acl);
- if (error)
- return (error == -EOPNOTSUPP) ? 0 : error;
-
- error = nfs3_proc_setacls(inode, acl, default_acl);
-
- if (acl)
- posix_acl_release(acl);
- if (default_acl)
- posix_acl_release(default_acl);
- return error;
-}
-
const struct xattr_handler *nfs3_xattr_handlers[] = {
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
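
After the nfs3acl.c rework above, internal callers use __nfs3_proc_setacls() and see the raw server error, while the exported nfs3_proc_setacls() folds -EOPNOTSUPP into success for callers that only care whether ACLs could be applied. The shape of that split, with stand-in names:

#include <stdio.h>

#define EOPNOTSUPP 95

/* internal helper: reports exactly what the server said */
static int setacls_raw(int server_has_acls)
{
        return server_has_acls ? 0 : -EOPNOTSUPP;
}

/* exported wrapper: "no ACL support" is not an error for these callers */
static int setacls(int server_has_acls)
{
        int ret = setacls_raw(server_has_acls);

        return (ret == -EOPNOTSUPP) ? 0 : ret;
}

int main(void)
{
        printf("internal: %d, exported: %d\n", setacls_raw(0), setacls(0));
        return 0;
}
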
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index dbb3e1f30c68..860ad26a5590 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -170,7 +170,7 @@ void nfs41_shutdown_client(struct nfs_client *clp)
void nfs40_shutdown_client(struct nfs_client *clp)
{
if (clp->cl_slot_tbl) {
- nfs4_release_slot_table(clp->cl_slot_tbl);
+ nfs4_shutdown_slot_table(clp->cl_slot_tbl);
kfree(clp->cl_slot_tbl);
}
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 42da6af77587..2da6a698b8f7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1620,15 +1620,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
- nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args,
- &data->o_res.seq_res, task);
+ nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
+ &data->c_res.seq_res, task);
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
- nfs40_sequence_done(task, &data->o_res.seq_res);
+ nfs40_sequence_done(task, &data->c_res.seq_res);
data->rpc_status = task->tk_status;
if (data->rpc_status == 0) {
@@ -1686,7 +1686,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
};
int status;
- nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1);
+ nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index cf883c7ae053..e799dc3c3b1d 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -231,14 +231,23 @@ out:
return ret;
}
+/*
+ * nfs4_release_slot_table - release all slot table entries
+ */
+static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+{
+ nfs4_shrink_slot_table(tbl, 0);
+}
+
/**
- * nfs4_release_slot_table - release resources attached to a slot table
+ * nfs4_shutdown_slot_table - release resources attached to a slot table
* @tbl: slot table to shut down
*
*/
-void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
- nfs4_shrink_slot_table(tbl, 0);
+ nfs4_release_slot_table(tbl);
+ rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}
/**
@@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
spin_unlock(&tbl->slot_tbl_lock);
}
-static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
nfs4_release_slot_table(&session->fc_slot_table);
nfs4_release_slot_table(&session->bc_slot_table);
@@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
if (status && tbl->slots == NULL)
/* Fore and back channel share a connection so get
* both slot tables or neither */
- nfs4_destroy_session_slot_tables(ses);
+ nfs4_release_session_slot_tables(ses);
return status;
}
@@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
return session;
}
+static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+{
+ nfs4_shutdown_slot_table(&session->fc_slot_table);
+ nfs4_shutdown_slot_table(&session->bc_slot_table);
+}
+
void nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
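
The nfs4session.c rename above separates two teardown depths: releasing a slot table only shrinks it to zero slots (it may be repopulated at the next session negotiation), while shutting it down also destroys the RPC wait queue and is done once, at client or session destruction. A hedged sketch of the two levels, using plain fields instead of the NFS structures:

#include <stdio.h>
#include <stdlib.h>

struct slot_table {
        int *slots;
        int  nslots;
        int  waitq_live;        /* stand-in for the RPC wait queue */
};

/* reusable: drop the slots, keep the wait queue for the next session */
static void release_slot_table(struct slot_table *tbl)
{
        free(tbl->slots);
        tbl->slots = NULL;
        tbl->nslots = 0;
}

/* final: release and tear down the wait queue as well */
static void shutdown_slot_table(struct slot_table *tbl)
{
        release_slot_table(tbl);
        tbl->waitq_live = 0;
}

int main(void)
{
        struct slot_table tbl = { malloc(8 * sizeof(int)), 8, 1 };

        release_slot_table(&tbl);       /* e.g. between sessions */
        shutdown_slot_table(&tbl);      /* client going away */
        printf("slots=%d waitq=%d\n", tbl.nslots, tbl.waitq_live);
        return 0;
}
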
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 232306100651..b34ada9bc6a2 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -74,7 +74,7 @@ enum nfs4_session_state {
extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
unsigned int max_reqs, const char *queue);
-extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl);
+extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index d3a587144222..d190e33d0ec2 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -151,17 +151,15 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(pacl))
return PTR_ERR(pacl);
- /* allocate for worst case: one (deny, allow) pair each: */
- size += 2 * pacl->a_count;
}
+ /* allocate for worst case: one (deny, allow) pair each: */
+ size += 2 * pacl->a_count;
if (S_ISDIR(inode->i_mode)) {
flags = NFS4_ACL_DIR;
dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
if (dpacl)
size += 2 * dpacl->a_count;
- } else {
- dpacl = NULL;
}
*acl = nfs4_acl_new(size);
@@ -170,8 +168,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
goto out;
}
- if (pacl)
- _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
+ _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
if (dpacl)
_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ea4ba9daeb47..db9bd8a31725 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2134,7 +2134,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0) {
- int err = generic_write_sync(file, pos, ret);
+ int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
if (err < 0)
ret = err;
}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 8750ae1b8636..e2edff38be52 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4742,6 +4742,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
enum ocfs2_alloc_restarted *reason_ret)
{
int status = 0, err = 0;
+ int need_free = 0;
int free_extents;
enum ocfs2_alloc_restarted reason = RESTART_NONE;
u32 bit_off, num_bits;
@@ -4796,7 +4797,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ need_free = 1;
+ goto bail;
}
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
@@ -4807,7 +4809,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
num_bits, flags, meta_ac);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ need_free = 1;
+ goto bail;
}
ocfs2_journal_dirty(handle, et->et_root_bh);
@@ -4821,6 +4824,19 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
reason = RESTART_TRANS;
}
+bail:
+ if (need_free) {
+ if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+ bit_off, num_bits);
+ else
+ ocfs2_free_clusters(handle,
+ data_ac->ac_inode,
+ data_ac->ac_bh,
+ ocfs2_clusters_to_blocks(osb->sb, bit_off),
+ num_bits);
+ }
+
leave:
if (reason_ret)
*reason_ret = reason;
@@ -6805,6 +6821,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, i, has_data, num_pages = 0;
+ int need_free = 0;
+ u32 bit_off, num;
handle_t *handle;
u64 uninitialized_var(block);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
@@ -6850,7 +6868,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
if (has_data) {
- u32 bit_off, num;
unsigned int page_end;
u64 phys;
@@ -6886,6 +6903,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
if (ret) {
mlog_errno(ret);
+ need_free = 1;
goto out_commit;
}
@@ -6896,6 +6914,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
if (ret) {
mlog_errno(ret);
+ need_free = 1;
goto out_commit;
}
@@ -6927,6 +6946,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
if (ret) {
mlog_errno(ret);
+ need_free = 1;
goto out_commit;
}
@@ -6938,6 +6958,18 @@ out_commit:
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
+ if (need_free) {
+ if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+ bit_off, num);
+ else
+ ocfs2_free_clusters(handle,
+ data_ac->ac_inode,
+ data_ac->ac_bh,
+ ocfs2_clusters_to_blocks(osb->sb, bit_off),
+ num);
+ }
+
ocfs2_commit_trans(osb, handle);
out_unlock:
@@ -7126,7 +7158,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
if (end > i_size_read(inode))
end = i_size_read(inode);
- BUG_ON(start >= end);
+ BUG_ON(start > end);
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
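
Both ocfs2/alloc.c paths above add a need_free flag so that clusters already claimed from an allocator are handed back when a later step fails, with the giveback routed to either the local-alloc bitmap or the global bitmap depending on which allocator satisfied the claim. The control flow, reduced to a standalone sketch with made-up values:

#include <stdio.h>

enum which_alloc { USE_LOCAL, USE_MAIN };

static void free_local_bits(int off, int n) { printf("local free %d+%d\n", off, n); }
static void free_clusters(int off, int n)   { printf("main free %d+%d\n", off, n); }

static int add_clusters(enum which_alloc which, int fail_after_claim)
{
        int status = 0, need_free = 0;
        int bit_off = 100, num_bits = 4;        /* pretend these were claimed */

        if (fail_after_claim) {
                status = -5;            /* e.g. journal access failed */
                need_free = 1;
                goto bail;
        }
        /* ... insert the extent, dirty the journal ... */

bail:
        if (need_free) {
                if (which == USE_LOCAL)
                        free_local_bits(bit_off, num_bits);
                else
                        free_clusters(bit_off, num_bits);
        }
        return status;
}

int main(void)
{
        add_clusters(USE_LOCAL, 1);
        add_clusters(USE_MAIN, 1);
        return 0;
}
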
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d77d71ead8d1..8450262bcf2a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -185,6 +185,9 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
file->f_path.dentry->d_name.name,
(unsigned long long)datasync);
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ return -EROFS;
+
err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (err)
return err;
@@ -474,11 +477,6 @@ static int ocfs2_truncate_file(struct inode *inode,
goto bail;
}
- /* lets handle the simple truncate cases before doing any more
- * cluster locking. */
- if (new_i_size == le64_to_cpu(fe->i_size))
- goto bail;
-
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_resv_discard(&osb->osb_la_resmap,
@@ -718,7 +716,8 @@ leave:
* While a write will already be ordering the data, a truncate will not.
* Thus, we need to explicitly order the zeroed pages.
*/
-static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
+ struct buffer_head *di_bh)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
@@ -735,7 +734,14 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
}
ret = ocfs2_jbd2_file_inode(handle, inode);
- if (ret < 0)
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret)
mlog_errno(ret);
out:
@@ -751,7 +757,7 @@ out:
* to be too fragile to do exactly what we need without us having to
* worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
- u64 abs_to)
+ u64 abs_to, struct buffer_head *di_bh)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -759,6 +765,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
handle_t *handle = NULL;
int ret = 0;
unsigned zero_from, zero_to, block_start, block_end;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
BUG_ON(abs_from >= abs_to);
BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
@@ -801,7 +808,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
}
if (!handle) {
- handle = ocfs2_zero_start_ordered_transaction(inode);
+ handle = ocfs2_zero_start_ordered_transaction(inode,
+ di_bh);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
@@ -818,8 +826,22 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
ret = 0;
}
- if (handle)
+ if (handle) {
+ /*
+ * fs-writeback may release dirty pages whose offset is beyond the
+ * inode size without holding the page lock; the release happens at
+ * block_write_full_page_endio().
+ */
+ i_size_write(inode, abs_to);
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ di->i_size = cpu_to_le64((u64)i_size_read(inode));
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ di->i_mtime_nsec = di->i_ctime_nsec;
+ ocfs2_journal_dirty(handle, di_bh);
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+ }
out_unlock:
unlock_page(page);
@@ -915,7 +937,7 @@ out:
* has made sure that the entire range needs zeroing.
*/
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
- u64 range_end)
+ u64 range_end, struct buffer_head *di_bh)
{
int rc = 0;
u64 next_pos;
@@ -931,7 +953,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
if (next_pos > range_end)
next_pos = range_end;
- rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+ rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
if (rc < 0) {
mlog_errno(rc);
break;
@@ -977,7 +999,7 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
range_end = zero_to_size;
ret = ocfs2_zero_extend_range(inode, range_start,
- range_end);
+ range_end, di_bh);
if (ret) {
mlog_errno(ret);
break;
@@ -1145,14 +1167,14 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
goto bail_unlock_rw;
}
- if (size_change && attr->ia_size != i_size_read(inode)) {
+ if (size_change) {
status = inode_newsize_ok(inode, attr->ia_size);
if (status)
goto bail_unlock;
inode_dio_wait(inode);
- if (i_size_read(inode) > attr->ia_size) {
+ if (i_size_read(inode) >= attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
status = ocfs2_begin_ordered_truncate(inode,
attr->ia_size);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index cd5496b7a0a3..044013455621 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -781,6 +781,48 @@ bail:
return status;
}
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 bit_off,
+ u32 num_bits)
+{
+ int status, start;
+ u32 clear_bits;
+ struct inode *local_alloc_inode;
+ void *bitmap;
+ struct ocfs2_dinode *alloc;
+ struct ocfs2_local_alloc *la;
+
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
+
+ local_alloc_inode = ac->ac_inode;
+ alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
+
+ bitmap = la->la_bitmap;
+ start = bit_off - le32_to_cpu(la->la_bm_off);
+ clear_bits = num_bits;
+
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(local_alloc_inode),
+ osb->local_alloc_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ while (clear_bits--)
+ ocfs2_clear_bit(start++, bitmap);
+
+ le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
+ ocfs2_journal_dirty(handle, osb->local_alloc_bh);
+
+bail:
+ return status;
+}
+
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
u32 count;
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 1be9b5864460..44a7d1fb2dec 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -55,6 +55,12 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
u32 *bit_off,
u32 *num_bits);
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 bit_off,
+ u32 num_bits);
+
void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
unsigned int num_clusters);
void ocfs2_la_enable_worker(struct work_struct *work);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f4d609be9400..3683643f3f0e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -664,6 +664,7 @@ static int ocfs2_link(struct dentry *old_dentry,
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
+ u64 old_de_ino;
trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
old_dentry->d_name.len, old_dentry->d_name.name,
@@ -686,6 +687,22 @@ static int ocfs2_link(struct dentry *old_dentry,
goto out;
}
+ err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+ old_dentry->d_name.len, &old_de_ino);
+ if (err) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ /*
+ * Check whether another node removed the source inode while we
+ * were in the vfs.
+ */
+ if (old_de_ino != OCFS2_I(inode)->ip_blkno) {
+ err = -ENOENT;
+ goto out;
+ }
+
err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
dentry->d_name.len);
if (err)
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 38bae5a0ea25..11c54fd51e16 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -521,8 +521,11 @@ posix_acl_chmod(struct inode *inode, umode_t mode)
return -EOPNOTSUPP;
acl = get_acl(inode, ACL_TYPE_ACCESS);
- if (IS_ERR_OR_NULL(acl))
+ if (IS_ERR_OR_NULL(acl)) {
+ if (acl == ERR_PTR(-EOPNOTSUPP))
+ return 0;
return PTR_ERR(acl);
+ }
ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
if (ret)
@@ -544,14 +547,15 @@ posix_acl_create(struct inode *dir, umode_t *mode,
goto no_acl;
p = get_acl(dir, ACL_TYPE_DEFAULT);
- if (IS_ERR(p))
+ if (IS_ERR(p)) {
+ if (p == ERR_PTR(-EOPNOTSUPP))
+ goto apply_umask;
return PTR_ERR(p);
-
- if (!p) {
- *mode &= ~current_umask();
- goto no_acl;
}
+ if (!p)
+ goto apply_umask;
+
*acl = posix_acl_clone(p, GFP_NOFS);
if (!*acl)
return -ENOMEM;
@@ -575,6 +579,8 @@ posix_acl_create(struct inode *dir, umode_t *mode,
}
return 0;
+apply_umask:
+ *mode &= ~current_umask();
no_acl:
*default_acl = NULL;
*acl = NULL;
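
The posix_acl.c hunks above make both helpers treat a filesystem that cannot return ACLs (get_acl() yielding -EOPNOTSUPP) like a directory with no default ACL: chmod has nothing to do, and create falls back to applying the umask. A small model of that decision; the error is passed as a plain int here instead of an ERR_PTR:

#include <errno.h>
#include <stdio.h>

struct acl { int count; };

/*
 * err < 0 models an ERR_PTR() return; err == 0 with acl == NULL means
 * "no default ACL on this directory".
 */
static int acl_create_mode(const struct acl *acl, int err,
                           unsigned int *mode, unsigned int umask)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        goto apply_umask;       /* fs has no ACLs at all */
                return err;
        }
        if (!acl)
                goto apply_umask;

        /* ... clone the ACL and derive the mode from it ... */
        return 0;

apply_umask:
        *mode &= ~umask;
        return 0;
}

int main(void)
{
        unsigned int mode = 0666;

        acl_create_mode(NULL, -EOPNOTSUPP, &mode, 022);
        printf("mode=%o\n", mode);
        return 0;
}
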
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 2ca7ba047f04..88d4585b30f1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -468,17 +468,24 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
return rc;
}
nhdr_ptr = notes_section;
- while (real_sz < max_sz) {
- if (nhdr_ptr->n_namesz == 0)
- break;
+ while (nhdr_ptr->n_namesz != 0) {
sz = sizeof(Elf64_Nhdr) +
((nhdr_ptr->n_namesz + 3) & ~3) +
((nhdr_ptr->n_descsz + 3) & ~3);
+ if ((real_sz + sz) > max_sz) {
+ pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+ nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+ break;
+ }
real_sz += sz;
nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
}
kfree(notes_section);
phdr_ptr->p_memsz = real_sz;
+ if (real_sz == 0) {
+ pr_warn("Warning: Zero PT_NOTE entries found\n");
+ return -EINVAL;
+ }
}
return 0;
@@ -648,17 +655,24 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
return rc;
}
nhdr_ptr = notes_section;
- while (real_sz < max_sz) {
- if (nhdr_ptr->n_namesz == 0)
- break;
+ while (nhdr_ptr->n_namesz != 0) {
sz = sizeof(Elf32_Nhdr) +
((nhdr_ptr->n_namesz + 3) & ~3) +
((nhdr_ptr->n_descsz + 3) & ~3);
+ if ((real_sz + sz) > max_sz) {
+ pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+ nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+ break;
+ }
real_sz += sz;
nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
}
kfree(notes_section);
phdr_ptr->p_memsz = real_sz;
+ if (real_sz == 0) {
+ pr_warn("Warning: Zero PT_NOTE entries found\n");
+ return -EINVAL;
+ }
}
return 0;
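
The vmcore loops above now size each ELF note (header plus name and descriptor, each padded to 4 bytes) before accepting it, so corrupt n_namesz/n_descsz fields can no longer push real_sz past p_memsz, and a note segment with no entries is rejected. The same bounded walk as a standalone sketch with illustrative data:

#include <stdint.h>
#include <stdio.h>

struct nhdr { uint32_t n_namesz, n_descsz, n_type; };

/* round up to the 4-byte alignment ELF notes use */
static uint64_t align4(uint64_t x) { return (x + 3) & ~3ULL; }

/* returns the validated total size, or 0 if no complete entry fits */
static uint64_t walk_notes(const struct nhdr *nhdr, uint64_t max_sz)
{
        uint64_t real_sz = 0;

        while (nhdr->n_namesz != 0) {
                uint64_t sz = sizeof(*nhdr) + align4(nhdr->n_namesz) +
                              align4(nhdr->n_descsz);

                if (real_sz + sz > max_sz) {
                        fprintf(stderr, "dropping oversized note\n");
                        break;
                }
                real_sz += sz;
                nhdr = (const struct nhdr *)((const char *)nhdr + sz);
        }
        return real_sz;
}

int main(void)
{
        /* one note: 4-byte name, 5-byte desc -> 12 + 4 + 8 = 24 bytes,
         * followed by zeroed memory that reads as the terminating header */
        struct nhdr buf[8] = { { 4, 5, 1 } };

        printf("%llu\n", (unsigned long long)walk_notes(buf, sizeof(buf)));
        return 0;
}
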
diff --git a/fs/sync.c b/fs/sync.c
index f15537452231..e8ba024a055b 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -222,23 +222,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
return do_fsync(fd, 1);
}
-/**
- * generic_write_sync - perform syncing after a write if file / inode is sync
- * @file: file to which the write happened
- * @pos: offset where the write started
- * @count: length of the write
- *
- * This is just a simple wrapper about our general syncing function.
- */
-int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
- if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
- return 0;
- return vfs_fsync_range(file, pos, pos + count - 1,
- (file->f_flags & __O_SYNC) ? 0 : 1);
-}
-EXPORT_SYMBOL(generic_write_sync);
-
/*
* sys_sync_file_range() permits finely controlled syncing over a segment of
* a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2e7989e3a2d6..64b48eade91d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -799,7 +799,7 @@ xfs_file_aio_write(
XFS_STATS_ADD(xs_write_bytes, ret);
/* Handle various SYNC-type writes */
- err = generic_write_sync(file, pos, ret);
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
if (err < 0)
ret = err;
}
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 04086c5be930..04a7f31301f8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -199,6 +199,9 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_INFO_ONCE(fmt, ...) \
+ printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
/**
* Debug output.
*
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fd8bf3219ef7..b4a745d7d9a9 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -115,7 +115,6 @@ extern int copy_strings_kernel(int argc, const char *const *argv,
extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
-extern void free_bprm(struct linux_binprm *);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
#endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 70654521dab6..5a4d39b4686b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio)
struct bio_vec bv;
struct bvec_iter iter;
+ /*
+ * We special case discard/write same, because they interpret bi_size
+ * differently:
+ */
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
bio_for_each_segment(bv, bio, iter)
segs++;
@@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set *fs_bio_set;
+unsigned int bio_integrity_tag_size(struct bio *bio);
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 161b23105b1e..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -83,6 +83,8 @@ struct blk_mq_ops {
*/
rq_timed_out_fn *timeout;
+ softirq_done_fn *complete;
+
/*
* Override for hctx allocations (should probably go)
*/
@@ -119,11 +121,12 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request_queue *, struct request *,
+ bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
@@ -133,6 +136,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
+
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8678c4322b44..4afa4f8f6090 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -98,7 +98,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- struct work_struct mq_flush_data;
+ struct work_struct mq_flush_work;
};
struct request_queue *q;
@@ -448,13 +448,8 @@ struct request_queue {
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
- union {
- struct request flush_rq;
- struct {
- spinlock_t mq_flush_lock;
- struct work_struct mq_flush_work;
- };
- };
+ struct request *flush_rq;
+ spinlock_t mq_flush_lock;
struct mutex sysfs_lock;
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 2f0543f7510c..f9bbbb472663 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -11,7 +11,9 @@
#define CAN_SKB_H
#include <linux/types.h>
+#include <linux/skbuff.h>
#include <linux/can.h>
+#include <net/sock.h>
/*
* The struct can_skb_priv is used to transport additional information along
@@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
skb_reserve(skb, sizeof(struct can_skb_priv));
}
+static inline void can_skb_destructor(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+
+static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk) {
+ sock_hold(sk);
+ skb->destructor = can_skb_destructor;
+ skb->sk = sk;
+ }
+}
+
+/*
+ * returns an unshared skb owned by the original sock to be echo'ed back
+ */
+static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+{
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (likely(nskb)) {
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+ } else {
+ kfree_skb(skb);
+ return NULL;
+ }
+ }
+
+ /* we can assume to have an unshared skb with proper owner */
+ return skb;
+}
+
#endif /* CAN_SKB_H */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index ded429966c1f..2507fd2a1eb4 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -75,11 +75,7 @@
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
-#if GCC_VERSION <= 40801
-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-#else
-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
-#endif
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 09f553c59813..60829565e552 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2079,6 +2079,7 @@ extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname(const char __user *);
+extern struct filename *getname_kernel(const char *);
enum {
FILE_CREATED = 1,
@@ -2273,7 +2274,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+ if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+ return 0;
+ return vfs_fsync_range(file, pos, pos + count - 1,
+ (file->f_flags & __O_SYNC) ? 0 : 1);
+}
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
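
generic_write_sync(), now an inline above, still syncs the inclusive byte range [pos, pos + count - 1]; what changed is its callers. The direct-I/O paths touched in this series (cifs, ext4, ntfs, xfs) call it after iocb->ki_pos has already been advanced past the written bytes, so they recover the start of the range as ki_pos - ret instead of using a stale pre-write pos. The arithmetic, with illustrative numbers:

#include <stdio.h>

int main(void)
{
        long long pos_before = 4096;    /* offset the write started at */
        long long ret        = 512;     /* bytes actually written */
        long long ki_pos     = pos_before + ret;    /* already advanced */

        long long start = ki_pos - ret;             /* == pos_before */
        long long end   = start + ret - 1;          /* inclusive range end */

        printf("sync [%lld, %lld]\n", start, end);  /* sync [4096, 4607] */
        return 0;
}
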
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 4d34dbbbad4d..7a8144fef406 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -4,8 +4,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
-#ifdef CONFIG_GPIOLIB
-
struct device;
struct gpio_chip;
@@ -18,6 +16,8 @@ struct gpio_chip;
*/
struct gpio_desc;
+#ifdef CONFIG_GPIOLIB
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 15da677478dd..344883dce584 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -875,7 +875,7 @@ struct vmbus_channel_relid_released {
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
- u32 padding2;
+ u32 target_vcpu; /* The VCPU the host should respond to */
u64 interrupt_page;
u64 monitor_page1;
u64 monitor_page2;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 554548cd3dd4..130bc8d77fa5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -38,8 +38,10 @@
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
+
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
* protect uuar allocation data structs
*/
struct mutex lock;
+ u32 ver;
};
struct mlx5_bf {
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 3ccfcecf8999..b2fb167b2e6d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,12 +379,14 @@ struct nfs_openres {
* Arguments to the open_confirm call.
*/
struct nfs_open_confirmargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
nfs4_stateid * stateid;
struct nfs_seqid * seqid;
};
struct nfs_open_confirmres {
+ struct nfs4_sequence_res seq_res;
nfs4_stateid stateid;
struct nfs_seqid * seqid;
};
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 26ebcf41c213..69ae03f6eb15 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -80,13 +80,14 @@ struct nvme_dev {
struct dma_pool *prp_small_pool;
int instance;
int queue_count;
- int db_stride;
+ u32 db_stride;
u32 ctrl_config;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
struct list_head namespaces;
struct kref kref;
struct miscdevice miscdev;
+ struct work_struct reset_work;
char name[12];
char serial[20];
char model[40];
@@ -94,6 +95,8 @@ struct nvme_dev {
u32 max_hw_sectors;
u32 stripe_size;
u16 oncs;
+ u16 abort_limit;
+ u8 initialized;
};
/*
@@ -165,6 +168,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
struct sg_io_hdr;
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 70c64ba17fa5..435cb995904d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
-#define for_each_node_by_name(dn, name) \
- for (dn = of_find_node_by_name(NULL, name); dn; \
- dn = of_find_node_by_name(dn, name))
extern struct device_node *of_find_node_by_type(struct device_node *from,
const char *type);
-#define for_each_node_by_type(dn, type) \
- for (dn = of_find_node_by_type(NULL, type); dn; \
- dn = of_find_node_by_type(dn, type))
extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat);
-#define for_each_compatible_node(dn, type, compatible) \
- for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
- dn = of_find_compatible_node(dn, type, compatible))
extern struct device_node *of_find_matching_node_and_match(
struct device_node *from,
const struct of_device_id *matches,
const struct of_device_id **match);
-static inline struct device_node *of_find_matching_node(
- struct device_node *from,
- const struct of_device_id *matches)
-{
- return of_find_matching_node_and_match(from, matches, NULL);
-}
-#define for_each_matching_node(dn, matches) \
- for (dn = of_find_matching_node(NULL, matches); dn; \
- dn = of_find_matching_node(dn, matches))
-#define for_each_matching_node_and_match(dn, matches, match) \
- for (dn = of_find_matching_node_and_match(NULL, matches, match); \
- dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
@@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child(
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
-#define for_each_child_of_node(parent, child) \
- for (child = of_get_next_child(parent, NULL); child != NULL; \
- child = of_get_next_child(parent, child))
-
-#define for_each_available_child_of_node(parent, child) \
- for (child = of_get_next_available_child(parent, NULL); child != NULL; \
- child = of_get_next_available_child(parent, child))
-
-static inline int of_get_child_count(const struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_child_of_node(np, child)
- num++;
-
- return num;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- num++;
-
- return num;
-}
/* cache lookup */
extern struct device_node *of_find_next_cache_node(const struct device_node *);
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
-#define for_each_node_with_property(dn, prop_name) \
- for (dn = of_find_node_with_property(NULL, prop_name); dn; \
- dn = of_find_node_with_property(dn, prop_name))
extern struct property *of_find_property(const struct device_node *np,
const char *name,
@@ -367,42 +315,53 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from,
return NULL;
}
-static inline struct device_node *of_get_parent(const struct device_node *node)
+static inline struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type)
{
return NULL;
}
-static inline bool of_have_populated_dt(void)
+static inline struct device_node *of_find_matching_node_and_match(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ const struct of_device_id **match)
{
- return false;
+ return NULL;
}
-/* Kill an unused variable warning on a device_node pointer */
-static inline void __of_use_dn(const struct device_node *np)
+static inline struct device_node *of_get_parent(const struct device_node *node)
{
+ return NULL;
}
-#define for_each_child_of_node(parent, child) \
- while (__of_use_dn(parent), __of_use_dn(child), 0)
+static inline struct device_node *of_get_next_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
-#define for_each_available_child_of_node(parent, child) \
- while (0)
+static inline struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
-static inline struct device_node *of_get_child_by_name(
- const struct device_node *node,
- const char *name)
+static inline struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name)
{
return NULL;
}
-static inline int of_get_child_count(const struct device_node *np)
+static inline bool of_have_populated_dt(void)
{
- return 0;
+ return false;
}
-static inline int of_get_available_child_count(const struct device_node *np)
+static inline struct device_node *of_get_child_by_name(
+ const struct device_node *node,
+ const char *name)
{
- return 0;
+ return NULL;
}
static inline int of_device_is_compatible(const struct device_node *device,
@@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np);
static inline int of_node_to_nid(struct device_node *device) { return 0; }
#endif
+static inline struct device_node *of_find_matching_node(
+ struct device_node *from,
+ const struct of_device_id *matches)
+{
+ return of_find_matching_node_and_match(from, matches, NULL);
+}
+
/**
* of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
@@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np,
s; \
s = of_prop_next_string(prop, s))
+#define for_each_node_by_name(dn, name) \
+ for (dn = of_find_node_by_name(NULL, name); dn; \
+ dn = of_find_node_by_name(dn, name))
+#define for_each_node_by_type(dn, type) \
+ for (dn = of_find_node_by_type(NULL, type); dn; \
+ dn = of_find_node_by_type(dn, type))
+#define for_each_compatible_node(dn, type, compatible) \
+ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+ dn = of_find_compatible_node(dn, type, compatible))
+#define for_each_matching_node(dn, matches) \
+ for (dn = of_find_matching_node(NULL, matches); dn; \
+ dn = of_find_matching_node(dn, matches))
+#define for_each_matching_node_and_match(dn, matches, match) \
+ for (dn = of_find_matching_node_and_match(NULL, matches, match); \
+ dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
+#define for_each_child_of_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+#define for_each_available_child_of_node(parent, child) \
+ for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ child = of_get_next_available_child(parent, child))
+
+#define for_each_node_with_property(dn, prop_name) \
+ for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+ dn = of_find_node_with_property(dn, prop_name))
+
+static inline int of_get_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d7dd6768cb7..ef370210ffb2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev,
static inline void of_device_node_put(struct device *dev) { }
-static inline const struct of_device_id *of_match_device(
+static inline const struct of_device_id *__of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
+#define of_match_device(matches, dev) \
+ __of_match_device(of_match_ptr(matches), (dev))
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e464b4e987e8..d1fe1a761047 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -228,9 +228,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
#ifdef CONFIG_HIGHMEM
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68a0e84463a0..a781dec1cd0b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -128,6 +128,7 @@ struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
+struct filename;
/*
* List of flags we want to share for kernel threads,
@@ -2311,7 +2312,7 @@ extern void do_group_exit(int);
extern int allow_signal(int);
extern int disallow_signal(int);
-extern int do_execve(const char *,
+extern int do_execve(struct filename *,
const char __user * const __user *,
const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3834f43f9993..6ae004e437ea 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,6 +188,9 @@ static inline void kick_all_cpus_sync(void) { }
*/
extern void arch_disable_smp_support(void);
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
void smp_setup_processor_id(void);
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca290862..4203c66d8803 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -273,7 +273,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* message while queuing transfers that arrive in the meantime. When the
* driver is finished with this message, it must call
* spi_finalize_current_message() so the subsystem can issue the next
- * transfer
+ * message
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
@@ -287,7 +287,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer
+ * can issue the next transfer. Note: transfer_one and
+ * transfer_one_message are mutually exclusive; when both
+ * are set, the generic subsystem does not call your
+ * transfer_one callback.
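As a hedged sketch of the split described above (hypothetical driver, assuming the transfer_one() prototype introduced by this series): the method starts the transfer and returns 1, and the completion path later calls spi_finalize_current_transfer():

	static int demo_transfer_one(struct spi_master *master, struct spi_device *spi,
				     struct spi_transfer *xfer)
	{
		struct demo_ctlr *ctlr = spi_master_get_devdata(master);

		demo_start_dma(ctlr, xfer);	/* hypothetical hardware kick-off */
		return 1;			/* transfer still in progress */
	}

	static irqreturn_t demo_dma_done_irq(int irq, void *data)
	{
		struct demo_ctlr *ctlr = data;

		spi_finalize_current_transfer(ctlr->master);
		return IRQ_HANDLED;
	}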
* @unprepare_message: undo any work done by prepare_message().
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index c557c6d096de..3a712e2e7d76 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif
+#endif /* CONFIG_SMP */
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
+#endif /* CONFIG_DEBUG_TLBFLUSH */
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index a67b38415768..67ce70c8279b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -83,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#define count_vm_tlb_event(x) count_vm_event(x)
+#define count_vm_tlb_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_tlb_event(x) do {} while (0)
+#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
+#endif
+
#define __count_zone_vm_events(item, zone, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
zone_idx(zone), delta)
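For illustration only (not part of this hunk): with these wrappers a TLB flush path can count unconditionally, and the events compile away when CONFIG_DEBUG_TLBFLUSH is not set. A hypothetical caller:

	static void demo_flush_tlb_all(void)
	{
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);	/* no-op without CONFIG_DEBUG_TLBFLUSH */
		__flush_tlb_all();				/* arch-specific flush, shown for context */
	}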
diff --git a/include/net/datalink.h b/include/net/datalink.h
index deb7ca75db48..93cb18f729b5 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -15,4 +15,6 @@ struct datalink_proto {
struct list_head node;
};
+struct datalink_proto *make_EII_client(void);
+void destroy_EII_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/dn.h b/include/net/dn.h
index ccc15588d108..913b73d239f5 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -200,6 +200,8 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
}
unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+void dn_register_sysctl(void);
+void dn_unregister_sysctl(void);
#define DN_MENUVER_ACC 0x01
#define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index b409ad6b8d7a..55df9939bca2 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -20,6 +20,8 @@ int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
struct sock *sk, int flags);
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
void dn_rt_cache_flush(int delay);
+int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
/* Masks for flags field */
#define DN_RT_F_PID 0x07 /* Mask for packet type */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 96f3789b27bc..2a2d6bb34eb8 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -16,6 +16,7 @@
struct ethoc_platform_data {
u8 hwaddr[IFHWADDRLEN];
s8 phy_id;
+ u32 eth_clkfreq;
};
#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 9e9e35465baf..0143180fecc9 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -140,6 +140,17 @@ static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
}
void ipxitf_down(struct ipx_interface *intrfc);
+struct ipx_interface *ipxitf_find_using_net(__be32 net);
+int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
+__be16 ipx_cksum(struct ipxhdr *packet, int length);
+int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
+ unsigned char *node);
+void ipxrtr_del_routes(struct ipx_interface *intrfc);
+int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
+ struct iovec *iov, size_t len, int noblock);
+int ipxrtr_route_skb(struct sk_buff *skb);
+struct ipx_route *ipxrtr_lookup(__be32 net);
+int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index da68c9a90ac5..991dcd94cbbf 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -162,6 +162,14 @@ extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int pid);
+#ifdef CONFIG_SYSCTL
+void ipx_register_sysctl(void);
+void ipx_unregister_sysctl(void);
+#else
+#define ipx_register_sysctl()
+#define ipx_unregister_sysctl()
+#endif
+
#ifdef CONFIG_NET_NS
void __put_net(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 01ea6eed1bb1..b2ac6246b7e0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,8 @@ extern unsigned int nf_conntrack_max;
extern unsigned int nf_conntrack_hash_rnd;
void init_nf_conntrack_hash_rnd(void);
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 57c8ff7955df..e7e14ffe0f6a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -252,6 +252,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
* @owner: module reference
* @policy: netlink attribute policy
* @maxattr: highest netlink attribute number
+ * @family: address family for AF-specific types
*/
struct nft_expr_type {
const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -262,6 +263,7 @@ struct nft_expr_type {
struct module *owner;
const struct nla_policy *policy;
unsigned int maxattr;
+ u8 family;
};
/**
@@ -320,7 +322,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
* struct nft_rule - nf_tables rule
*
* @list: used internally
- * @rcu_head: used internally for rcu
* @handle: rule handle
* @genmask: generation mask
* @dlen: length of expression data
@@ -328,7 +329,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
*/
struct nft_rule {
struct list_head list;
- struct rcu_head rcu_head;
u64 handle:46,
genmask:2,
dlen:16;
@@ -389,7 +389,6 @@ enum nft_chain_flags {
*
* @rules: list of rules in the chain
* @list: used internally
- * @rcu_head: used internally
* @net: net namespace that this chain belongs to
* @table: table that this chain belongs to
* @handle: chain handle
@@ -401,7 +400,6 @@ enum nft_chain_flags {
struct nft_chain {
struct list_head rules;
struct list_head list;
- struct rcu_head rcu_head;
struct net *net;
struct nft_table *table;
u64 handle;
@@ -529,6 +527,9 @@ void nft_unregister_expr(struct nft_expr_type *);
#define MODULE_ALIAS_NFT_CHAIN(family, name) \
MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
+ MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
+
#define MODULE_ALIAS_NFT_EXPR(name) \
MODULE_ALIAS("nft-expr-" name)
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 000000000000..36b0da2d55bb
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,25 @@
+#ifndef _NFT_REJECT_H_
+#define _NFT_REJECT_H_
+
+struct nft_reject {
+ enum nft_reject_types type:8;
+ u8 icmp_code;
+};
+
+extern const struct nla_policy nft_reject_policy[];
+
+int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+#endif
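A rough sketch (illustration only; the ops name is hypothetical, the remaining nft_expr_ops fields are omitted, and NFT_EXPR_SIZE() is assumed from nf_tables.h) of how an address-family-specific reject module can reuse the shared helpers declared here:

	static const struct nft_expr_ops demo_reject_ipv4_ops = {
		.size	= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
		.eval	= nft_reject_ipv4_eval,		/* shared IPv4 eval from this header */
		.init	= nft_reject_init,		/* shared attribute parsing */
		.dump	= nft_reject_dump,		/* shared netlink dump */
	};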
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8d4a1c06f7e4..6793f32ccb58 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -226,7 +226,8 @@ enum ib_port_cap_flags {
IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
IB_PORT_BOOT_MGMT_SUP = 1 << 23,
IB_PORT_LINK_LATENCY_SUP = 1 << 24,
- IB_PORT_CLIENT_REG_SUP = 1 << 25
+ IB_PORT_CLIENT_REG_SUP = 1 << 25,
+ IB_PORT_IP_BASED_GIDS = 1 << 26
};
enum ib_port_width {
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 9e9475c85de5..e5bf9a76f169 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,7 +42,6 @@ TRACE_EVENT(pstate_sample,
u32 state,
u64 mperf,
u64 aperf,
- u32 energy,
u32 freq
),
@@ -51,7 +50,6 @@ TRACE_EVENT(pstate_sample,
state,
mperf,
aperf,
- energy,
freq
),
@@ -61,7 +59,6 @@ TRACE_EVENT(pstate_sample,
__field(u32, state)
__field(u64, mperf)
__field(u64, aperf)
- __field(u32, energy)
__field(u32, freq)
),
@@ -72,17 +69,15 @@ TRACE_EVENT(pstate_sample,
__entry->state = state;
__entry->mperf = mperf;
__entry->aperf = aperf;
- __entry->energy = energy;
__entry->freq = freq;
),
- TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ",
+ TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
(unsigned long)__entry->state,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
- (unsigned long)__entry->energy,
(unsigned long)__entry->freq
)
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 633b93cac1ed..e9a1d2d973b6 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -128,22 +128,13 @@ struct in6_flowlabel_req {
* IPV6 extension headers
*/
#if __UAPI_DEF_IPPROTO_V6
-enum {
- IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */
-#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS
- IPPROTO_ROUTING = 43, /* IPv6 routing header */
-#define IPPROTO_ROUTING IPPROTO_ROUTING
- IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */
-#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT
- IPPROTO_ICMPV6 = 58, /* ICMPv6 */
-#define IPPROTO_ICMPV6 IPPROTO_ICMPV6
- IPPROTO_NONE = 59, /* IPv6 no next header */
-#define IPPROTO_NONE IPPROTO_NONE
- IPPROTO_DSTOPTS = 60, /* IPv6 destination options */
-#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS
- IPPROTO_MH = 135, /* IPv6 mobility header */
-#define IPPROTO_MH IPPROTO_MH
-};
+#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */
+#define IPPROTO_ROUTING 43 /* IPv6 routing header */
+#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */
+#define IPPROTO_ICMPV6 58 /* ICMPv6 */
+#define IPPROTO_NONE 59 /* IPv6 no next header */
+#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
+#define IPPROTO_MH 135 /* IPv6 mobility header */
#endif /* __UAPI_DEF_IPPROTO_V6 */
/*
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
index 7fabba5059cf..feb0b4c0814c 100644
--- a/include/uapi/linux/mic_ioctl.h
+++ b/include/uapi/linux/mic_ioctl.h
@@ -39,7 +39,7 @@ struct mic_copy_desc {
#else
struct iovec *iov;
#endif
- int iovcnt;
+ __u32 iovcnt;
__u8 vr_idx;
__u8 update_used;
__u32 out_len;
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 989c04e0c563..e5ab62201119 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -350,6 +350,16 @@ struct nvme_delete_queue {
__u32 rsvd11[5];
};
+struct nvme_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 sqid;
+ __u16 cid;
+ __u32 rsvd11[5];
+};
+
struct nvme_download_firmware {
__u8 opcode;
__u8 flags;
@@ -384,6 +394,7 @@ struct nvme_command {
struct nvme_download_firmware dlfw;
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
+ struct nvme_abort_cmd abort;
};
};
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild
index 61257cb14653..5c459628e8c7 100644
--- a/include/uapi/xen/Kbuild
+++ b/include/uapi/xen/Kbuild
@@ -1,3 +1,5 @@
# UAPI Header export list
header-y += evtchn.h
+header-y += gntalloc.h
+header-y += gntdev.h
header-y += privcmd.h
diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 76bd58065f4f..76bd58065f4f 100644
--- a/include/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 5304bd3c84c5..5304bd3c84c5 100644
--- a/include/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 7ad033dbc845..a5af2a26d94f 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kunmap_ops,
- struct page **pages, unsigned int count);
/* Perform a batch of grant map/copy operations. Retry every batch slot
* for which the hypervisor returns GNTST_eagain. This is typically due
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
*
* If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
* create the "feature-max-indirect-segments" node!
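To make the comment's formula concrete (assuming 4 KiB pages and the usual 8-byte layout of the unpacked struct blkif_request_segment): each indirect page holds 4096 / 8 = 512 segments, so a request carrying 1000 indirect segments needs ceil(1000 / 512) = 2 indirect grant pages.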
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
-struct blkif_request_segment_aligned {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
#endif
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_request_discard {
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e0712afa..000000000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (C) IBM Corp. 2006
- */
-
-#ifndef _XEN_XENCOMM_H_
-#define _XEN_XENCOMM_H_
-
-/* A xencomm descriptor is a scatter/gather list containing physical
- * addresses corresponding to a virtually contiguous memory area. The
- * hypervisor translates these physical addresses to machine addresses to copy
- * to and from the virtually contiguous area.
- */
-
-#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-#define XENCOMM_INVALID (~0UL)
-
-struct xencomm_desc {
- uint32_t magic;
- uint32_t nr_addrs; /* the number of entries in address[] */
- uint64_t address[0];
-};
-
-#endif /* _XEN_XENCOMM_H_ */
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039be112..000000000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- * Jerone Young <jyoung5@us.ibm.com>
- */
-
-#ifndef _LINUX_XENCOMM_H_
-#define _LINUX_XENCOMM_H_
-
-#include <xen/interface/xencomm.h>
-
-#define XENCOMM_MINI_ADDRS 3
-struct xencomm_mini {
- struct xencomm_desc _desc;
- uint64_t address[XENCOMM_MINI_ADDRS];
-};
-
-/* To avoid additionnal virt to phys conversion, an opaque structure is
- presented. */
-struct xencomm_handle;
-
-extern void xencomm_free(struct xencomm_handle *desc);
-extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
-extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
- unsigned long bytes, struct xencomm_mini *xc_area);
-
-#if 0
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- struct xencomm_mini xc_desc ## _base[(n)] \
- __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
- struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
-#else
-/*
- * gcc bug workaround:
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
- * gcc doesn't handle properly stack variable with
- * __attribute__((__align__(sizeof(struct xencomm_mini))))
- */
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- unsigned char xc_desc ## _base[((n) + 1 ) * \
- sizeof(struct xencomm_mini)]; \
- struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
- ((unsigned long)xc_desc ## _base + \
- (sizeof(struct xencomm_mini) - \
- ((unsigned long)xc_desc ## _base) % \
- sizeof(struct xencomm_mini)));
-#endif
-#define xencomm_map_no_alloc(ptr, bytes) \
- ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
- __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
-
-/* provided by architecture code: */
-extern unsigned long xencomm_vtop(unsigned long vaddr);
-
-static inline void *xencomm_pa(void *ptr)
-{
- return (void *)xencomm_vtop((unsigned long)ptr);
-}
-
-#define xen_guest_handle(hnd) ((hnd).p)
-
-#endif /* _LINUX_XENCOMM_H_ */
diff --git a/init/main.c b/init/main.c
index 2fd9cef70ee8..eb03090cdced 100644
--- a/init/main.c
+++ b/init/main.c
@@ -812,7 +812,7 @@ void __init load_default_modules(void)
static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
- return do_execve(init_filename,
+ return do_execve(getname_kernel(init_filename),
(const char __user *const __user *)argv_init,
(const char __user *const __user *)envp_init);
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956a..7aef2f4b6c64 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
struct audit_context *context = current->audit_context;
BUG_ON(!context);
- if (!context->in_syscall) {
+ if (!name->aname || !context->in_syscall) {
#if AUDIT_DEBUG == 2
printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
__FILE__, __LINE__, context->serial, name);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f658..07cbdfea9ae2 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
# Generic configurable interrupt chip implementation
config GENERIC_IRQ_CHIP
bool
+ select IRQ_DOMAIN
# Generic irq_domain hw <--> linux irq number translation
config IRQ_DOMAIN
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e7..6b375af4958d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
commit_creds(new);
- retval = do_execve(sub_info->path,
+ retval = do_execve(getname_kernel(sub_info->path),
(const char __user *const __user *)sub_info->argv,
(const char __user *const __user *)sub_info->envp);
if (!retval)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dbf94a7d25a8..a48abeac753f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
config DEBUG_INFO
bool "Compile the kernel with debug info"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !COMPILE_TEST
help
If you say Y here the resulting kernel image will include
debugging info resulting in a larger kernel image.
diff --git a/lib/Makefile b/lib/Makefile
index 126b34f2eb16..48140e3ba73f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+GCOV_PROFILE_hweight.o := n
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 7be235f1a70b..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
/*
* Try to steal tags from a remote cpu's percpu freelist.
*
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags.
*
* Then we iterate through the cpus until we find some tags - we don't attempt
* to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
struct percpu_ida_cpu *remote;
for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
- cpus_have_tags--) {
+ cpus_have_tags; cpus_have_tags--) {
cpu = cpumask_next(cpu, &pool->cpus_have_tags);
if (cpu >= nr_cpu_ids) {
diff --git a/mm/filemap.c b/mm/filemap.c
index d56d3c145b9f..7a13f6ac5421 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2553,8 +2553,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (ret > 0) {
ssize_t err;
- err = generic_write_sync(file, pos, ret);
- if (err < 0 && ret > 0)
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
ret = err;
}
return ret;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f08a2d61487..2f2f34a4e77d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,8 +945,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* to it. Similarly, page lock is shifted.
*/
if (hpage != p) {
- put_page(hpage);
- get_page(p);
+ if (!(flags & MF_COUNT_INCREASED)) {
+ put_page(hpage);
+ get_page(p);
+ }
lock_page(p);
unlock_page(hpage);
*hpagep = p;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2d30e2cfe804..7106cb1aca8e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
struct address_space *mapping2;
+ unsigned long flags;
if (!mapping)
return 1;
- spin_lock_irq(&mapping->tree_lock);
+ spin_lock_irqsave(&mapping->tree_lock, flags);
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
@@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (mapping->host) {
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1004,21 +1004,19 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
{
- lockdep_assert_held(&n->list_lock);
-
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_add(&page->lru, &n->full);
}
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
- lockdep_assert_held(&n->list_lock);
-
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_del(&page->lru);
}
@@ -1520,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
*/
-static inline void add_partial(struct kmem_cache_node *n,
- struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
- lockdep_assert_held(&n->list_lock);
-
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
@@ -1532,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
list_add(&page->lru, &n->partial);
}
-static inline void remove_partial(struct kmem_cache_node *n,
- struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+ struct page *page, int tail)
{
lockdep_assert_held(&n->list_lock);
+ __add_partial(n, page, tail);
+}
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
list_del(&page->lru);
n->nr_partial--;
}
+static inline void remove_partial(struct kmem_cache_node *n,
+ struct page *page)
+{
+ lockdep_assert_held(&n->list_lock);
+ __remove_partial(n, page);
+}
+
/*
* Remove slab from the partial list, freeze it and
* return the pointer to the freelist.
@@ -2906,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
inc_slabs_node(kmem_cache_node, node, page->objects);
/*
- * the lock is for lockdep's sake, not for any actual
- * race protection
+ * No locks need to be taken here as it has just been
+ * initialized and there is no concurrent access.
*/
- spin_lock(&n->list_lock);
- add_partial(n, page, DEACTIVATE_TO_HEAD);
- spin_unlock(&n->list_lock);
+ __add_partial(n, page, DEACTIVATE_TO_HEAD);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3197,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- remove_partial(n, page);
+ __remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 98e85e9c2b2d..e76ace30d436 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
return ret;
}
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
page = find_get_page(swap_address_space(entry), entry.val);
- if (page)
+ if (page) {
INC_CACHE_INFO(find_success);
+ if (TestClearPageReadahead(page))
+ atomic_inc(&swapin_readahead_hits);
+ }
INC_CACHE_INFO(find_total);
return page;
@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return found_page;
}
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+ static unsigned long prev_offset;
+ unsigned int pages, max_pages, last_ra;
+ static atomic_t last_readahead_pages;
+
+ max_pages = 1 << ACCESS_ONCE(page_cluster);
+ if (max_pages <= 1)
+ return 1;
+
+ /*
+ * This heuristic has been found to work well on both sequential and
+ * random loads, swapping to hard disk or to SSD: please don't ask
+ * what the "+ 2" means, it just happens to work well, that's all.
+ */
+ pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+ if (pages == 2) {
+ /*
+ * We can have no readahead hits to judge by: but must not get
+ * stuck here forever, so check for an adjacent offset instead
+ * (and don't even bother to check whether swap type is same).
+ */
+ if (offset != prev_offset + 1 && offset != prev_offset - 1)
+ pages = 1;
+ prev_offset = offset;
+ } else {
+ unsigned int roundup = 4;
+ while (roundup < pages)
+ roundup <<= 1;
+ pages = roundup;
+ }
+
+ if (pages > max_pages)
+ pages = max_pages;
+
+ /* Don't shrink readahead too fast */
+ last_ra = atomic_read(&last_readahead_pages) / 2;
+ if (pages < last_ra)
+ pages = last_ra;
+ atomic_set(&last_readahead_pages, pages);
+
+ return pages;
+}
+
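A worked example of the heuristic above, assuming the default vm.page-cluster of 3 (so max_pages = 8): five readahead hits since the previous fault give pages = 5 + 2 = 7, rounded up to the next power of two (8) and capped at max_pages; with no hits and an offset not adjacent to the previous one, pages stays at 1 and the caller skips readahead entirely; the final step then keeps the window from shrinking below half of the previous value.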
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
- unsigned long offset = swp_offset(entry);
+ unsigned long entry_offset = swp_offset(entry);
+ unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
- unsigned long mask = (1UL << page_cluster) - 1;
+ unsigned long mask;
struct blk_plug plug;
+ mask = swapin_nr_pages(offset) - 1;
+ if (!mask)
+ goto skip;
+
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
end_offset = offset | mask;
@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask, vma, addr);
if (!page)
continue;
+ if (offset != entry_offset)
+ SetPageReadahead(page);
page_cache_release(page);
}
blk_finish_plug(&plug);
lru_add_drain(); /* Push any new pages onto the LRU now */
+skip:
return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c6c13b050a58..4a7f7e6992b6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1923,7 +1923,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->swap_map = NULL;
cluster_info = p->cluster_info;
p->cluster_info = NULL;
- p->flags = 0;
frontswap_map = frontswap_map_get(p);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
@@ -1949,6 +1948,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
+
+ /*
+ * Clear the SWP_USED flag after all resources are freed so that swapon
+ * can reuse this swap_info in alloc_swap_info() safely. It is ok to
+ * not hold p->lock after we cleared its SWP_WRITEOK.
+ */
+ spin_lock(&swap_lock);
+ p->flags = 0;
+ spin_unlock(&swap_lock);
+
err = 0;
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 72496140ac08..def5dd2fbe61 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -851,12 +851,14 @@ const char * const vmstat_text[] = {
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
-#endif
+#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
+#endif /* CONFIG_DEBUG_TLBFLUSH */
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
diff --git a/net/9p/client.c b/net/9p/client.c
index a5e4d2dcb03e..9186550d77a6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -204,7 +204,7 @@ free_and_return:
return ret;
}
-struct p9_fcall *p9_fcall_alloc(int alloc_msize)
+static struct p9_fcall *p9_fcall_alloc(int alloc_msize)
{
struct p9_fcall *fc;
fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cd1e1ede73a4..ac2666c1d011 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
int count = nr_pages;
while (nr_pages) {
s = rest_of_page(data);
- pages[index++] = kmap_to_page(data);
+ if (is_vmalloc_addr(data))
+ pages[index++] = vmalloc_to_page(data);
+ else
+ pages[index++] = kmap_to_page(data);
data += s;
nr_pages--;
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e4401a531afb..63f0455c0bc3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -187,8 +187,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- br_fdb_change_mac_address(br, addr->sa_data);
+ /* Mac address will be changed in br_stp_change_bridge_id(). */
br_stp_change_bridge_id(br, addr->sa_data);
}
spin_unlock_bh(&br->lock);
@@ -226,6 +225,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
br_netpoll_disable(p);
}
+static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+ struct netpoll *np;
+ int err;
+
+ np = kzalloc(sizeof(*p->np), gfp);
+ if (!np)
+ return -ENOMEM;
+
+ err = __netpoll_setup(np, p->dev, gfp);
+ if (err) {
+ kfree(np);
+ return err;
+ }
+
+ p->np = np;
+ return err;
+}
+
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+ if (!p->br->dev->npinfo)
+ return 0;
+
+ return __br_netpoll_enable(p, gfp);
+}
+
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
gfp_t gfp)
{
@@ -236,7 +262,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
list_for_each_entry(p, &br->port_list, list) {
if (!p->dev)
continue;
- err = br_netpoll_enable(p, gfp);
+ err = __br_netpoll_enable(p, gfp);
if (err)
goto fail;
}
@@ -249,28 +275,6 @@ fail:
goto out;
}
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
-{
- struct netpoll *np;
- int err;
-
- if (!p->br->dev->npinfo)
- return 0;
-
- np = kzalloc(sizeof(*p->np), gfp);
- if (!np)
- return -ENOMEM;
-
- err = __netpoll_setup(np, p->dev, gfp);
- if (err) {
- kfree(np);
- return err;
- }
-
- p->np = np;
- return err;
-}
-
void br_netpoll_disable(struct net_bridge_port *p)
{
struct netpoll *np = p->np;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c5f5a4a933f4..9203d5a1943f 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -27,6 +27,9 @@
#include "br_private.h"
static struct kmem_cache *br_fdb_cache __read_mostly;
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+ const unsigned char *addr,
+ __u16 vid);
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
@@ -89,11 +92,57 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
call_rcu(&f->rcu, fdb_rcu_free);
}
+/* Delete a local entry if no other port had the same address. */
+static void fdb_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ struct net_bridge_fdb_entry *f)
+{
+ const unsigned char *addr = f->addr.addr;
+ u16 vid = f->vlan_id;
+ struct net_bridge_port *op;
+
+ /* Maybe another port has same hw addr? */
+ list_for_each_entry(op, &br->port_list, list) {
+ if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+ (!vid || nbp_vlan_find(op, vid))) {
+ f->dst = op;
+ f->added_by_user = 0;
+ return;
+ }
+ }
+
+ /* Maybe bridge device has same hw addr? */
+ if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+ (!vid || br_vlan_find(br, vid))) {
+ f->dst = NULL;
+ f->added_by_user = 0;
+ return;
+ }
+
+ fdb_delete(br, f);
+}
+
+void br_fdb_find_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid)
+{
+ struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+ struct net_bridge_fdb_entry *f;
+
+ spin_lock_bh(&br->hash_lock);
+ f = fdb_find(head, addr, vid);
+ if (f && f->is_local && !f->added_by_user && f->dst == p)
+ fdb_delete_local(br, p, f);
+ spin_unlock_bh(&br->hash_lock);
+}
+
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge *br = p->br;
- bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
+ struct net_port_vlans *pv = nbp_get_vlan_info(p);
+ bool no_vlan = !pv;
int i;
+ u16 vid;
spin_lock_bh(&br->hash_lock);
@@ -104,38 +153,34 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge_fdb_entry *f;
f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
- if (f->dst == p && f->is_local) {
- /* maybe another port has same hw addr? */
- struct net_bridge_port *op;
- u16 vid = f->vlan_id;
- list_for_each_entry(op, &br->port_list, list) {
- if (op != p &&
- ether_addr_equal(op->dev->dev_addr,
- f->addr.addr) &&
- nbp_vlan_find(op, vid)) {
- f->dst = op;
- goto insert;
- }
- }
-
+ if (f->dst == p && f->is_local && !f->added_by_user) {
/* delete old one */
- fdb_delete(br, f);
-insert:
- /* insert new address, may fail if invalid
- * address or dup.
- */
- fdb_insert(br, p, newaddr, vid);
+ fdb_delete_local(br, p, f);
/* if this port has no vlan information
* configured, we can safely be done at
* this point.
*/
if (no_vlan)
- goto done;
+ goto insert;
}
}
}
+insert:
+ /* insert new address, may fail if invalid address or dup. */
+ fdb_insert(br, p, newaddr, 0);
+
+ if (no_vlan)
+ goto done;
+
+ /* Now add entries for every VLAN configured on the port.
+ * This function runs under RTNL so the bitmap will not change
+ * from under us.
+ */
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
+ fdb_insert(br, p, newaddr, vid);
+
done:
spin_unlock_bh(&br->hash_lock);
}
@@ -146,10 +191,12 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
struct net_port_vlans *pv;
u16 vid = 0;
+ spin_lock_bh(&br->hash_lock);
+
/* If old entry was unassociated with any port, then delete it. */
f = __br_fdb_get(br, br->dev->dev_addr, 0);
if (f && f->is_local && !f->dst)
- fdb_delete(br, f);
+ fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
@@ -159,14 +206,16 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
*/
pv = br_get_vlan_info(br);
if (!pv)
- return;
+ goto out;
for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
- fdb_delete(br, f);
+ fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, vid);
}
+out:
+ spin_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(unsigned long _data)
@@ -235,25 +284,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
if (f->is_static && !do_all)
continue;
- /*
- * if multiple ports all have the same device address
- * then when one port is deleted, assign
- * the local entry to other port
- */
- if (f->is_local) {
- struct net_bridge_port *op;
- list_for_each_entry(op, &br->port_list, list) {
- if (op != p &&
- ether_addr_equal(op->dev->dev_addr,
- f->addr.addr)) {
- f->dst = op;
- goto skip_delete;
- }
- }
- }
- fdb_delete(br, f);
- skip_delete: ;
+ if (f->is_local)
+ fdb_delete_local(br, p, f);
+ else
+ fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
@@ -397,6 +432,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb->vlan_id = vid;
fdb->is_local = 0;
fdb->is_static = 0;
+ fdb->added_by_user = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
@@ -447,7 +483,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
}
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid, bool added_by_user)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
@@ -473,13 +509,18 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
/* fastpath: update of existing entry */
fdb->dst = source;
fdb->updated = jiffies;
+ if (unlikely(added_by_user))
+ fdb->added_by_user = 1;
}
} else {
spin_lock(&br->hash_lock);
if (likely(!fdb_find(head, addr, vid))) {
fdb = fdb_create(head, source, addr, vid);
- if (fdb)
+ if (fdb) {
+ if (unlikely(added_by_user))
+ fdb->added_by_user = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH);
+ }
}
/* else we lose race and someone else inserts
* it first, don't bother updating
@@ -647,6 +688,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
modified = true;
}
+ fdb->added_by_user = 1;
fdb->used = jiffies;
if (modified) {
@@ -664,7 +706,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
if (ndm->ndm_flags & NTF_USE) {
rcu_read_lock();
- br_fdb_update(p->br, p, addr, vid);
+ br_fdb_update(p->br, p, addr, vid, true);
rcu_read_unlock();
} else {
spin_lock_bh(&p->br->hash_lock);
@@ -749,8 +791,7 @@ out:
return err;
}
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
- u16 vlan)
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
struct net_bridge_fdb_entry *fdb;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index cffe1d666ba1..54d207d3a31c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -389,6 +389,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br->dev->needed_headroom < dev->needed_headroom)
br->dev->needed_headroom = dev->needed_headroom;
+ if (br_fdb_insert(br, p, dev->dev_addr, 0))
+ netdev_err(dev, "failed insert local address bridge forwarding table\n");
+
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
@@ -404,9 +407,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
dev_set_mtu(br->dev, br_min_mtu(br));
- if (br_fdb_insert(br, p, dev->dev_addr, 0))
- netdev_err(dev, "failed insert local address bridge forwarding table\n");
-
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bf8dc7d308d6..28d544627422 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -77,7 +77,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
- br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
br_multicast_rcv(br, p, skb, vid))
@@ -148,7 +148,7 @@ static int br_handle_local_finish(struct sk_buff *skb)
br_vlan_get_tag(skb, &vid);
if (p->flags & BR_LEARNING)
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
return 0; /* process further */
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index fcd12333c59b..3ba11bc99b65 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -104,6 +104,7 @@ struct net_bridge_fdb_entry
mac_addr addr;
unsigned char is_local;
unsigned char is_static;
+ unsigned char added_by_user;
__u16 vlan_id;
};
@@ -370,6 +371,9 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
int br_fdb_init(void);
void br_fdb_fini(void);
void br_fdb_flush(struct net_bridge *br);
+void br_fdb_find_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid);
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
void br_fdb_cleanup(unsigned long arg);
@@ -383,8 +387,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid);
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+ const unsigned char *addr, u16 vid, bool added_by_user);
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr);
@@ -584,6 +587,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
+bool br_vlan_find(struct net_bridge *br, u16 vid);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
@@ -665,6 +669,11 @@ static inline void br_vlan_flush(struct net_bridge *br)
{
}
+static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+ return false;
+}
+
static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
return -EOPNOTSUPP;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 656a6f3e40de..189ba1e7d851 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -194,6 +194,8 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
wasroot = br_is_root_bridge(br);
+ br_fdb_change_mac_address(br, addr);
+
memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
memcpy(br->bridge_id.addr, addr, ETH_ALEN);
memcpy(br->dev->dev_addr, addr, ETH_ALEN);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4ca4d0a0151c..8249ca764c79 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -275,9 +275,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
if (!pv)
return -EINVAL;
- spin_lock_bh(&br->hash_lock);
- fdb_delete_by_addr(br, br->dev->dev_addr, vid);
- spin_unlock_bh(&br->hash_lock);
+ br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
__vlan_del(pv, vid);
return 0;
@@ -295,6 +293,25 @@ void br_vlan_flush(struct net_bridge *br)
__vlan_flush(pv);
}
+bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+ struct net_port_vlans *pv;
+ bool found = false;
+
+ rcu_read_lock();
+ pv = rcu_dereference(br->vlan_info);
+
+ if (!pv)
+ goto out;
+
+ if (test_bit(vid, pv->vlan_bitmap))
+ found = true;
+
+out:
+ rcu_read_unlock();
+ return found;
+}
+
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
if (!rtnl_trylock())
@@ -359,9 +376,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
if (!pv)
return -EINVAL;
- spin_lock_bh(&port->br->hash_lock);
- fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
- spin_unlock_bh(&port->br->hash_lock);
+ br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
return __vlan_del(pv, vid);
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 4dca159435cf..edbca468fa73 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -22,6 +22,7 @@
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 353f793d1b3b..a6e115463052 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -15,6 +15,7 @@
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
#define SRVL_CTRL_PKT_SIZE 1
#define SRVL_FLOW_OFF 0x81
diff --git a/net/can/af_can.c b/net/can/af_can.c
index d249874a366d..a27f8aad9e99 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -57,6 +57,7 @@
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
+#include <linux/can/skb.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
return -ENOMEM;
}
- newskb->sk = skb->sk;
+ can_skb_set_owner(newskb, skb->sk);
newskb->ip_summed = CHECKSUM_UNNECESSARY;
newskb->pkt_type = PACKET_BROADCAST;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3fc737b214c7..dcb75c0e66c1 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
/* send with loopback */
skb->dev = dev;
- skb->sk = op->sk;
+ can_skb_set_owner(skb, op->sk);
can_send(skb, 1);
/* update statistics */
@@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
can_skb_prv(skb)->ifindex = dev->ifindex;
skb->dev = dev;
- skb->sk = sk;
+ can_skb_set_owner(skb, sk);
err = can_send(skb, 1); /* send with loopback */
dev_put(dev);
diff --git a/net/can/raw.c b/net/can/raw.c
index 07d72d852324..8be757cca2ec 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -715,6 +715,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->dev = dev;
skb->sk = sk;
+ skb->priority = sk->sk_priority;
err = can_send(skb, ro->loopback);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0e478a0f4204..30efc5c18622 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
if (!cursor->bvec_iter.bi_size) {
bio = bio->bi_next;
- cursor->bvec_iter = bio->bi_iter;
+ cursor->bio = bio;
+ if (bio)
+ cursor->bvec_iter = bio->bi_iter;
+ else
+ memset(&cursor->bvec_iter, 0,
+ sizeof(cursor->bvec_iter));
}
- cursor->bio = bio;
if (!cursor->last_piece) {
BUG_ON(!cursor->resid);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 010ff3bd58ad..0676f2b199d6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1427,6 +1427,40 @@ static void __send_queued(struct ceph_osd_client *osdc)
}
/*
+ * Caller should hold map_sem for read and request_mutex.
+ */
+static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req,
+ bool nofail)
+{
+ int rc;
+
+ __register_request(osdc, req);
+ req->r_sent = 0;
+ req->r_got_reply = 0;
+ rc = __map_request(osdc, req, 0);
+ if (rc < 0) {
+ if (nofail) {
+ dout("osdc_start_request failed map, "
+ " will retry %lld\n", req->r_tid);
+ rc = 0;
+ } else {
+ __unregister_request(osdc, req);
+ }
+ return rc;
+ }
+
+ if (req->r_osd == NULL) {
+ dout("send_request %p no up osds in pg\n", req);
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+ } else {
+ __send_queued(osdc);
+ }
+
+ return 0;
+}
+
+/*
* Timeout callback, called every N seconds when 1 or more osd
* requests has been active for more than N seconds. When this
* happens, we ping all OSDs with requests who have timed out to
@@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
osdmap_epoch = ceph_decode_32(&p);
/* lookup */
+ down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (req == NULL) {
@@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
dout("redirect pool %lld\n", redir.oloc.pool);
__unregister_request(osdc, req);
- mutex_unlock(&osdc->request_mutex);
req->r_target_oloc = redir.oloc; /* struct */
@@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
* successfully. In the future we might want to follow
* original request's nofail setting here.
*/
- err = ceph_osdc_start_request(osdc, req, true);
+ err = __ceph_osdc_start_request(osdc, req, true);
BUG_ON(err);
- goto done;
+ goto out_unlock;
}
already_completed = req->r_got_reply;
@@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
req->r_got_reply = 1;
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
dout("handle_reply tid %llu dup ack\n", tid);
- mutex_unlock(&osdc->request_mutex);
- goto done;
+ goto out_unlock;
}
dout("handle_reply tid %llu flags %d\n", tid, flags);
@@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
__unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
if (!already_completed) {
if (req->r_unsafe_callback &&
@@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
complete_request(req);
}
-done:
+out:
dout("req=%p req->r_linger=%d\n", req, req->r_linger);
ceph_osdc_put_request(req);
return;
+out_unlock:
+ mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
+ goto out;
bad_put:
req->r_result = -EIO;
@@ -1791,6 +1829,7 @@ bad_put:
ceph_osdc_put_request(req);
bad_mutex:
mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
bad:
pr_err("corrupt osd_op_reply got %d %d\n",
(int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
@@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req,
bool nofail)
{
- int rc = 0;
+ int rc;
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
- __register_request(osdc, req);
- req->r_sent = 0;
- req->r_got_reply = 0;
- rc = __map_request(osdc, req, 0);
- if (rc < 0) {
- if (nofail) {
- dout("osdc_start_request failed map, "
- " will retry %lld\n", req->r_tid);
- rc = 0;
- } else {
- __unregister_request(osdc, req);
- }
- goto out_unlock;
- }
- if (req->r_osd == NULL) {
- dout("send_request %p no up osds in pg\n", req);
- ceph_monc_request_next_osdmap(&osdc->client->monc);
- } else {
- __send_queued(osdc);
- }
- rc = 0;
-out_unlock:
+
+ rc = __ceph_osdc_start_request(osdc, req, nofail);
+
mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
+
return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
@@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
err = -ENOMEM;
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
if (!osdc->notify_wq)
- goto out_msgpool;
+ goto out_msgpool_reply;
+
return 0;
+out_msgpool_reply:
+ ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
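
For context on the ceph change above: ceph_osdc_start_request() now only takes map_sem and request_mutex and delegates to __ceph_osdc_start_request(), the usual kernel "double-underscore" split where the helper assumes its caller already holds the locks and can therefore also be reused from handle_reply() when resubmitting a redirected request. A standalone sketch of the idiom (my_client, my_request and __my_start_request are made-up names, not ceph symbols):

	#include <linux/mutex.h>
	#include <linux/rwsem.h>
	#include <linux/lockdep.h>

	struct my_request;

	struct my_client {
		struct rw_semaphore map_sem;	/* outer: protects the map */
		struct mutex request_mutex;	/* inner: protects requests */
	};

	/* Locked variant: caller holds request_mutex (and map_sem for read). */
	static int __my_start_request(struct my_client *c, struct my_request *req)
	{
		lockdep_assert_held(&c->request_mutex);
		/* register, map and queue the request here */
		return 0;
	}

	int my_start_request(struct my_client *c, struct my_request *req)
	{
		int rc;

		down_read(&c->map_sem);
		mutex_lock(&c->request_mutex);
		rc = __my_start_request(c, req);
		mutex_unlock(&c->request_mutex);
		up_read(&c->map_sem);
		return rc;
	}

This keeps the lock ordering visible in one place and lets an already-locked path call the helper without dropping and retaking the locks.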
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db716350..4ad1b78c9c77 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2803,7 +2803,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -4637,7 +4637,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
-int netdev_adjacent_sysfs_add(struct net_device *dev,
+static int netdev_adjacent_sysfs_add(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list)
{
@@ -4647,7 +4647,7 @@ int netdev_adjacent_sysfs_add(struct net_device *dev,
return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
linkname);
}
-void netdev_adjacent_sysfs_del(struct net_device *dev,
+static void netdev_adjacent_sysfs_del(struct net_device *dev,
char *name,
struct list_head *dev_list)
{
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0bd35c0..185c341fafbd 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
attach_rules(&ops->rules_list, dev);
break;
+ case NETDEV_CHANGENAME:
+ list_for_each_entry(ops, &net->rules_ops, list) {
+ detach_rules(&ops->rules_list, dev);
+ attach_rules(&ops->rules_list, dev);
+ }
+ break;
+
case NETDEV_UNREGISTER:
list_for_each_entry(ops, &net->rules_ops, list)
detach_rules(&ops->rules_list, dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3dec4763..a664f7829a6d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
{
char *cur=opt, *delim;
int ipv6;
+ bool ipversion_set = false;
if (*cur != '@') {
if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
cur++;
if (*cur != '/') {
+ ipversion_set = true;
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
@@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
if (ipv6 < 0)
goto parse_failed;
- else if (np->ipv6 != (bool)ipv6)
+ else if (ipversion_set && np->ipv6 != (bool)ipv6)
goto parse_failed;
else
np->ipv6 = (bool)ipv6;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc9a618..048dc8d183aa 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
if (!master_dev)
return 0;
ops = master_dev->rtnl_link_ops;
- if (!ops->get_slave_size)
+ if (!ops || !ops->get_slave_size)
return 0;
/* IFLA_INFO_SLAVE_DATA + nested data */
return nla_total_size(sizeof(struct nlattr)) +
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dcdf6a8..5b6a9431b017 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
while (order) {
if (npages >= 1 << order) {
page = alloc_pages(sk->sk_allocation |
- __GFP_COMP | __GFP_NOWARN,
+ __GFP_COMP |
+ __GFP_NOWARN |
+ __GFP_NORETRY,
order);
if (page)
goto fill_page;
@@ -1845,7 +1847,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
gfp_t gfp = prio;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
pfrag->page = alloc_pages(gfp, order);
if (likely(pfrag->page)) {
pfrag->offset = 0;
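
The two sock.c hunks above add __GFP_NORETRY to opportunistic high-order page allocations. A minimal sketch of that idiom outside the patch (alloc_frag_page is a made-up name):

	#include <linux/gfp.h>

	/*
	 * Try a compound high-order page first, but tell the allocator not to
	 * try hard (no prolonged reclaim/compaction); if that fails, fall back
	 * to an order-0 page, which is far more likely to succeed.
	 */
	static struct page *alloc_frag_page(gfp_t prio, unsigned int order)
	{
		struct page *page;

		if (order) {
			page = alloc_pages(prio | __GFP_COMP | __GFP_NOWARN |
					   __GFP_NORETRY, order);
			if (page)
				return page;
		}
		return alloc_pages(prio, 0);
	}

__GFP_NORETRY is about latency here: a failed high-order attempt should stay cheap, because the caller has an order-0 fallback anyway.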
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2954dcbca832..4c04848953bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2104,8 +2104,6 @@ static struct notifier_block dn_dev_notifier = {
.notifier_call = dn_device_event,
};
-extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-
static struct packet_type dn_dix_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_DNA_RT),
.func = dn_route_rcv,
@@ -2353,9 +2351,6 @@ static const struct proto_ops dn_proto_ops = {
.sendpage = sock_no_sendpage,
};
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
MODULE_AUTHOR("Linux DECnet Project Team");
MODULE_LICENSE("GPL");
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 48b25c0af4d0..8edfea5da572 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -106,7 +106,6 @@ static int lowpan_header_create(struct sk_buff *skb,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
{
- struct ipv6hdr *hdr;
const u8 *saddr = _saddr;
const u8 *daddr = _daddr;
struct ieee802154_addr sa, da;
@@ -117,8 +116,6 @@ static int lowpan_header_create(struct sk_buff *skb,
if (type != ETH_P_IPV6)
return 0;
- hdr = ipv6_hdr(skb);
-
if (!saddr)
saddr = dev->dev_addr;
@@ -533,7 +530,27 @@ static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &lowpan_netdev_xmit_lock_key);
+}
+
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+ dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+ return 0;
+}
+
static const struct net_device_ops lowpan_netdev_ops = {
+ .ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
.ndo_set_mac_address = lowpan_set_address,
};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ac2dff3c2c1c..bdbf68bb2e2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1443,7 +1443,8 @@ static size_t inet_nlmsg_size(void)
+ nla_total_size(4) /* IFA_LOCAL */
+ nla_total_size(4) /* IFA_BROADCAST */
+ nla_total_size(IFNAMSIZ) /* IFA_LABEL */
- + nla_total_size(4); /* IFA_FLAGS */
+ + nla_total_size(4) /* IFA_FLAGS */
+ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
}
static inline u32 cstamp_delta(unsigned long cstamp)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd28f386bd02..50228be5c17b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -101,28 +101,22 @@ static void tunnel_dst_reset_all(struct ip_tunnel *t)
__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
-static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
{
struct dst_entry *dst;
rcu_read_lock();
dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
- if (dst)
+ if (dst) {
+ if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ rcu_read_unlock();
+ tunnel_dst_reset(t);
+ return NULL;
+ }
dst_hold(dst);
- rcu_read_unlock();
- return dst;
-}
-
-static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
-{
- struct dst_entry *dst = tunnel_dst_get(t);
-
- if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
- tunnel_dst_reset(t);
- return NULL;
}
-
- return dst;
+ rcu_read_unlock();
+ return (struct rtable *)dst;
}
/* Often modified stats are per cpu, other are shared (netdev->stats) */
@@ -584,7 +578,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
struct flowi4 fl4;
u8 tos, ttl;
__be16 df;
- struct rtable *rt = NULL; /* Route to the other host */
+ struct rtable *rt; /* Route to the other host */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst;
int err;
@@ -657,8 +651,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
- if (connected)
- rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
+ rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 81c6910cfa92..a26ce035e3fa 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,6 +61,11 @@ config NFT_CHAIN_NAT_IPV4
packet transformations such as the source, destination address and
source and destination ports.
+config NFT_REJECT_IPV4
+ depends on NF_TABLES_IPV4
+ default NFT_REJECT
+ tristate
+
config NF_TABLES_ARP
depends on NF_TABLES
tristate "ARP nf_tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c16be9d58420..90b82405331e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# generic IP tables
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 9eea059dd621..574f7ebba0b6 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -229,7 +229,10 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0)
break;
- else if (ret != -EBUSY) {
+ else if (ret == -EBUSY) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ } else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
nated_port = 0;
break;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 000000000000..e79718a382f2
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach(pkt->skb, priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset(pkt->skb, pkt->ops->hooknum);
+ break;
+ }
+
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
+
+static struct nft_expr_type nft_reject_ipv4_type;
+static const struct nft_expr_ops nft_reject_ipv4_ops = {
+ .type = &nft_reject_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_ipv4_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "reject",
+ .ops = &nft_reject_ipv4_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_ipv4_module_init(void)
+{
+ return nft_register_expr(&nft_reject_ipv4_type);
+}
+
+static void __exit nft_reject_ipv4_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_ipv4_type);
+}
+
+module_init(nft_reject_ipv4_module_init);
+module_exit(nft_reject_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4475b3bb494d..9f3a2db9109e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2229,7 +2229,7 @@ adjudge_to_death:
/* This is a (useful) BSD violating of the RFC. There is a
* problem with TCP as specified in that the other end could
* keep a socket open forever with no application left this end.
- * We use a 3 minute timeout (about the same as BSD) then kill
+ * We use a 1 minute timeout (about the same as BSD) then kill
* our end. If they send after that then tough - BUT: long enough
* that we won't make the old 4*rto = almost no time - whoops
* reset mistake.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 65cf90e063d5..227cba79fa6b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -671,6 +671,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
struct tcp_sock *tp = tcp_sk(sk);
long m = mrtt; /* RTT */
+ u32 srtt = tp->srtt;
/* The following amusing code comes from Jacobson's
* article in SIGCOMM '88. Note that rtt and mdev
@@ -688,11 +689,9 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
* does not matter how to _calculate_ it. Seems, it was trap
* that VJ failed to avoid. 8)
*/
- if (m == 0)
- m = 1;
- if (tp->srtt != 0) {
- m -= (tp->srtt >> 3); /* m is now error in rtt est */
- tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */
+ if (srtt != 0) {
+ m -= (srtt >> 3); /* m is now error in rtt est */
+ srtt += m; /* rtt = 7/8 rtt + 1/8 new */
if (m < 0) {
m = -m; /* m is now abs(error) */
m -= (tp->mdev >> 2); /* similar update on mdev */
@@ -723,11 +722,12 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
}
} else {
/* no previous measure. */
- tp->srtt = m << 3; /* take the measured time to be rtt */
+ srtt = m << 3; /* take the measured time to be rtt */
tp->mdev = m << 1; /* make sure rto = 3*rtt */
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
+ tp->srtt = max(1U, srtt);
}
/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -746,8 +746,10 @@ static void tcp_update_pacing_rate(struct sock *sk)
rate *= max(tp->snd_cwnd, tp->packets_out);
- /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
- * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
+ /* Correction for small srtt and scheduling constraints.
+ * For small rtt, consider noise is too high, and use
+ * the minimal value (srtt = 1 -> 125 us for HZ=1000)
+ *
* We probably need usec resolution in the future.
* Note: This also takes care of possible srtt=0 case,
* when tcp_rtt_estimator() was not yet called.
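
For reference, the fixed-point arithmetic this hunk rearranges (Jacobson, RFC 6298): tp->srtt stores 8 * SRTT, so "srtt += m - (srtt >> 3)" is the scaled form of SRTT = 7/8 * SRTT + 1/8 * m. A compilable sketch of just the smoothing step, leaving out the mdev/rttvar handling (not kernel code; srtt_update is a made-up name):

	#include <stdint.h>

	static uint32_t srtt_update(uint32_t srtt8, uint32_t m)
	{
		if (srtt8 == 0)
			return m << 3;		/* first sample: SRTT = m */

		/* SRTT = 7/8 * SRTT + 1/8 * m, with SRTT kept scaled by 8 */
		srtt8 = srtt8 - (srtt8 >> 3) + m;
		return srtt8 ? srtt8 : 1;	/* keep the estimate non-zero */
	}

The patch does not change this math; it drops the old "m = 1 when m == 0" adjustment, accumulates into a local srtt and writes tp->srtt back once as max(1U, srtt), so the stored estimate stays non-zero without biasing individual samples.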
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 03d26b85eab8..3be16727f058 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk)
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
- tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+ tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+ 0, GFP_ATOMIC);
}
/*
* One tasklet per cpu tries to send more skbs.
@@ -1904,7 +1905,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
set_bit(TSQ_THROTTLED, &tp->tsq_flags);
- break;
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED, so we must
+ * test again the condition.
+ * We abuse smp_mb__after_clear_bit() because
+ * there is no smp_mb__after_set_bit() yet
+ */
+ smp_mb__after_clear_bit();
+ if (atomic_read(&sk->sk_wmem_alloc) > limit)
+ break;
}
limit = mss_now;
@@ -1977,7 +1986,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
/* Schedule a loss probe in 2*RTT for SACK capable connections
* in Open state, that are either limited by cwnd or application.
*/
- if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
+ if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
!tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
return false;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 25f5cee3a08a..88b4023ecfcf 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -17,6 +17,8 @@
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
+#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
+
struct udp_offload_priv {
struct udp_offload *offload;
struct rcu_head rcu;
@@ -100,8 +102,7 @@ out:
int udp_add_offload(struct udp_offload *uo)
{
- struct udp_offload_priv __rcu **head = &udp_offload_base;
- struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);
+ struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
if (!new_offload)
return -ENOMEM;
@@ -109,8 +110,8 @@ int udp_add_offload(struct udp_offload *uo)
new_offload->offload = uo;
spin_lock(&udp_offload_lock);
- rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
- rcu_assign_pointer(*head, new_offload);
+ new_offload->next = udp_offload_base;
+ rcu_assign_pointer(udp_offload_base, new_offload);
spin_unlock(&udp_offload_lock);
return 0;
@@ -130,12 +131,12 @@ void udp_del_offload(struct udp_offload *uo)
spin_lock(&udp_offload_lock);
- uo_priv = rcu_dereference(*head);
+ uo_priv = udp_deref_protected(*head);
for (; uo_priv != NULL;
- uo_priv = rcu_dereference(*head)) {
-
+ uo_priv = udp_deref_protected(*head)) {
if (uo_priv->offload == uo) {
- rcu_assign_pointer(*head, rcu_dereference(uo_priv->next));
+ rcu_assign_pointer(*head,
+ udp_deref_protected(uo_priv->next));
goto unlock;
}
head = &uo_priv->next;
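
The udp_offload hunks above stop using rcu_dereference() on the update side and introduce udp_deref_protected(), i.e. rcu_dereference_protected() with the lock that actually protects writers. A generic sketch of that split, with made-up names (struct item, head_lock, item_del):

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	struct item {
		int			val;
		struct item __rcu	*next;
		struct rcu_head		rcu;
	};

	static struct item __rcu *head;
	static DEFINE_SPINLOCK(head_lock);

	/* Update-side dereference: head_lock held, no RCU read lock needed. */
	#define item_deref_locked(p) \
		rcu_dereference_protected(p, lockdep_is_held(&head_lock))

	static void item_del(int val)
	{
		struct item __rcu **pprev = &head;
		struct item *it;

		spin_lock(&head_lock);
		for (it = item_deref_locked(*pprev); it;
		     it = item_deref_locked(*pprev)) {
			if (it->val == val) {
				/* unlink; readers still see a consistent list */
				rcu_assign_pointer(*pprev,
						   item_deref_locked(it->next));
				kfree_rcu(it, rcu);
				break;
			}
			pprev = &it->next;
		}
		spin_unlock(&head_lock);
	}

Readers, by contrast, keep using rcu_dereference() under rcu_read_lock(); rcu_dereference_protected() is only legal where the writer lock is held, which is exactly what the lockdep_is_held() condition documents and checks.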
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index f81f59686f21..f2610e157660 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -414,7 +414,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
addr_type = ipv6_addr_type(&hdr->daddr);
if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
- ipv6_anycast_destination(skb))
+ ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
saddr = &hdr->daddr;
/*
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 35750df744dc..4bff1f297e39 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -50,6 +50,11 @@ config NFT_CHAIN_NAT_IPV6
packet transformations such as the source, destination address and
source and destination ports.
+config NFT_REJECT_IPV6
+ depends on NF_TABLES_IPV6
+ default NFT_REJECT
+ tristate
+
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d1b4928f34f7..70d3dd66f2cd 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
new file mode 100644
index 000000000000..0bc19fa87821
--- /dev/null
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+ struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach6(net, pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+ break;
+ }
+
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval);
+
+static struct nft_expr_type nft_reject_ipv6_type;
+static const struct nft_expr_ops nft_reject_ipv6_ops = {
+ .type = &nft_reject_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_ipv6_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "reject",
+ .ops = &nft_reject_ipv6_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_ipv6_module_init(void)
+{
+ return nft_register_expr(&nft_reject_ipv6_type);
+}
+
+static void __exit nft_reject_ipv6_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_ipv6_type);
+}
+
+module_init(nft_reject_ipv6_module_init);
+module_exit(nft_reject_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 994e28bfb32e..00b2a6d1c009 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -52,18 +52,12 @@
#include <net/p8022.h>
#include <net/psnap.h>
#include <net/sock.h>
+#include <net/datalink.h>
#include <net/tcp_states.h>
+#include <net/net_namespace.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_SYSCTL
-extern void ipx_register_sysctl(void);
-extern void ipx_unregister_sysctl(void);
-#else
-#define ipx_register_sysctl()
-#define ipx_unregister_sysctl()
-#endif
-
/* Configuration Variables */
static unsigned char ipxcfg_max_hops = 16;
static char ipxcfg_auto_select_primary;
@@ -84,15 +78,6 @@ DEFINE_SPINLOCK(ipx_interfaces_lock);
struct ipx_interface *ipx_primary_net;
struct ipx_interface *ipx_internal_net;
-extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
- unsigned char *node);
-extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
-extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct iovec *iov, size_t len, int noblock);
-extern int ipxrtr_route_skb(struct sk_buff *skb);
-extern struct ipx_route *ipxrtr_lookup(__be32 net);
-extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
-
struct ipx_interface *ipx_interfaces_head(void)
{
struct ipx_interface *rc = NULL;
@@ -1986,9 +1971,6 @@ static struct notifier_block ipx_dev_notifier = {
.notifier_call = ipxitf_device_event,
};
-extern struct datalink_proto *make_EII_client(void);
-extern void destroy_EII_client(struct datalink_proto *);
-
static const unsigned char ipx_8022_type = 0xE0;
static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
static const char ipx_EII_err_msg[] __initconst =
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 30f4519b092f..c1f03185c5e1 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -20,15 +20,11 @@ DEFINE_RWLOCK(ipx_routes_lock);
extern struct ipx_interface *ipx_internal_net;
-extern __be16 ipx_cksum(struct ipxhdr *packet, int length);
extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
struct sk_buff *skb, int copy);
extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
struct sk_buff *skb, int copy);
-extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb,
- char *node);
-extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
struct ipx_route *ipxrtr_lookup(__be32 net)
{
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f9ae9b85d4c1..453e974287d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1021,8 +1021,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_P2P_OPPPS_ENABLE_BIT;
err = ieee80211_assign_beacon(sdata, &params->beacon);
- if (err < 0)
+ if (err < 0) {
+ ieee80211_vif_release_channel(sdata);
return err;
+ }
changed |= err;
err = drv_start_ap(sdata->local, sdata);
@@ -1032,6 +1034,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
kfree_rcu(old, rcu_head);
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+ ieee80211_vif_release_channel(sdata);
return err;
}
@@ -1090,8 +1093,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree(sdata->u.ap.next_beacon);
sdata->u.ap.next_beacon = NULL;
- cancel_work_sync(&sdata->u.ap.request_smps_work);
-
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
@@ -1103,6 +1104,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
+ sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
__sta_info_flush(sdata, true);
ieee80211_free_keys(sdata, true);
@@ -2638,6 +2640,24 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
INIT_LIST_HEAD(&roc->dependents);
+ /*
+ * cookie is either the roc cookie (for normal roc)
+ * or the SKB (for mgmt TX)
+ */
+ if (!txskb) {
+ /* local->mtx protects this */
+ local->roc_cookie_counter++;
+ roc->cookie = local->roc_cookie_counter;
+ /* wow, you wrapped 64 bits ... more likely a bug */
+ if (WARN_ON(roc->cookie == 0)) {
+ roc->cookie = 1;
+ local->roc_cookie_counter++;
+ }
+ *cookie = roc->cookie;
+ } else {
+ *cookie = (unsigned long)txskb;
+ }
+
/* if there's one pending or we're scanning, queue this one */
if (!list_empty(&local->roc_list) ||
local->scanning || local->radar_detect_enabled)
@@ -2772,24 +2792,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
if (!queued)
list_add_tail(&roc->list, &local->roc_list);
- /*
- * cookie is either the roc cookie (for normal roc)
- * or the SKB (for mgmt TX)
- */
- if (!txskb) {
- /* local->mtx protects this */
- local->roc_cookie_counter++;
- roc->cookie = local->roc_cookie_counter;
- /* wow, you wrapped 64 bits ... more likely a bug */
- if (WARN_ON(roc->cookie == 0)) {
- roc->cookie = 1;
- local->roc_cookie_counter++;
- }
- *cookie = roc->cookie;
- } else {
- *cookie = (unsigned long)txskb;
- }
-
return 0;
}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index fab7b91923e0..70dd013de836 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -466,7 +466,9 @@ void ieee80211_request_smps_ap_work(struct work_struct *work)
u.ap.request_smps_work);
sdata_lock(sdata);
- __ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode);
+ if (sdata_dereference(sdata->u.ap.beacon, sdata))
+ __ieee80211_request_smps_ap(sdata,
+ sdata->u.ap.driver_smps_mode);
sdata_unlock(sdata);
}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 771080ec7212..2796a198728f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -695,12 +695,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
struct beacon_data *presp;
struct sta_info *sta;
- int active_ibss;
u16 capability;
- active_ibss = ieee80211_sta_active_ibss(sdata);
-
- if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+ if (!is_zero_ether_addr(ifibss->bssid)) {
capability = WLAN_CAPABILITY_IBSS;
if (ifibss->privacy)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3dfd20a453ab..d6d1f1df9119 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,20 +418,24 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
+ mutex_lock(&local->iflist_mtx);
+ rcu_assign_pointer(local->monitor_sdata, sdata);
+ mutex_unlock(&local->iflist_mtx);
+
mutex_lock(&local->mtx);
ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
IEEE80211_CHANCTX_EXCLUSIVE);
mutex_unlock(&local->mtx);
if (ret) {
+ mutex_lock(&local->iflist_mtx);
+ rcu_assign_pointer(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+ synchronize_net();
drv_remove_interface(local, sdata);
kfree(sdata);
return ret;
}
- mutex_lock(&local->iflist_mtx);
- rcu_assign_pointer(local->monitor_sdata, sdata);
- mutex_unlock(&local->iflist_mtx);
-
return 0;
}
@@ -770,12 +774,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_roc_purge(local, sdata);
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
ieee80211_mgd_stop(sdata);
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ break;
+ case NL80211_IFTYPE_ADHOC:
ieee80211_ibss_stop(sdata);
-
+ break;
+ case NL80211_IFTYPE_AP:
+ cancel_work_sync(&sdata->u.ap.request_smps_work);
+ break;
+ default:
+ break;
+ }
/*
* Remove all stations associated with this interface.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 27c990bf2320..97a02d3f7d87 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -878,7 +878,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
}
/* adjust first fragment's length */
- skb->len = hdrlen + per_fragm;
+ skb_trim(skb, hdrlen + per_fragm);
return 0;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c37467562fd0..e9410d17619d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -513,7 +513,6 @@ config NFT_QUEUE
config NFT_REJECT
depends on NF_TABLES
- depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6
default m if NETFILTER_ADVANCED=n
tristate "Netfilter nf_tables reject support"
help
@@ -521,6 +520,11 @@ config NFT_REJECT
explicitly deny and notify via TCP reset/ICMP informational errors
unallowed traffic.
+config NFT_REJECT_INET
+ depends on NF_TABLES_INET
+ default NFT_REJECT
+ tristate
+
config NFT_COMPAT
depends on NF_TABLES
depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ee9c4de5f8ed..bffdad774da7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
obj-$(CONFIG_NFT_NAT) += nft_nat.o
obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
+obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
obj-$(CONFIG_NFT_HASH) += nft_hash.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 59a1a85bcb3e..a8eb0a89326a 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
cp->protocol = p->protocol;
ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
- ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr);
- cp->vport = p->vport;
- /* proto should only be IPPROTO_IP if d_addr is a fwmark */
+ /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
- &cp->daddr, daddr);
+ &cp->vaddr, p->vaddr);
+ cp->vport = p->vport;
+ ip_vs_addr_set(p->af, &cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8824ed0ccc9c..356bef519fe5 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -312,6 +312,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+ const struct nf_conntrack_tuple *tuple,
+ u16 zone)
+{
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* A conntrack can be recreated with the equal tuple,
+ * so we need to check that the conntrack is confirmed
+ */
+ return nf_ct_tuple_equal(tuple, &h->tuple) &&
+ nf_ct_zone(ct) == zone &&
+ nf_ct_is_confirmed(ct);
+}
+
/*
* Warning :
* - Caller must take a reference on returned object
@@ -333,8 +348,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
local_bh_disable();
begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
- if (nf_ct_tuple_equal(tuple, &h->tuple) &&
- nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
+ if (nf_ct_key_equal(h, tuple, zone)) {
NF_CT_STAT_INC(net, found);
local_bh_enable();
return h;
@@ -372,8 +386,7 @@ begin:
!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
- if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
- nf_ct_zone(ct) != zone)) {
+ if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
nf_ct_put(ct);
goto begin;
}
@@ -435,7 +448,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
goto out;
add_timer(&ct->timeout);
- nf_conntrack_get(&ct->ct_general);
+ smp_wmb();
+ /* The caller holds a reference to this object */
+ atomic_set(&ct->ct_general.use, 2);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
NF_CT_STAT_INC(net, insert);
spin_unlock_bh(&nf_conntrack_lock);
@@ -449,6 +464,21 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+/* deletion from this larval template list happens via nf_ct_put() */
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
+{
+ __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
+ __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
+ nf_conntrack_get(&tmpl->ct_general);
+
+ spin_lock_bh(&nf_conntrack_lock);
+ /* Overload tuple linked list to put us in template list. */
+ hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+ &net->ct.tmpl);
+ spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
+
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
@@ -720,11 +750,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
nf_ct_zone->id = zone;
}
#endif
- /*
- * changes to lookup keys must be done before setting refcnt to 1
+ /* Because we use RCU lookups, we set ct_general.use to zero before
+ * this is inserted in any list.
*/
- smp_wmb();
- atomic_set(&ct->ct_general.use, 1);
+ atomic_set(&ct->ct_general.use, 0);
return ct;
#ifdef CONFIG_NF_CONNTRACK_ZONES
@@ -748,6 +777,11 @@ void nf_conntrack_free(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
+ /* A freed object has refcnt == 0, that's
+ * the golden rule for SLAB_DESTROY_BY_RCU
+ */
+ NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
+
nf_ct_ext_destroy(ct);
nf_ct_ext_free(ct);
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
@@ -843,6 +877,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
NF_CT_STAT_INC(net, new);
}
+ /* Now it is inserted into the unconfirmed list, bump refcount */
+ nf_conntrack_get(&ct->ct_general);
+
/* Overload tuple linked list to put us in unconfirmed list. */
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&net->ct.unconfirmed);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 9858e3e51a3a..52e20c9a46a5 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -363,9 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
goto err2;
if (!nfct_synproxy_ext_add(ct))
goto err2;
- __set_bit(IPS_TEMPLATE_BIT, &ct->status);
- __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ nf_conntrack_tmpl_insert(net, ct);
snet->tmpl = ct;
snet->stats = alloc_percpu(struct synproxy_stats);
@@ -390,7 +389,7 @@ static void __net_exit synproxy_net_exit(struct net *net)
{
struct synproxy_net *snet = synproxy_pernet(net);
- nf_conntrack_free(snet->tmpl);
+ nf_ct_put(snet->tmpl);
synproxy_proc_exit(net);
free_percpu(snet->stats);
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 117bbaaddde6..adce01e8bb57 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1008,10 +1008,8 @@ notify:
return 0;
}
-static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+static void nf_tables_chain_destroy(struct nft_chain *chain)
{
- struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
-
BUG_ON(chain->use > 0);
if (chain->flags & NFT_BASE_CHAIN) {
@@ -1045,7 +1043,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(chain))
return PTR_ERR(chain);
- if (!list_empty(&chain->rules))
+ if (!list_empty(&chain->rules) || chain->use > 0)
return -EBUSY;
list_del(&chain->list);
@@ -1059,7 +1057,9 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
family);
/* Make sure all rule references are gone before this is released */
- call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+ synchronize_rcu();
+
+ nf_tables_chain_destroy(chain);
return 0;
}
@@ -1114,35 +1114,45 @@ void nft_unregister_expr(struct nft_expr_type *type)
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);
-static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+static const struct nft_expr_type *__nft_expr_type_get(u8 family,
+ struct nlattr *nla)
{
const struct nft_expr_type *type;
list_for_each_entry(type, &nf_tables_expressions, list) {
- if (!nla_strcmp(nla, type->name))
+ if (!nla_strcmp(nla, type->name) &&
+ (!type->family || type->family == family))
return type;
}
return NULL;
}
-static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+static const struct nft_expr_type *nft_expr_type_get(u8 family,
+ struct nlattr *nla)
{
const struct nft_expr_type *type;
if (nla == NULL)
return ERR_PTR(-EINVAL);
- type = __nft_expr_type_get(nla);
+ type = __nft_expr_type_get(family, nla);
if (type != NULL && try_module_get(type->owner))
return type;
#ifdef CONFIG_MODULES
if (type == NULL) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nft-expr-%u-%.*s", family,
+ nla_len(nla), (char *)nla_data(nla));
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (__nft_expr_type_get(family, nla))
+ return ERR_PTR(-EAGAIN);
+
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
request_module("nft-expr-%.*s",
nla_len(nla), (char *)nla_data(nla));
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- if (__nft_expr_type_get(nla))
+ if (__nft_expr_type_get(family, nla))
return ERR_PTR(-EAGAIN);
}
#endif
@@ -1193,7 +1203,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
if (err < 0)
return err;
- type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+ type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
if (IS_ERR(type))
return PTR_ERR(type);
@@ -1521,9 +1531,8 @@ err:
return err;
}
-static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+static void nf_tables_rule_destroy(struct nft_rule *rule)
{
- struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
struct nft_expr *expr;
/*
@@ -1538,11 +1547,6 @@ static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
kfree(rule);
}
-static void nf_tables_rule_destroy(struct nft_rule *rule)
-{
- call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
-}
-
#define NFT_RULE_MAXEXPRS 128
static struct nft_expr_info *info;
@@ -1809,9 +1813,6 @@ static int nf_tables_commit(struct sk_buff *skb)
synchronize_rcu();
list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
- /* Delete this rule from the dirty list */
- list_del(&rupd->list);
-
/* This rule was inactive in the past and just became active.
* Clear the next bit of the genmask since its meaning has
* changed, now it is the future.
@@ -1822,6 +1823,7 @@ static int nf_tables_commit(struct sk_buff *skb)
rupd->chain, rupd->rule,
NFT_MSG_NEWRULE, 0,
rupd->family);
+ list_del(&rupd->list);
kfree(rupd);
continue;
}
@@ -1831,7 +1833,15 @@ static int nf_tables_commit(struct sk_buff *skb)
nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
rupd->rule, NFT_MSG_DELRULE, 0,
rupd->family);
+ }
+
+ /* Make sure we don't see any packet traversing old rules */
+ synchronize_rcu();
+
+ /* Now we can safely release unused old rules */
+ list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
nf_tables_rule_destroy(rupd->rule);
+ list_del(&rupd->list);
kfree(rupd);
}
@@ -1844,20 +1854,26 @@ static int nf_tables_abort(struct sk_buff *skb)
struct nft_rule_trans *rupd, *tmp;
list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
- /* Delete all rules from the dirty list */
- list_del(&rupd->list);
-
if (!nft_rule_is_active_next(net, rupd->rule)) {
nft_rule_clear(net, rupd->rule);
+ list_del(&rupd->list);
kfree(rupd);
continue;
}
/* This rule is inactive, get rid of it */
list_del_rcu(&rupd->rule->list);
+ }
+
+ /* Make sure we don't see any packet accessing aborted rules */
+ synchronize_rcu();
+
+ list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
nf_tables_rule_destroy(rupd->rule);
+ list_del(&rupd->list);
kfree(rupd);
}
+
return 0;
}
@@ -1943,6 +1959,9 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
}
if (nla[NFTA_SET_TABLE] != NULL) {
+ if (afi == NULL)
+ return -EAFNOSUPPORT;
+
table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -1989,13 +2008,13 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
if (!sscanf(i->name, name, &tmp))
continue;
- if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE)
+ if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
continue;
set_bit(tmp, inuse);
}
- n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE);
+ n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
free_page((unsigned long)inuse);
}
@@ -2428,6 +2447,8 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
struct nft_ctx ctx;
int err;
+ if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+ return -EAFNOSUPPORT;
if (nla[NFTA_SET_TABLE] == NULL)
return -EINVAL;
@@ -2435,9 +2456,6 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
return err;
- if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
- return -EAFNOSUPPORT;
-
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -2723,6 +2741,9 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(elem.flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
+ if (nla[NFTA_SET_ELEM_DATA] != NULL &&
+ elem.flags & NFT_SET_ELEM_INTERVAL_END)
+ return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
@@ -2977,6 +2998,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
const struct nft_set_elem *elem)
{
+ if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
+ return 0;
+
switch (elem->data.verdict) {
case NFT_JUMP:
case NFT_GOTO:
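
The commit/abort rework in nf_tables_api.c above stops freeing rules from an RCU callback and instead unlinks everything, issues one synchronize_rcu() for the whole transaction, and only then destroys the unlinked rules. The single-object form of that pattern is (sketch with made-up names; the real code batches the grace period across all rules of the transaction):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct rule {
		struct list_head list;
		/* expressions, handle, ... */
	};

	/* Caller holds the update-side lock that serializes writers. */
	static void destroy_rule(struct rule *r)
	{
		list_del_rcu(&r->list);	/* concurrent readers may still see r */
		synchronize_rcu();	/* wait until no reader can reference r */
		kfree(r);		/* now safe to free synchronously */
	}

Batching the grace period keeps large transactions cheap; the trade-off is that commit/abort now block for at least one grace period instead of returning immediately and deferring the frees to call_rcu().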
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 0d879fcb8763..90998a6ff8b9 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -103,9 +103,9 @@ static struct nf_loginfo trace_loginfo = {
},
};
-static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
- const struct nft_chain *chain,
- int rulenum, enum nft_trace type)
+static void nft_trace_packet(const struct nft_pktinfo *pkt,
+ const struct nft_chain *chain,
+ int rulenum, enum nft_trace type)
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 917052e20602..46e275403838 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -226,6 +226,7 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
break;
+ case NFT_CT_L3PROTOCOL:
case NFT_CT_PROTOCOL:
case NFT_CT_SRC:
case NFT_CT_DST:
@@ -311,8 +312,19 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
goto nla_put_failure;
- if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
- goto nla_put_failure;
+
+ switch (priv->key) {
+ case NFT_CT_PROTOCOL:
+ case NFT_CT_SRC:
+ case NFT_CT_DST:
+ case NFT_CT_PROTO_SRC:
+ case NFT_CT_PROTO_DST:
+ if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+ goto nla_put_failure;
+ default:
+ break;
+ }
+
return 0;
nla_put_failure:
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5af790123ad8..26c5154e05f3 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -23,7 +23,6 @@ static const char *nft_log_null_prefix = "";
struct nft_log {
struct nf_loginfo loginfo;
char *prefix;
- int family;
};
static void nft_log_eval(const struct nft_expr *expr,
@@ -33,7 +32,7 @@ static void nft_log_eval(const struct nft_expr *expr,
const struct nft_log *priv = nft_expr_priv(expr);
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in,
+ nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &priv->loginfo, "%s", priv->prefix);
}
@@ -52,8 +51,6 @@ static int nft_log_init(const struct nft_ctx *ctx,
struct nf_loginfo *li = &priv->loginfo;
const struct nlattr *nla;
- priv->family = ctx->afi->family;
-
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 8a6116b75b5a..bb4ef4cccb6e 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -16,6 +16,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
struct nft_lookup {
struct nft_set *set;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index cbea473d69e9..e8ae2f6bf232 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -25,7 +25,6 @@ struct nft_queue {
u16 queuenum;
u16 queues_total;
u16 flags;
- u8 family;
};
static void nft_queue_eval(const struct nft_expr *expr,
@@ -43,7 +42,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
queue = priv->queuenum + cpu % priv->queues_total;
} else {
queue = nfqueue_hash(pkt->skb, queue,
- priv->queues_total, priv->family,
+ priv->queues_total, pkt->ops->pf,
jhash_initval);
}
}
@@ -71,7 +70,6 @@ static int nft_queue_init(const struct nft_ctx *ctx,
return -EINVAL;
init_hashrandom(&jhash_initval);
- priv->family = ctx->afi->family;
priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
if (tb[NFTA_QUEUE_TOTAL] != NULL)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b231bfe..e21d69d13506 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
struct nft_rbtree_elem *rbe)
{
nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_uninit(rbe->data, set->dtype);
+
kfree(rbe);
}
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
int err;
size = sizeof(*rbe);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
size += sizeof(rbe->data[0]);
rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
rbe->flags = elem->flags;
nft_data_copy(&rbe->key, &elem->key);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_copy(rbe->data, &elem->data);
err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
parent = parent->rb_right;
else {
elem->cookie = rbe;
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_copy(&elem->data, rbe->data);
elem->flags = rbe->flags;
return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
rbe = rb_entry(node, struct nft_rbtree_elem, node);
nft_data_copy(&elem.key, &rbe->key);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_copy(&elem.data, rbe->data);
elem.flags = rbe->flags;
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 5e204711d704..f3448c296446 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -16,65 +16,23 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
-#include <net/icmp.h>
-#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-#include <net/netfilter/ipv6/nf_reject.h>
-#endif
-
-struct nft_reject {
- enum nft_reject_types type:8;
- u8 icmp_code;
- u8 family;
-};
-
-static void nft_reject_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
-{
- struct nft_reject *priv = nft_expr_priv(expr);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
-#endif
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- if (priv->family == NFPROTO_IPV4)
- nf_send_unreach(pkt->skb, priv->icmp_code);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- else if (priv->family == NFPROTO_IPV6)
- nf_send_unreach6(net, pkt->skb, priv->icmp_code,
- pkt->ops->hooknum);
-#endif
- break;
- case NFT_REJECT_TCP_RST:
- if (priv->family == NFPROTO_IPV4)
- nf_send_reset(pkt->skb, pkt->ops->hooknum);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- else if (priv->family == NFPROTO_IPV6)
- nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
-#endif
- break;
- }
-
- data[NFT_REG_VERDICT].verdict = NF_DROP;
-}
-
-static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
[NFTA_REJECT_TYPE] = { .type = NLA_U32 },
[NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 },
};
+EXPORT_SYMBOL_GPL(nft_reject_policy);
-static int nft_reject_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
- priv->family = ctx->afi->family;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
@@ -89,8 +47,9 @@ static int nft_reject_init(const struct nft_ctx *ctx,
return 0;
}
+EXPORT_SYMBOL_GPL(nft_reject_init);
-static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
@@ -109,37 +68,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
nla_put_failure:
return -1;
}
-
-static struct nft_expr_type nft_reject_type;
-static const struct nft_expr_ops nft_reject_ops = {
- .type = &nft_reject_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
- .eval = nft_reject_eval,
- .init = nft_reject_init,
- .dump = nft_reject_dump,
-};
-
-static struct nft_expr_type nft_reject_type __read_mostly = {
- .name = "reject",
- .ops = &nft_reject_ops,
- .policy = nft_reject_policy,
- .maxattr = NFTA_REJECT_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_reject_module_init(void)
-{
- return nft_register_expr(&nft_reject_type);
-}
-
-static void __exit nft_reject_module_exit(void)
-{
- nft_unregister_expr(&nft_reject_type);
-}
-
-module_init(nft_reject_module_init);
-module_exit(nft_reject_module_exit);
+EXPORT_SYMBOL_GPL(nft_reject_dump);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
new file mode 100644
index 000000000000..8a310f239c93
--- /dev/null
+++ b/net/netfilter/nft_reject_inet.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+
+static void nft_reject_inet_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ switch (pkt->ops->pf) {
+ case NFPROTO_IPV4:
+ nft_reject_ipv4_eval(expr, data, pkt);
+ case NFPROTO_IPV6:
+ nft_reject_ipv6_eval(expr, data, pkt);
+ }
+}
+
+static struct nft_expr_type nft_reject_inet_type;
+static const struct nft_expr_ops nft_reject_inet_ops = {
+ .type = &nft_reject_inet_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_inet_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_inet_type __read_mostly = {
+ .family = NFPROTO_INET,
+ .name = "reject",
+ .ops = &nft_reject_inet_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_inet_module_init(void)
+{
+ return nft_register_expr(&nft_reject_inet_type);
+}
+
+static void __exit nft_reject_inet_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_inet_type);
+}
+
+module_init(nft_reject_inet_module_init);
+module_exit(nft_reject_inet_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5929be622c5c..75747aecdebe 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -228,12 +228,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
goto err3;
}
- __set_bit(IPS_TEMPLATE_BIT, &ct->status);
- __set_bit(IPS_CONFIRMED_BIT, &ct->status);
-
- /* Overload tuple linked list to put us in template list. */
- hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
- &par->net->ct.tmpl);
+ nf_conntrack_tmpl_insert(par->net, ct);
out:
info->ct = ct;
return 0;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index df4692826ead..e9a48baf8551 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
#include "datapath.h"
#include "flow.h"
+#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
@@ -160,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -466,6 +466,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
skb_zerocopy(user_skb, skb, skb->len, hlen);
+ /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
+ if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+ size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
+
+ if (plen > 0)
+ memset(skb_put(user_skb, plen), 0, plen);
+ }
+
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
@@ -852,11 +860,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
/* The unmasked key has to be the same for flow updates. */
- error = -EINVAL;
- if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
- OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+ if (!ovs_flow_cmp_unmasked_key(flow, &match))
goto err_unlock_ovs;
- }
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
@@ -1079,6 +1084,7 @@ static size_t ovs_dp_cmd_msg_size(void)
msgsize += nla_total_size(IFNAMSIZ);
msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
+ msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
return msgsize;
}
@@ -1279,7 +1285,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(&dp->table);
+ ovs_flow_tbl_destroy(&dp->table, false);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
@@ -1306,10 +1312,13 @@ static void __dp_destroy(struct datapath *dp)
list_del_rcu(&dp->list_node);
/* OVSP_LOCAL is datapath internal port. We need to make sure that
- * all port in datapath are destroyed first before freeing datapath.
+ * all ports in datapath are destroyed first before freeing datapath.
*/
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
+ /* RCU destroy the flow table */
+ ovs_flow_tbl_destroy(&dp->table, true);
+
call_rcu(&dp->rcu, destroy_dp_rcu);
}
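
Note: the queue_userspace_packet() hunk above pads OVS_PACKET_ATTR_PACKET out to the 4-byte netlink boundary whenever the datapath was not opened with OVS_DP_F_UNALIGNED. A minimal, self-contained sketch of that padding arithmetic follows; the NLA_ALIGN definition mirrors the uapi macro and the sample lengths are made up:

#include <stdio.h>

/* mirrors the uapi netlink alignment macros */
#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

int main(void)
{
	/* made-up payload lengths, purely illustrative */
	unsigned int lens[] = { 60, 61, 62, 63, 64 };
	unsigned int i;

	for (i = 0; i < 5; i++) {
		unsigned int plen = NLA_ALIGN(lens[i]) - lens[i];

		/* plen is what the kernel memset()s to zero after skb_put() */
		printf("len=%u aligned=%u pad=%u\n",
		       lens[i], NLA_ALIGN(lens[i]), plen);
	}
	return 0;
}
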
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c58a0fe3c889..3c268b3d71c3 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -153,29 +153,29 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
flow_free(flow);
}
-static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
-{
- if (!mask)
- return;
-
- BUG_ON(!mask->ref_count);
- mask->ref_count--;
-
- if (!mask->ref_count) {
- list_del_rcu(&mask->list);
- if (deferred)
- kfree_rcu(mask, rcu);
- else
- kfree(mask);
- }
-}
-
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
if (!flow)
return;
- flow_mask_del_ref(flow->mask, deferred);
+ if (flow->mask) {
+ struct sw_flow_mask *mask = flow->mask;
+
+ /* ovs-lock is required to protect mask-refcount and
+ * mask list.
+ */
+ ASSERT_OVSL();
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count) {
+ list_del_rcu(&mask->list);
+ if (deferred)
+ kfree_rcu(mask, rcu);
+ else
+ kfree(mask);
+ }
+ }
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -188,26 +188,9 @@ static void free_buckets(struct flex_array *buckets)
flex_array_free(buckets);
}
+
static void __table_instance_destroy(struct table_instance *ti)
{
- int i;
-
- if (ti->keep_flows)
- goto skip_flows;
-
- for (i = 0; i < ti->n_buckets; i++) {
- struct sw_flow *flow;
- struct hlist_head *head = flex_array_get(ti->buckets, i);
- struct hlist_node *n;
- int ver = ti->node_ver;
-
- hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del(&flow->hash_node[ver]);
- ovs_flow_free(flow, false);
- }
- }
-
-skip_flows:
free_buckets(ti->buckets);
kfree(ti);
}
@@ -258,20 +241,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
+ int i;
+
if (!ti)
return;
+ if (ti->keep_flows)
+ goto skip_flows;
+
+ for (i = 0; i < ti->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head = flex_array_get(ti->buckets, i);
+ struct hlist_node *n;
+ int ver = ti->node_ver;
+
+ hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+ hlist_del_rcu(&flow->hash_node[ver]);
+ ovs_flow_free(flow, deferred);
+ }
+ }
+
+skip_flows:
if (deferred)
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
else
__table_instance_destroy(ti);
}
-void ovs_flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
struct table_instance *ti = ovsl_dereference(table->ti);
- table_instance_destroy(ti, false);
+ table_instance_destroy(ti, deferred);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -504,16 +505,11 @@ static struct sw_flow_mask *mask_alloc(void)
mask = kmalloc(sizeof(*mask), GFP_KERNEL);
if (mask)
- mask->ref_count = 0;
+ mask->ref_count = 1;
return mask;
}
-static void mask_add_ref(struct sw_flow_mask *mask)
-{
- mask->ref_count++;
-}
-
static bool mask_equal(const struct sw_flow_mask *a,
const struct sw_flow_mask *b)
{
@@ -554,9 +550,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
mask->key = new->key;
mask->range = new->range;
list_add_rcu(&mask->list, &tbl->mask_list);
+ } else {
+ BUG_ON(!mask->ref_count);
+ mask->ref_count++;
}
- mask_add_ref(mask);
flow->mask = mask;
return 0;
}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1996e34c0fd8..baaeb101924d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,7 +60,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
int ovs_flow_tbl_init(struct flow_table *);
int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
int ovs_flow_tbl_flush(struct flow_table *flow_table);
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
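
Note: with the flow_table.c change above, a mask leaves mask_alloc() already holding ref_count == 1, gains a reference only when flow_mask_insert() reuses an existing mask, and is dropped inline in ovs_flow_free() under ovs_lock (ASSERT_OVSL) instead of via the removed flow_mask_del_ref()/mask_add_ref() helpers. A toy model of that lifetime rule, with stand-in names and plain malloc/free rather than the kernel API:

#include <assert.h>
#include <stdlib.h>

struct mask {
	int ref_count;
};

static struct mask *mask_alloc(void)
{
	struct mask *m = calloc(1, sizeof(*m));

	if (m)
		m->ref_count = 1;	/* allocation hands out the first reference */
	return m;
}

static void mask_reuse(struct mask *m)
{
	assert(m->ref_count > 0);	/* only a live mask may be reused */
	m->ref_count++;
}

static void mask_put(struct mask *m)
{
	assert(m->ref_count > 0);
	if (--m->ref_count == 0)
		free(m);		/* last flow referencing the mask */
}

int main(void)
{
	struct mask *m = mask_alloc();	/* first flow creates the mask */

	mask_reuse(m);			/* second flow matches the same mask */
	mask_put(m);			/* first flow freed */
	mask_put(m);			/* last reference drops, mask freed */
	return 0;
}
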
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0f6259a6a932..2b1738ef9394 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -662,6 +662,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
*/
sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
+ newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+
sk_refcnt_debug_inc(newsk);
if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 80a6640f329b..06c6ff0cb911 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -571,7 +571,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
}
}
-int svc_alloc_arg(struct svc_rqst *rqstp)
+static int svc_alloc_arg(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
struct xdr_buf *arg;
@@ -612,7 +612,7 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
return 0;
}
-struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
struct svc_xprt *xprt;
struct svc_pool *pool = rqstp->rq_pool;
@@ -691,7 +691,7 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
return xprt;
}
-void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
+static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
spin_lock_bh(&serv->sv_lock);
set_bit(XPT_TEMP, &newxpt->xpt_flags);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d89dee2259b5..010892b81a06 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -203,8 +203,11 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
rdev->opencount--;
- WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev &&
- !rdev->scan_req->notified);
+ if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+ if (WARN_ON(!rdev->scan_req->notified))
+ rdev->scan_req->aborted = true;
+ ___cfg80211_scan_done(rdev, false);
+ }
}
static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -440,9 +443,6 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
- /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
- wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
-
/*
* There are major locking problems in nl80211/mac80211 for CSA,
* disable for all drivers until this has been reworked.
@@ -859,8 +859,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
break;
case NETDEV_DOWN:
cfg80211_update_iface_num(rdev, wdev->iftype, -1);
- WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev &&
- !rdev->scan_req->notified);
+ if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+ if (WARN_ON(!rdev->scan_req->notified))
+ rdev->scan_req->aborted = true;
+ ___cfg80211_scan_done(rdev, false);
+ }
if (WARN_ON(rdev->sched_scan_req &&
rdev->sched_scan_req->dev == wdev->netdev)) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 37ec16d7bb1a..f1d193b557b6 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -62,6 +62,7 @@ struct cfg80211_registered_device {
struct rb_root bss_tree;
u32 bss_generation;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
+ struct sk_buff *scan_msg;
struct cfg80211_sched_scan_request *sched_scan_req;
unsigned long suspend_at;
struct work_struct scan_done_wk;
@@ -361,7 +362,8 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr);
void __cfg80211_scan_done(struct work_struct *wk);
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev);
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+ bool send_message);
void __cfg80211_sched_scan_results(struct work_struct *wk);
int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
bool driver_initiated);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7a742594916e..4fe2e6e2bc76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1719,9 +1719,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
* We can then retry with the larger buffer.
*/
if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
- !skb->len &&
+ !skb->len && !state->split &&
cb->min_dump_alloc < 4096) {
cb->min_dump_alloc = 4096;
+ state->split_start = 0;
rtnl_unlock();
return 1;
}
@@ -5244,7 +5245,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->scan)
return -EOPNOTSUPP;
- if (rdev->scan_req) {
+ if (rdev->scan_req || rdev->scan_msg) {
err = -EBUSY;
goto unlock;
}
@@ -10011,40 +10012,31 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
-void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev)
+struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, bool aborted)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
- return;
+ return NULL;
if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
- NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
+ aborted ? NL80211_CMD_SCAN_ABORTED :
+ NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
nlmsg_free(msg);
- return;
+ return NULL;
}
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
- NL80211_MCGRP_SCAN, GFP_KERNEL);
+ return msg;
}
-void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev)
+void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
{
- struct sk_buff *msg;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
- NL80211_CMD_SCAN_ABORTED) < 0) {
- nlmsg_free(msg);
- return;
- }
-
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b1b231324e10..75799746d845 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -8,10 +8,10 @@ void nl80211_exit(void);
void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
-void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev);
-void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev);
+struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, bool aborted);
+void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg);
void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 cmd);
void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b528e31da2cf..d1ed4aebbbb7 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -161,18 +161,25 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
dev->bss_generation++;
}
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+ bool send_message)
{
struct cfg80211_scan_request *request;
struct wireless_dev *wdev;
+ struct sk_buff *msg;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
ASSERT_RTNL();
- request = rdev->scan_req;
+ if (rdev->scan_msg) {
+ nl80211_send_scan_result(rdev, rdev->scan_msg);
+ rdev->scan_msg = NULL;
+ return;
+ }
+ request = rdev->scan_req;
if (!request)
return;
@@ -186,18 +193,16 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
if (wdev->netdev)
cfg80211_sme_scan_done(wdev->netdev);
- if (request->aborted) {
- nl80211_send_scan_aborted(rdev, wdev);
- } else {
- if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
- /* flush entries from previous scans */
- spin_lock_bh(&rdev->bss_lock);
- __cfg80211_bss_expire(rdev, request->scan_start);
- spin_unlock_bh(&rdev->bss_lock);
- }
- nl80211_send_scan_done(rdev, wdev);
+ if (!request->aborted &&
+ request->flags & NL80211_SCAN_FLAG_FLUSH) {
+ /* flush entries from previous scans */
+ spin_lock_bh(&rdev->bss_lock);
+ __cfg80211_bss_expire(rdev, request->scan_start);
+ spin_unlock_bh(&rdev->bss_lock);
}
+ msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
+
#ifdef CONFIG_CFG80211_WEXT
if (wdev->netdev && !request->aborted) {
memset(&wrqu, 0, sizeof(wrqu));
@@ -211,6 +216,11 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
rdev->scan_req = NULL;
kfree(request);
+
+ if (!send_message)
+ rdev->scan_msg = msg;
+ else
+ nl80211_send_scan_result(rdev, msg);
}
void __cfg80211_scan_done(struct work_struct *wk)
@@ -221,7 +231,7 @@ void __cfg80211_scan_done(struct work_struct *wk)
scan_done_wk);
rtnl_lock();
- ___cfg80211_scan_done(rdev);
+ ___cfg80211_scan_done(rdev, true);
rtnl_unlock();
}
@@ -1079,7 +1089,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- if (rdev->scan_req) {
+ if (rdev->scan_req || rdev->scan_msg) {
err = -EBUSY;
goto out;
}
@@ -1481,7 +1491,7 @@ int cfg80211_wext_giwscan(struct net_device *dev,
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- if (rdev->scan_req)
+ if (rdev->scan_req || rdev->scan_msg)
return -EAGAIN;
res = ieee80211_scan_results(rdev, info, extra, data->length);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a63509118508..f04d4c32e96e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -67,7 +67,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
ASSERT_RDEV_LOCK(rdev);
ASSERT_WDEV_LOCK(wdev);
- if (rdev->scan_req)
+ if (rdev->scan_req || rdev->scan_msg)
return -EBUSY;
if (wdev->conn->params.channel)
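
Note: the cfg80211/nl80211 series above builds the scan-done (or scan-aborted) netlink message while rdev->scan_req is still valid and either multicasts it right away or parks it in the new rdev->scan_msg field until the deferred scan work runs; every "scan in progress?" check now also treats a pending scan_msg as busy. A stripped-down model of that build-now/send-later flow, with all names standing in for the real structures:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct msg { bool aborted; };

struct dev {
	struct msg *pending;		/* analogous to rdev->scan_msg */
};

static struct msg *build_msg(bool aborted)
{
	struct msg *m = malloc(sizeof(*m));

	if (m)
		m->aborted = aborted;
	return m;
}

static void send_msg(struct msg *m)
{
	if (!m)
		return;
	printf("scan %s\n", m->aborted ? "aborted" : "done");
	free(m);
}

static void scan_done(struct dev *d, bool aborted, bool send_now)
{
	struct msg *m;

	/* a message deferred by an earlier call is flushed first */
	if (d->pending) {
		send_msg(d->pending);
		d->pending = NULL;
		return;
	}

	m = build_msg(aborted);
	if (send_now)
		send_msg(m);
	else
		d->pending = m;		/* userspace still sees "busy" until sent */
}

int main(void)
{
	struct dev d = { .pending = NULL };

	scan_done(&d, true, false);	/* netdev going down: build and defer */
	scan_done(&d, false, true);	/* scan work item: flush the pending msg */
	return 0;
}
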
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ea2a1e24ade..464dcef79b35 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -471,7 +471,7 @@ sub seed_camelcase_includes {
$camelcase_seeded = 1;
- if (-d ".git") {
+ if (-e ".git") {
my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
chomp $git_last_include_commit;
$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
@@ -499,7 +499,7 @@ sub seed_camelcase_includes {
return;
}
- if (-d ".git") {
+ if (-e ".git") {
$files = `git ls-files "include/*.h"`;
@include_files = split('\n', $files);
}
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 9c3986f4140c..41987885bd31 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -95,7 +95,7 @@ my %VCS_cmds;
my %VCS_cmds_git = (
"execute_cmd" => \&git_execute_cmd,
- "available" => '(which("git") ne "") && (-d ".git")',
+ "available" => '(which("git") ne "") && (-e ".git")',
"find_signers_cmd" =>
"git log --no-color --follow --since=\$email_git_since " .
'--numstat --no-merges ' .
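
Note: both script changes above replace Perl's -d (is a directory) test on ".git" with -e (exists), because in a git worktree or submodule ".git" is a regular file that points at the real repository. A rough C analogue of the distinction, purely illustrative:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat(".git", &st) != 0) {
		printf(".git does not exist\n");	/* neither -d nor -e */
		return 0;
	}
	/* -e is now true; -d only if it is really a directory */
	printf(".git exists, %s a directory\n",
	       S_ISDIR(st.st_mode) ? "and is" : "but is not");
	return 0;
}
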
diff --git a/security/Kconfig b/security/Kconfig
index e9c6ac724fef..beb86b500adf 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -103,7 +103,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
- default 32768 if ARM
+ default 32768 if ARM || (ARM64 && COMPAT)
default 65536
help
This is the portion of low virtual memory which should be protected
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 332ac8a80cf5..2df7b900e259 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -17,6 +17,7 @@
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>
+#include <linux/sock_diag.h>
#include "flask.h"
#include "av_permissions.h"
@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c93c21127f0c..5d0144ee8ed6 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1232,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
struct context context;
int rc = 0;
+ /* An empty security context is never valid. */
+ if (!scontext_len)
+ return -EINVAL;
+
if (!ss_initialized) {
int i;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ec4536c8d8d4..dafcf82139e2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -932,7 +932,7 @@ int snd_hda_bus_new(struct snd_card *card,
}
EXPORT_SYMBOL_GPL(snd_hda_bus_new);
-#ifdef CONFIG_SND_HDA_GENERIC
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
#define is_generic_config(codec) \
(codec->modelname && !strcmp(codec->modelname, "generic"))
#else
@@ -1339,23 +1339,15 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid)
/*
* Dynamic symbol binding for the codec parsers
*/
-#ifdef MODULE
-#define load_parser_sym(sym) ((int (*)(struct hda_codec *))symbol_request(sym))
-#define unload_parser_addr(addr) symbol_put_addr(addr)
-#else
-#define load_parser_sym(sym) (sym)
-#define unload_parser_addr(addr) do {} while (0)
-#endif
#define load_parser(codec, sym) \
- ((codec)->parser = load_parser_sym(sym))
+ ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym))
static void unload_parser(struct hda_codec *codec)
{
- if (codec->parser) {
- unload_parser_addr(codec->parser);
- codec->parser = NULL;
- }
+ if (codec->parser)
+ symbol_put_addr(codec->parser);
+ codec->parser = NULL;
}
/*
@@ -1570,7 +1562,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets);
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
static bool is_likely_hdmi_codec(struct hda_codec *codec)
{
@@ -1620,12 +1612,20 @@ int snd_hda_codec_configure(struct hda_codec *codec)
patch = codec->preset->patch;
if (!patch) {
unload_parser(codec); /* to be sure */
- if (is_likely_hdmi_codec(codec))
+ if (is_likely_hdmi_codec(codec)) {
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
patch = load_parser(codec, snd_hda_parse_hdmi_codec);
-#ifdef CONFIG_SND_HDA_GENERIC
- if (!patch)
+#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI)
+ patch = snd_hda_parse_hdmi_codec;
+#endif
+ }
+ if (!patch) {
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
patch = load_parser(codec, snd_hda_parse_generic_codec);
+#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC)
+ patch = snd_hda_parse_generic_codec;
#endif
+ }
if (!patch) {
printk(KERN_ERR "hda-codec: No codec parser is available\n");
return -ENODEV;
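
Note: the hda_codec.c hunks above replace plain #ifdef CONFIG_* tests with IS_ENABLED()/IS_MODULE()/IS_BUILTIN() so that tristate options built as modules (=m) are handled as well as built-in ones (=y). A simplified stand-in for the kconfig.h helpers shows why IS_ENABLED() also fires for the modular case; the CONFIG_..._MODULE define below simulates an =m build and the macro plumbing is modelled on, but not copied verbatim from, include/linux/kconfig.h:

#include <stdio.h>

/* pretend this build has the HDMI codec parser as a module (=m) */
#define CONFIG_SND_HDA_CODEC_HDMI_MODULE 1

/* simplified stand-ins for the kconfig.h helpers */
#define __ARG_PLACEHOLDER_1			0,
#define __take_second_arg(__ignored, val, ...)	val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	printf("builtin=%d module=%d enabled=%d\n",
	       IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI),
	       IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI),
	       IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI));
	return 0;
}

With the _MODULE define in place this prints builtin=0 module=1 enabled=1, which is exactly the configuration a plain #ifdef CONFIG_SND_HDA_CODEC_HDMI would have missed.
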
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8321a97d5c05..d9a09bdd09db 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3269,7 +3269,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
mutex_unlock(&codec->control_mutex);
snd_hda_codec_flush_cache(codec); /* flush the updates */
if (err >= 0 && spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return err;
}
@@ -3390,7 +3390,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
return ret;
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return ret;
}
@@ -3795,7 +3795,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
return 0;
snd_hda_activate_path(codec, path, true, false);
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
path_power_down_sync(codec, old_path);
return 1;
}
@@ -5270,7 +5270,7 @@ static void init_input_src(struct hda_codec *codec)
}
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
}
/* set right pin controls for digital I/O */
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 07f767231c9f..c908afbe4d94 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -274,6 +274,7 @@ struct hda_gen_spec {
void (*init_hook)(struct hda_codec *codec);
void (*automute_hook)(struct hda_codec *codec);
void (*cap_sync_hook)(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
/* PCM hooks */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fa2879a21a50..e354ab1ec20f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -198,7 +198,7 @@ MODULE_DESCRIPTION("Intel HDA driver");
#endif
#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
#define SUPPORT_VGA_SWITCHEROO
#endif
#endif
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 7a426ed491f2..df3652ad15ef 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -244,6 +244,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
}
}
+/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
+static void ad1986a_fixup_eapd(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct ad198x_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ codec->inv_eapd = 0;
+ spec->gen.keep_eapd_on = 1;
+ spec->eapd_nid = 0x1b;
+ }
+}
+
enum {
AD1986A_FIXUP_INV_JACK_DETECT,
AD1986A_FIXUP_ULTRA,
@@ -251,6 +264,7 @@ enum {
AD1986A_FIXUP_3STACK,
AD1986A_FIXUP_LAPTOP,
AD1986A_FIXUP_LAPTOP_IMIC,
+ AD1986A_FIXUP_EAPD,
};
static const struct hda_fixup ad1986a_fixups[] = {
@@ -311,6 +325,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
.chained_before = 1,
.chain_id = AD1986A_FIXUP_LAPTOP,
},
+ [AD1986A_FIXUP_EAPD] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = ad1986a_fixup_eapd,
+ },
};
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -318,6 +336,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
@@ -472,6 +491,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
static int patch_ad1983(struct hda_codec *codec)
{
struct ad198x_spec *spec;
+ static hda_nid_t conn_0c[] = { 0x08 };
+ static hda_nid_t conn_0d[] = { 0x09 };
int err;
err = alloc_ad_spec(codec);
@@ -479,8 +500,14 @@ static int patch_ad1983(struct hda_codec *codec)
return err;
spec = codec->spec;
+ spec->gen.mixer_nid = 0x0e;
spec->gen.beep_nid = 0x10;
set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+ /* limit the loopback routes not to confuse the parser */
+ snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
+ snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
+
err = ad198x_parse_auto_config(codec, false);
if (err < 0)
goto error;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4e0ec146553d..bcf91bea3317 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3291,7 +3291,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec)
}
static void cxt_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
cxt_update_headset_mode(codec);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 56a8f1876603..a9a83b85517a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -708,7 +708,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
}
static void alc_inv_dmic_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_inv_dmic_sync(codec, false);
}
@@ -1821,6 +1822,7 @@ enum {
ALC889_FIXUP_IMAC91_VREF,
ALC889_FIXUP_MBA11_VREF,
ALC889_FIXUP_MBA21_VREF,
+ ALC889_FIXUP_MP11_VREF,
ALC882_FIXUP_INV_DMIC,
ALC882_FIXUP_NO_PRIMARY_HP,
ALC887_FIXUP_ASUS_BASS,
@@ -2190,6 +2192,12 @@ static const struct hda_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC889_FIXUP_MBP_VREF,
},
+ [ALC889_FIXUP_MP11_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc889_fixup_mba11_vref,
+ .chained = true,
+ .chain_id = ALC885_FIXUP_MACPRO_GPIO,
+ },
[ALC882_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
@@ -2253,7 +2261,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
- SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
@@ -3211,7 +3219,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
/* turn on/off mic-mute LED per capture hook */
static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct alc_spec *spec = codec->spec;
unsigned int oldval = spec->gpio_led;
@@ -3521,7 +3530,8 @@ static void alc_update_headset_mode(struct hda_codec *codec)
}
static void alc_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_update_headset_mode(codec);
}
@@ -4322,6 +4332,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -5096,6 +5107,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6998cf29b9bc..7311badf6a94 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -194,7 +194,7 @@ struct sigmatel_spec {
int default_polarity;
unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
- bool mic_mute_led_on; /* current mic mute state */
+ unsigned int mic_enabled; /* current mic mute state (bitmask) */
/* stream */
unsigned int stream_delay;
@@ -324,19 +324,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
/* hook for controlling mic-mute LED GPIO */
static void stac_capture_led_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct sigmatel_spec *spec = codec->spec;
- bool mute;
+ unsigned int mask;
+ bool cur_mute, prev_mute;
- if (!ucontrol)
+ if (!kcontrol || !ucontrol)
return;
- mute = !(ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
- if (spec->mic_mute_led_on != mute) {
- spec->mic_mute_led_on = mute;
- if (mute)
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ prev_mute = !spec->mic_enabled;
+ if (ucontrol->value.integer.value[0] ||
+ ucontrol->value.integer.value[1])
+ spec->mic_enabled |= mask;
+ else
+ spec->mic_enabled &= ~mask;
+ cur_mute = !spec->mic_enabled;
+ if (cur_mute != prev_mute) {
+ if (cur_mute)
spec->gpio_data |= spec->mic_mute_led_gpio;
else
spec->gpio_data &= ~spec->mic_mute_led_gpio;
@@ -4462,7 +4469,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
if (spec->mic_mute_led_gpio) {
spec->gpio_mask |= spec->mic_mute_led_gpio;
spec->gpio_dir |= spec->mic_mute_led_gpio;
- spec->mic_mute_led_on = true;
+ spec->mic_enabled = 0;
spec->gpio_data |= spec->mic_mute_led_gpio;
spec->gen.cap_sync_hook = stac_capture_led_hook;
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 5799fbc24c28..8fe3b8c18ed4 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -39,6 +39,7 @@ static void update_tpacpi_mute_led(void *private_data, int enabled)
}
static void update_tpacpi_micmute_led(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
if (!ucontrol || !led_set_func)
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index de9408b83f75..e05a86b7c0da 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -14,6 +14,7 @@ config SND_USB_AUDIO
select SND_HWDEP
select SND_RAWMIDI
select SND_PCM
+ select BITREVERSE
help
Say Y here to include support for USB audio and USB MIDI
devices.
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86161d8..b22dbb16f877 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
return 0;
}
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+ char from[PATH_MAX];
+ char to[PATH_MAX];
+ const char *name;
+ u64 addr1 = 0, addr2 = 0;
+ int i;
+
+ scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+ scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr1 = kallsyms__get_function_start(from, name);
+ if (addr1)
+ break;
+ }
+
+ if (name)
+ addr2 = kallsyms__get_function_start(to, name);
+
+ return addr1 == addr2;
+}
+
static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
size_t to_dir_sz)
{
char from[PATH_MAX];
char to[PATH_MAX];
+ char to_subdir[PATH_MAX];
struct dirent *dent;
int ret = -1;
DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
continue;
scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
dent->d_name);
- if (!compare_proc_modules(from, to)) {
- scnprintf(to, sizeof(to), "%s/%s", to_dir,
- dent->d_name);
- strlcpy(to_dir, to, to_dir_sz);
+ scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+ to_dir, dent->d_name);
+ if (!compare_proc_modules(from, to) &&
+ same_kallsyms_reloc(from_dir, to_subdir)) {
+ strlcpy(to_dir, to_subdir, to_dir_sz);
ret = 0;
break;
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf16fa8..af47531b82ec 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
* have no _text sometimes.
*/
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_text");
- if (err < 0)
- err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_stext");
+ machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_text");
- if (err < 0)
- err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_stext");
+ machine);
if (err < 0)
pr_err("Couldn't record kernel reference relocation symbol\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0cace85..63a0e6f04a01 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
will need at least this:
- asm/perf_event.h - a basic stub will suffice at first
- support for atomic64 types (and associated helper functions)
- - set_perf_event_pending() implemented
If your architecture does have hardware capabilities, you can override the
weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 7daa806d9050..e84fa26bc1be 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
-#define wmb() asm volatile("dmb ishld" ::: "memory")
-#define rmb() asm volatile("dmb ishst" ::: "memory")
+#define wmb() asm volatile("dmb ishst" ::: "memory")
+#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13edcbc17..3d9088003a5b 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
struct map *kallsyms_map, *vmlinux_map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
- struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
u64 mem_start, mem_end;
/*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
*/
kallsyms_map = machine__kernel_map(&kallsyms, type);
- sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
- if (sym == NULL) {
- pr_debug("dso__find_symbol_by_name ");
- goto out;
- }
-
- ref_reloc_sym.addr = UM(sym->start);
-
/*
* Step 5:
*
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
}
vmlinux_map = machine__kernel_map(&vmlinux, type);
- map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
/*
* Step 6:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f04772..b0f3ca850e9e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
return 1;
}
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name)
+{
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+ return 0;
+
+ return args.start;
+}
+
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
- struct machine *machine,
- const char *symbol_name)
+ struct machine *machine)
{
size_t size;
- const char *filename, *mmap_name;
- char path[PATH_MAX];
+ const char *mmap_name;
char name_buff[PATH_MAX];
struct map *map;
+ struct kmap *kmap;
int err;
/*
* We should get this from /sys/kernel/sections/.text, but till that is
* available use this, and after it is use this as a fallback for older
* kernels.
*/
- struct process_symbol_args args = { .name = symbol_name, };
union perf_event *event = zalloc((sizeof(event->mmap) +
machine->id_hdr_size));
if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
* see kernel/perf_event.c __perf_event_mmap
*/
event->header.misc = PERF_RECORD_MISC_KERNEL;
- filename = "/proc/kallsyms";
} else {
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- if (machine__is_default_guest(machine))
- filename = (char *) symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
- }
-
- if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
- free(event);
- return -ENOENT;
}
map = machine->vmlinux_maps[MAP__FUNCTION];
+ kmap = map__kmap(map);
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
- "%s%s", mmap_name, symbol_name) + 1;
+ "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
- event->mmap.pgoff = args.start;
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
event->mmap.start = map->start;
event->mmap.len = map->end - event->mmap.start;
event->mmap.pid = machine->pid;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e219be21..851fa06f4a42 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
struct machine *machine, bool mmap_data);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
- struct machine *machine,
- const char *symbol_name);
+ struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name);
+
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded74590b92f..c872991e0f65 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
return 1;
}
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+ size_t bufsz)
+{
+ if (machine__is_default_guest(machine))
+ scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+ else
+ scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
- const char *filename;
- char path[PATH_MAX];
+ char filename[PATH_MAX];
struct process_args args;
- if (machine__is_default_guest(machine))
- filename = (char *)symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
+ machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
return 0;
}
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
+ char filename[PATH_MAX];
+ const char *name;
+ u64 addr = 0;
+ int i;
+
+ machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr = kallsyms__get_function_start(filename, name);
+ if (addr)
+ break;
+ }
+ if (!addr)
+ return -1;
if (kernel == NULL ||
__machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
* Now that we have all the maps created, just set the ->end of them:
*/
map_groups__fixup_end(&machine->kmaps);
+
+ if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+ addr)) {
+ machine__destroy_kernel_maps(machine);
+ return -1;
+ }
+
return 0;
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 477133015440..f77e91e483dc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@ union perf_event;
#define HOST_KERNEL_ID (-1)
#define DEFAULT_GUEST_KERNEL_ID (0)
+extern const char *ref_reloc_sym_names[];
+
struct machine {
struct rb_node rb_node;
pid_t pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3b97513f0e77..39cd2d0faff6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
map->start = start;
map->end = end;
map->pgoff = pgoff;
+ map->reloc = 0;
map->dso = dso;
map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
if (map->dso->rel)
return rip - map->pgoff;
- return map->unmap_ip(map, rip);
+ return map->unmap_ip(map, rip) - map->reloc;
}
/**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
if (map->dso->rel)
return map->unmap_ip(map, ip + map->pgoff);
- return ip;
+ return ip + map->reloc;
}
void map_groups__init(struct map_groups *mg)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6b71c1..257e513205ce 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@ struct map {
bool erange_warned;
u32 priv;
u64 pgoff;
+ u64 reloc;
u32 maj, min; /* only valid for MMAP2 record */
u64 ino; /* only valid for MMAP2 record */
u64 ino_generation;/* only valid for MMAP2 record */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 759456728703..3e9f336740fa 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
if (strcmp(elf_name, kmap->ref_reloc_sym->name))
continue;
kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+ map->reloc = kmap->ref_reloc_sym->addr -
+ kmap->ref_reloc_sym->unrelocated_addr;
break;
}
}
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
(u64)shdr.sh_offset);
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
}
+new_symbol:
/*
* We need to figure out if the object was created from C++ sources
* DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
if (demangled != NULL)
elf_name = demangled;
}
-new_symbol:
f = symbol__new(sym.st_value, sym.st_size,
GELF_ST_BIND(sym.st_info), elf_name);
free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9adbaaf0..a9d758a3b371 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
symbol_filter_t filter)
{
struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
char dso_name[PATH_MAX];
struct dso *ndso;
+ if (delta) {
+ /* Kernel was relocated at boot time */
+ pos->start -= delta;
+ pos->end -= delta;
+ }
+
if (count == 0) {
curr_map = map;
goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
map_groups__insert(kmaps, curr_map);
++kernel_range;
+ } else if (delta) {
+ /* Kernel was relocated at boot time */
+ pos->start -= delta;
+ pos->end -= delta;
}
filter_symbol:
if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
return 0;
}
+static int validate_kcore_addresses(const char *kallsyms_filename,
+ struct map *map)
+{
+ struct kmap *kmap = map__kmap(map);
+
+ if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+ u64 start;
+
+ start = kallsyms__get_function_start(kallsyms_filename,
+ kmap->ref_reloc_sym->name);
+ if (start != kmap->ref_reloc_sym->addr)
+ return -EINVAL;
+ }
+
+ return validate_kcore_modules(kallsyms_filename, map);
+}
+
struct kcore_mapfn_data {
struct dso *dso;
enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
kallsyms_filename))
return -EINVAL;
- /* All modules must be present at their original addresses */
- if (validate_kcore_modules(kallsyms_filename, map))
+ /* Modules and kernel must be present at their original addresses */
+ if (validate_kcore_addresses(kallsyms_filename, map))
return -EINVAL;
md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
return -EINVAL;
}
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+ struct kmap *kmap = map__kmap(map);
+ u64 addr;
+
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+ return 0;
+
+ addr = kallsyms__get_function_start(filename,
+ kmap->ref_reloc_sym->name);
+ if (!addr)
+ return -1;
+
+ *delta = addr - kmap->ref_reloc_sym->addr;
+ return 0;
+}
+
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
+ u64 delta = 0;
+
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return -1;
if (dso__load_all_kallsyms(dso, filename, map) < 0)
return -1;
+ if (kallsyms__delta(map, filename, &delta))
+ return -1;
+
symbols__fixup_duplicate(&dso->symbols[map->type]);
symbols__fixup_end(&dso->symbols[map->type]);
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
if (!dso__load_kcore(dso, map, filename))
return dso__split_kallsyms_for_kcore(dso, map, filter);
else
- return dso__split_kallsyms(dso, map, filter);
+ return dso__split_kallsyms(dso, map, delta, filter);
}
static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1424,7 +1477,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
continue;
scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
"%s/%s/kallsyms", dir, dent->d_name);
- if (!validate_kcore_modules(kallsyms_filename, map)) {
+ if (!validate_kcore_addresses(kallsyms_filename, map)) {
strlcpy(dir, kallsyms_filename, dir_sz);
ret = 0;
break;
@@ -1479,7 +1532,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
if (fd != -1) {
close(fd);
/* If module maps match go with /proc/kallsyms */
- if (!validate_kcore_modules("/proc/kallsyms", map))
+ if (!validate_kcore_addresses("/proc/kallsyms", map))
goto proc_kallsyms;
}
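
Note: the perf changes above establish a reference symbol (_text, falling back to _stext), record its address for the session, and later compare it with the address a given kallsyms file reports for the same symbol. kallsyms__delta() takes the difference as the boot-time relocation offset (such as with KASLR), dso__split_kallsyms() subtracts it from every parsed symbol, and map->reloc feeds the same correction into the objdump address conversions. A toy calculation with assumed addresses, not taken from any real system:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* assumed example values */
	uint64_t ref_session = 0xffffffff81000000ULL;	/* ref_reloc_sym->addr        */
	uint64_t ref_in_file = 0xffffffff9a000000ULL;	/* same symbol in kallsyms    */
	uint64_t sym_in_file = 0xffffffff9a123450ULL;	/* some other kallsyms symbol */

	uint64_t delta = sym_in_file - sym_in_file;	/* placeholder, set below     */

	delta = ref_in_file - ref_session;		/* kallsyms__delta()          */
	printf("delta=%#" PRIx64 " adjusted=%#" PRIx64 "\n",
	       delta, sym_in_file - delta);		/* dso__split_kallsyms()      */
	return 0;
}
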
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index be456ce264d0..8ca405cd7c1a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/uaccess.h>
#include <linux/irqchip/arm-gic.h>
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 88b2fe3ddf42..00d86427af0f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
list_add_tail(&dev->list, &kvm->coalesced_zones);
mutex_unlock(&kvm->slots_lock);
- return ret;
+ return 0;
out_free_dev:
mutex_unlock(&kvm->slots_lock);
-
kfree(dev);
- if (dev == NULL)
- return -ENXIO;
-
- return 0;
+ return ret;
}
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,