-rw-r--r--  Documentation/DocBook/filesystems.tmpl | 4
-rw-r--r--  Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt | 8
-rw-r--r--  Documentation/filesystems/Locking | 2
-rw-r--r--  Documentation/filesystems/porting | 5
-rw-r--r--  Documentation/filesystems/vfs.txt | 4
-rw-r--r--  Documentation/laptops/laptop-mode.txt | 12
-rw-r--r--  Documentation/security/Yama.txt | 14
-rw-r--r--  Documentation/sysctl/vm.txt | 14
-rw-r--r--  MAINTAINERS | 30
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/imx27.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/imx51-babbage.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx51.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/imx6q.dtsi | 7
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c | 8
-rw-r--r--  arch/arm/mach-imx/hotplug.c | 23
-rw-r--r--  arch/arm/mm/dma-mapping.c | 12
-rw-r--r--  arch/blackfin/kernel/setup.c | 1
-rw-r--r--  arch/ia64/kernel/acpi.c | 5
-rw-r--r--  arch/m68k/Kconfig | 12
-rw-r--r--  arch/m68k/Kconfig.cpu | 14
-rw-r--r--  arch/m68k/apollo/config.c | 16
-rw-r--r--  arch/m68k/include/asm/Kbuild | 25
-rw-r--r--  arch/m68k/include/asm/MC68332.h | 152
-rw-r--r--  arch/m68k/include/asm/apollodma.h | 248
-rw-r--r--  arch/m68k/include/asm/apollohw.h | 2
-rw-r--r--  arch/m68k/include/asm/bitsperlong.h | 1
-rw-r--r--  arch/m68k/include/asm/cputime.h | 6
-rw-r--r--  arch/m68k/include/asm/delay.h | 2
-rw-r--r--  arch/m68k/include/asm/device.h | 7
-rw-r--r--  arch/m68k/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/m68k/include/asm/errno.h | 6
-rw-r--r--  arch/m68k/include/asm/futex.h | 6
-rw-r--r--  arch/m68k/include/asm/ioctl.h | 1
-rw-r--r--  arch/m68k/include/asm/ipcbuf.h | 1
-rw-r--r--  arch/m68k/include/asm/irq_regs.h | 1
-rw-r--r--  arch/m68k/include/asm/kdebug.h | 1
-rw-r--r--  arch/m68k/include/asm/kmap_types.h | 6
-rw-r--r--  arch/m68k/include/asm/kvm_para.h | 1
-rw-r--r--  arch/m68k/include/asm/local.h | 6
-rw-r--r--  arch/m68k/include/asm/local64.h | 1
-rw-r--r--  arch/m68k/include/asm/mac_mouse.h | 23
-rw-r--r--  arch/m68k/include/asm/mcfmbus.h | 77
-rw-r--r--  arch/m68k/include/asm/mman.h | 1
-rw-r--r--  arch/m68k/include/asm/mutex.h | 9
-rw-r--r--  arch/m68k/include/asm/percpu.h | 6
-rw-r--r--  arch/m68k/include/asm/resource.h | 6
-rw-r--r--  arch/m68k/include/asm/sbus.h | 45
-rw-r--r--  arch/m68k/include/asm/scatterlist.h | 6
-rw-r--r--  arch/m68k/include/asm/sections.h | 8
-rw-r--r--  arch/m68k/include/asm/shm.h | 31
-rw-r--r--  arch/m68k/include/asm/siginfo.h | 6
-rw-r--r--  arch/m68k/include/asm/statfs.h | 6
-rw-r--r--  arch/m68k/include/asm/topology.h | 6
-rw-r--r--  arch/m68k/include/asm/types.h | 22
-rw-r--r--  arch/m68k/include/asm/unaligned.h | 4
-rw-r--r--  arch/m68k/include/asm/xor.h | 1
-rw-r--r--  arch/m68k/kernel/setup_no.c | 11
-rw-r--r--  arch/m68k/kernel/sys_m68k.c | 8
-rw-r--r--  arch/m68k/kernel/vmlinux-nommu.lds | 2
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds | 2
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds | 2
-rw-r--r--  arch/m68k/lib/muldi3.c | 2
-rw-r--r--  arch/m68k/mm/init_mm.c | 2
-rw-r--r--  arch/m68k/mm/init_no.c | 2
-rw-r--r--  arch/m68k/platform/68328/head-de2.S | 8
-rw-r--r--  arch/m68k/platform/68328/head-pilot.S | 10
-rw-r--r--  arch/m68k/platform/68328/head-ram.S | 4
-rw-r--r--  arch/m68k/platform/68328/head-rom.S | 6
-rw-r--r--  arch/m68k/platform/68360/head-ram.S | 6
-rw-r--r--  arch/m68k/platform/68360/head-rom.S | 8
-rw-r--r--  arch/m68k/platform/coldfire/head.S | 10
-rw-r--r--  arch/m68k/sun3/prom/init.c | 48
-rw-r--r--  arch/microblaze/include/asm/sections.h | 4
-rw-r--r--  arch/microblaze/kernel/microblaze_ksyms.c | 3
-rw-r--r--  arch/microblaze/kernel/setup.c | 4
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/sparsemem.h | 2
-rw-r--r--  arch/s390/include/asm/syscall.h | 10
-rw-r--r--  arch/s390/kernel/compat_linux.c | 2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 4
-rw-r--r--  arch/s390/kernel/ptrace.c | 7
-rw-r--r--  arch/s390/kernel/sys_s390.c | 9
-rw-r--r--  arch/sh/drivers/dma/dma-sh.c | 2
-rw-r--r--  arch/sh/include/asm/sections.h | 1
-rw-r--r--  arch/sh/include/cpu-sh2a/cpu/sh7269.h | 36
-rw-r--r--  arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c | 195
-rw-r--r--  arch/sh/kernel/setup.c | 2
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c | 1
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/sh/lib/mcount.S | 8
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 10
-rw-r--r--  arch/sparc/mm/init_64.c | 28
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 8
-rw-r--r--  arch/x86/include/asm/perf_event.h | 11
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 4
-rw-r--r--  arch/x86/kernel/acpi/sleep.h | 2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S | 4
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 4
-rw-r--r--  arch/x86/kernel/alternative.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c | 7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 43
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 89
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 20
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 7
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h | 2
-rw-r--r--  arch/x86/kernel/irq.c | 1
-rw-r--r--  arch/x86/kernel/kdebugfs.c | 6
-rw-r--r--  arch/x86/kvm/i8259.c | 17
-rw-r--r--  arch/x86/kvm/vmx.c | 20
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/mm/srat.c | 15
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl | 2
-rw-r--r--  arch/x86/xen/p2m.c | 5
-rw-r--r--  drivers/acpi/ac.c | 4
-rw-r--r--  drivers/acpi/acpica/achware.h | 12
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 19
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 20
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 22
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/button.c | 4
-rw-r--r--  drivers/acpi/fan.c | 4
-rw-r--r--  drivers/acpi/numa.c | 12
-rw-r--r--  drivers/acpi/pci_root.c | 11
-rw-r--r--  drivers/acpi/power.c | 4
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/sbs.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 75
-rw-r--r--  drivers/acpi/sysfs.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 4
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/base/power/clock_ops.c | 3
-rw-r--r--  drivers/base/power/common.c | 4
-rw-r--r--  drivers/bcma/host_pci.c | 1
-rw-r--r--  drivers/bcma/sprom.c | 4
-rw-r--r--  drivers/block/drbd/drbd_main.c | 4
-rw-r--r--  drivers/char/agp/intel-agp.h | 39
-rw-r--r--  drivers/char/agp/intel-gtt.c | 60
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 2
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 1
-rw-r--r--  drivers/dma/imx-dma.c | 36
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 18
-rw-r--r--  drivers/gpio/gpio-langwell.c | 7
-rw-r--r--  drivers/gpio/gpio-msic.c | 2
-rw-r--r--  drivers/gpio/gpio-mxc.c | 5
-rw-r--r--  drivers/gpio/gpio-pxa.c | 30
-rw-r--r--  drivers/gpio/gpio-samsung.c | 14
-rw-r--r--  drivers/gpio/gpio-sch.c | 3
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fifo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 71
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 25
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 6
-rw-r--r--  drivers/iommu/exynos-iommu.c | 6
-rw-r--r--  drivers/iommu/intel-iommu.c | 26
-rw-r--r--  drivers/iommu/tegra-smmu.c | 17
-rw-r--r--  drivers/isdn/isdnloop/isdnloop.c | 12
-rw-r--r--  drivers/isdn/mISDN/layer2.c | 2
-rw-r--r--  drivers/leds/led-triggers.c | 2
-rw-r--r--  drivers/leds/leds-lp8788.c | 2
-rw-r--r--  drivers/leds/leds-renesas-tpu.c | 2
-rw-r--r--  drivers/mtd/maps/uclinux.c | 5
-rw-r--r--  drivers/net/appletalk/cops.c | 4
-rw-r--r--  drivers/net/appletalk/ltpc.c | 4
-rw-r--r--  drivers/net/cris/eth_v10.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 72
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 36
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/sense.c | 14
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 13
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 14
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 7
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 11
-rw-r--r--  drivers/net/irda/bfin_sir.c | 8
-rw-r--r--  drivers/net/macvtap.c | 3
-rw-r--r--  drivers/net/phy/mdio-mux-gpio.c | 1
-rw-r--r--  drivers/net/ppp/pptp.c | 4
-rw-r--r--  drivers/net/tun.c | 1
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 1
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 1
-rw-r--r--  drivers/net/wireless/b43/main.c | 21
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/channel.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 13
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 1
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 68
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 71
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 3
-rw-r--r--  drivers/pinctrl/pinctrl-imx23.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-imx28.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-nomadik-db8500.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-nomadik.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-sirf.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-u300.c | 8
-rw-r--r--  drivers/platform/x86/classmate-laptop.c | 4
-rw-r--r--  drivers/platform/x86/fujitsu-tablet.c | 2
-rw-r--r--  drivers/platform/x86/hdaps.c | 2
-rw-r--r--  drivers/platform/x86/hp_accel.c | 2
-rw-r--r--  drivers/platform/x86/msi-laptop.c | 4
-rw-r--r--  drivers/platform/x86/panasonic-laptop.c | 4
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 12
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 2
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 2
-rw-r--r--  drivers/platform/x86/toshiba_bluetooth.c | 4
-rw-r--r--  drivers/platform/x86/xo15-ebook.c | 2
-rw-r--r--  drivers/rtc/interface.c | 2
-rw-r--r--  drivers/rtc/rtc-cmos.c | 1
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 2
-rw-r--r--  drivers/sh/intc/core.c | 27
-rw-r--r--  drivers/usb/early/ehci-dbgp.c | 2
-rw-r--r--  drivers/vhost/Kconfig | 3
-rw-r--r--  drivers/vhost/Kconfig.tcm | 6
-rw-r--r--  drivers/vhost/Makefile | 2
-rw-r--r--  drivers/vhost/tcm_vhost.c | 1628
-rw-r--r--  drivers/vhost/tcm_vhost.h | 101
-rw-r--r--  drivers/zorro/zorro.c | 2
-rw-r--r--  fs/autofs4/expire.c | 31
-rw-r--r--  fs/bio.c | 2
-rw-r--r--  fs/btrfs/inode.c | 3
-rw-r--r--  fs/btrfs/ioctl.c | 5
-rw-r--r--  fs/btrfs/ordered-data.c | 2
-rw-r--r--  fs/btrfs/super.c | 4
-rw-r--r--  fs/btrfs/volumes.c | 4
-rw-r--r--  fs/exofs/inode.c | 27
-rw-r--r--  fs/exofs/ore.c | 14
-rw-r--r--  fs/exofs/super.c | 11
-rw-r--r--  fs/ext3/inode.c | 8
-rw-r--r--  fs/ext3/super.c | 11
-rw-r--r--  fs/ext4/inode.c | 10
-rw-r--r--  fs/ext4/super.c | 11
-rw-r--r--  fs/fuse/file.c | 15
-rw-r--r--  fs/fuse/fuse_i.h | 3
-rw-r--r--  fs/fuse/inode.c | 32
-rw-r--r--  fs/gfs2/meta_io.c | 2
-rw-r--r--  fs/hfs/mdb.c | 4
-rw-r--r--  fs/jbd/journal.c | 4
-rw-r--r--  fs/jbd2/journal.c | 4
-rw-r--r--  fs/nilfs2/super.c | 4
-rw-r--r--  fs/nilfs2/the_nilfs.h | 2
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/super.c | 40
-rw-r--r--  fs/ubifs/file.c | 10
-rw-r--r--  fs/ubifs/super.c | 2
-rw-r--r--  include/acpi/acpixf.h | 4
-rw-r--r--  include/acpi/actypes.h | 2
-rw-r--r--  include/drm/drm_pciids.h | 3
-rw-r--r--  include/drm/radeon_drm.h | 2
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/acpi.h | 2
-rw-r--r--  include/linux/backing-dev.h | 1
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 6
-rw-r--r--  include/linux/can.h | 25
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/ftrace_event.h | 5
-rw-r--r--  include/linux/fuse.h | 19
-rw-r--r--  include/linux/hardirq.h | 2
-rw-r--r--  include/linux/iommu.h | 2
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/irq.h | 1
-rw-r--r--  include/linux/jiffies.h | 29
-rw-r--r--  include/linux/kdb.h | 2
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/perf_event.h | 3
-rw-r--r--  include/linux/sched.h | 8
-rw-r--r--  include/linux/security.h | 2
-rw-r--r--  include/linux/timex.h | 2
-rw-r--r--  include/linux/topology.h | 1
-rw-r--r--  include/linux/writeback.h | 1
-rw-r--r--  include/net/cfg80211.h | 2
-rw-r--r--  include/net/codel.h | 8
-rw-r--r--  include/net/dst.h | 2
-rw-r--r--  include/net/inet_connection_sock.h | 1
-rw-r--r--  include/net/inet_sock.h | 9
-rw-r--r--  include/net/ip.h | 2
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/net/tcp.h | 1
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  include/trace/events/sched.h | 4
-rw-r--r--  include/trace/ftrace.h | 6
-rw-r--r--  kernel/debug/kdb/kdb_debugger.c | 4
-rw-r--r--  kernel/debug/kdb/kdb_io.c | 11
-rw-r--r--  kernel/debug/kdb/kdb_main.c | 15
-rw-r--r--  kernel/events/callchain.c | 9
-rw-r--r--  kernel/events/core.c | 30
-rw-r--r--  kernel/events/internal.h | 3
-rw-r--r--  kernel/futex.c | 17
-rw-r--r--  kernel/irq/manage.c | 15
-rw-r--r--  kernel/power/suspend.c | 3
-rw-r--r--  kernel/printk.c | 2
-rw-r--r--  kernel/sched/core.c | 4
-rw-r--r--  kernel/sched/cpupri.c | 10
-rw-r--r--  kernel/sched/fair.c | 29
-rw-r--r--  kernel/time/jiffies.c | 2
-rw-r--r--  kernel/time/ntp.c | 2
-rw-r--r--  kernel/time/timekeeping.c | 407
-rw-r--r--  kernel/trace/trace_event_perf.c | 2
-rw-r--r--  kernel/trace/trace_kprobe.c | 6
-rw-r--r--  kernel/trace/trace_syscalls.c | 4
-rw-r--r--  kernel/trace/trace_uprobe.c | 2
-rw-r--r--  kernel/watchdog.c | 21
-rw-r--r--  mm/backing-dev.c | 52
-rw-r--r--  mm/page-writeback.c | 1
-rw-r--r--  net/batman-adv/gateway_client.c | 6
-rw-r--r--  net/batman-adv/translation-table.c | 1
-rw-r--r--  net/core/dev.c | 11
-rw-r--r--  net/core/dst.c | 10
-rw-r--r--  net/core/sock.c | 1
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 8
-rw-r--r--  net/ipv4/route.c | 4
-rw-r--r--  net/ipv4/tcp.c | 4
-rw-r--r--  net/ipv4/tcp_cong.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 14
-rw-r--r--  net/ipv4/tcp_metrics.c | 12
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 23
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 28
-rw-r--r--  net/llc/llc_station.c | 6
-rw-r--r--  net/mac80211/mesh.c | 3
-rw-r--r--  net/mac80211/mlme.c | 2
-rw-r--r--  net/mac80211/scan.c | 3
-rw-r--r--  net/packet/af_packet.c | 3
-rw-r--r--  net/sched/act_gact.c | 14
-rw-r--r--  net/sched/act_ipt.c | 7
-rw-r--r--  net/sched/act_pedit.c | 5
-rw-r--r--  net/sched/act_simple.c | 5
-rw-r--r--  net/sched/sch_qfq.c | 95
-rw-r--r--  net/wireless/core.c | 5
-rw-r--r--  net/wireless/core.h | 1
-rw-r--r--  net/wireless/reg.c | 19
-rw-r--r--  net/wireless/util.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 21
-rwxr-xr-x  scripts/decodecode | 2
-rw-r--r--  security/yama/yama_lsm.c | 41
-rw-r--r--  sound/core/sgbuf.c | 2
-rw-r--r--  sound/pci/emu10k1/memory.c | 5
-rw-r--r--  sound/pci/hda/hda_auto_parser.c | 5
-rw-r--r--  sound/pci/hda/patch_conexant.c | 6
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 12
-rw-r--r--  sound/pci/hda/patch_realtek.c | 8
-rw-r--r--  sound/soc/codecs/ab8500-codec.c | 4
-rw-r--r--  sound/soc/codecs/ad1980.c | 1
-rw-r--r--  sound/soc/codecs/mc13783.c | 2
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 3
-rw-r--r--  sound/soc/codecs/stac9766.c | 1
-rw-r--r--  sound/soc/codecs/wm8962.c | 3
-rw-r--r--  sound/soc/codecs/wm8994.c | 15
-rw-r--r--  sound/soc/codecs/wm9712.c | 1
-rw-r--r--  sound/soc/codecs/wm9713.c | 1
-rw-r--r--  sound/soc/mxs/mxs-saif.c | 24
-rw-r--r--  sound/soc/omap/omap-mcbsp.c | 1
-rw-r--r--  sound/soc/omap/omap-pcm.c | 1
-rw-r--r--  sound/soc/soc-core.c | 2
-rw-r--r--  sound/soc/tegra/tegra_alc5632.c | 2
-rw-r--r--  sound/soc/tegra/tegra_wm8903.c | 10
-rw-r--r--  sound/soc/ux500/ux500_msp_dai.c | 2
-rw-r--r--  sound/soc/ux500/ux500_msp_i2s.c | 2
-rw-r--r--  sound/soc/ux500/ux500_msp_i2s.h | 2
-rw-r--r--  tools/perf/Makefile | 7
-rw-r--r--  tools/perf/builtin-record.c | 4
-rw-r--r--  tools/perf/builtin-report.c | 5
-rw-r--r--  tools/perf/builtin-test.c | 19
-rw-r--r--  tools/perf/builtin-top.c | 23
-rw-r--r--  tools/perf/util/event.h | 3
-rw-r--r--  tools/perf/util/evlist.c | 7
-rw-r--r--  tools/perf/util/evlist.h | 3
-rw-r--r--  tools/perf/util/evsel.c | 15
-rw-r--r--  tools/perf/util/evsel.h | 10
-rw-r--r--  tools/perf/util/header.c | 9
-rw-r--r--  tools/perf/util/intlist.c | 101
-rw-r--r--  tools/perf/util/intlist.h | 75
-rw-r--r--  tools/perf/util/parse-events-test.c | 12
-rw-r--r--  tools/perf/util/parse-options.c | 3
-rw-r--r--  tools/perf/util/python.c | 6
-rw-r--r--  tools/perf/util/rblist.c | 107
-rw-r--r--  tools/perf/util/rblist.h | 47
-rw-r--r--  tools/perf/util/session.c | 48
-rw-r--r--  tools/perf/util/session.h | 24
-rw-r--r--  tools/perf/util/strlist.c | 130
-rw-r--r--  tools/perf/util/strlist.h | 11
-rw-r--r--  tools/perf/util/symbol.c | 14
-rw-r--r--  tools/perf/util/target.c | 2
455 files changed, 4960 insertions, 2722 deletions
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 3fca32c41927..25b58efd955d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -224,8 +224,8 @@ all your transactions.
</para>
<para>
-Then at umount time , in your put_super() (2.4) or write_super() (2.5)
-you can then call journal_destroy() to clean up your in-core journal object.
+Then at umount time , in your put_super() you can then call journal_destroy()
+to clean up your in-core journal object.
</para>
<para>
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 70cd49b1caa8..1dd622546d06 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -10,8 +10,8 @@ Required properties:
- compatible : Should be "fsl,<chip>-esdhc"
Optional properties:
-- fsl,cd-internal : Indicate to use controller internal card detection
-- fsl,wp-internal : Indicate to use controller internal write protection
+- fsl,cd-controller : Indicate to use controller internal card detection
+- fsl,wp-controller : Indicate to use controller internal write protection
Examples:
@@ -19,8 +19,8 @@ esdhc@70004000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70004000 0x4000>;
interrupts = <1>;
- fsl,cd-internal;
- fsl,wp-internal;
+ fsl,cd-controller;
+ fsl,wp-controller;
};
esdhc@70008000 {
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 0f103e39b4f6..e540a24e5d06 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -114,7 +114,6 @@ prototypes:
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
- void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -136,7 +135,6 @@ write_inode:
drop_inode: !!!inode->i_lock!!!
evict_inode:
put_super: write
-write_super: read
sync_fs: read
freeze_fs: write
unfreeze_fs: write
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 2bef2b3843d1..0742feebc6e2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -94,9 +94,8 @@ protected.
---
[mandatory]
-BKL is also moved from around sb operations. ->write_super() Is now called
-without BKL held. BKL should have been shifted into individual fs sb_op
-functions. If you don't need it, remove it.
+BKL is also moved from around sb operations. BKL should have been shifted into
+individual fs sb_op functions. If you don't need it, remove it.
---
[informational]
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 065aa2dc0835..2ee133e030c3 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -216,7 +216,6 @@ struct super_operations {
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
- void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -273,9 +272,6 @@ or bottom half).
put_super: called when the VFS wishes to free the superblock
(i.e. unmount). This is called with the superblock lock held
- write_super: called when the VFS superblock needs to be written to
- disc. This method is optional
-
sync_fs: called when VFS is writing out all dirty data associated with
a superblock. The second parameter indicates whether the method
should wait until the write out has been completed. Optional.
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 0bf25eebce94..4ebbfc3f1c6e 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio. Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake flusher threads which will then reduce the
+# amount of dirty memory to dirty_background_ratio. Set this nice and low,
+# so once some writeout has commenced, we do a lot of it.
#
#DIRTY_BACKGROUND_RATIO=5
@@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio. Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake flusher threads which will then reduce the
+# amount of dirty memory to dirty_background_ratio. Set this nice and low,
+# so once some writeout has commenced, we do a lot of it.
#
DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
diff --git a/Documentation/security/Yama.txt b/Documentation/security/Yama.txt
index e369de2d48cd..dd908cf64ecf 100644
--- a/Documentation/security/Yama.txt
+++ b/Documentation/security/Yama.txt
@@ -46,14 +46,13 @@ restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...)
so that any otherwise allowed process (even those in external pid namespaces)
may attach.
-These restrictions do not change how ptrace via PTRACE_TRACEME operates.
-
-The sysctl settings are:
+The sysctl settings (writable only with CAP_SYS_PTRACE) are:
0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other
process running under the same uid, as long as it is dumpable (i.e.
did not transition uids, start privileged, or have called
- prctl(PR_SET_DUMPABLE...) already).
+ prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is
+ unchanged.
1 - restricted ptrace: a process must have a predefined relationship
with the inferior it wants to call PTRACE_ATTACH on. By default,
@@ -61,12 +60,13 @@ The sysctl settings are:
classic criteria is also met. To change the relationship, an
inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare
an allowed debugger PID to call PTRACE_ATTACH on the inferior.
+ Using PTRACE_TRACEME is unchanged.
2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace
- with PTRACE_ATTACH.
+ with PTRACE_ATTACH, or through children calling PTRACE_TRACEME.
-3 - no attach: no processes may use ptrace with PTRACE_ATTACH. Once set,
- this sysctl cannot be changed to a lower value.
+3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via
+ PTRACE_TRACEME. Once set, this sysctl value cannot be changed.
The original children-only logic was based on the restrictions in grsecurity.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index dcc2a94ae34e..078701fdbd4d 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -76,8 +76,8 @@ huge pages although processes will also directly compact memory as required.
dirty_background_bytes
-Contains the amount of dirty memory at which the pdflush background writeback
-daemon will start writeback.
+Contains the amount of dirty memory at which the background kernel
+flusher threads will start writeback.
Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
one of them may be specified at a time. When one sysctl is written it is
@@ -89,7 +89,7 @@ other appears as 0 when read.
dirty_background_ratio
Contains, as a percentage of total system memory, the number of pages at which
-the pdflush background writeback daemon will start writing out dirty data.
+the background kernel flusher threads will start writing out dirty data.
==============================================================
@@ -112,9 +112,9 @@ retained.
dirty_expire_centisecs
This tunable is used to define when dirty data is old enough to be eligible
-for writeout by the pdflush daemons. It is expressed in 100'ths of a second.
-Data which has been dirty in-memory for longer than this interval will be
-written out next time a pdflush daemon wakes up.
+for writeout by the kernel flusher threads. It is expressed in 100'ths
+of a second. Data which has been dirty in-memory for longer than this
+interval will be written out next time a flusher thread wakes up.
==============================================================
@@ -128,7 +128,7 @@ data.
dirty_writeback_centisecs
-The pdflush writeback daemons will periodically wake up and write `old' data
+The kernel flusher threads will periodically wake up and write `old' data
out to disk. This tunable expresses the interval between those wakeups, in
100'ths of a second.
diff --git a/MAINTAINERS b/MAINTAINERS
index b694a9d4e3ba..61ad79ea2b08 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -827,24 +827,24 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IOP33X ARM ARCHITECTURE
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IOP13XX ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IQ81342EX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -869,7 +869,7 @@ F: drivers/pcmcia/pxa2xx_stargate2.c
ARM/INTEL XSC3 (MANZANO) ARM CORE
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -1232,9 +1232,9 @@ S: Maintained
F: drivers/hwmon/asb100.c
ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
W: http://sourceforge.net/projects/xscaleiop
-S: Supported
+S: Maintained
F: Documentation/crypto/async-tx-api.txt
F: crypto/async_tx/
F: drivers/dma/
@@ -2364,7 +2364,7 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
S: Supported
F: drivers/dma/
F: include/linux/dma*
@@ -3552,7 +3552,6 @@ K: \b(ABS|SYN)_MT_
INTEL C600 SERIES SAS CONTROLLER DRIVER
M: Intel SCU Linux support <intel-linux-scu@intel.com>
-M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Ed Nadolski <edmund.nadolski@intel.com>
L: linux-scsi@vger.kernel.org
@@ -3595,8 +3594,8 @@ F: arch/x86/kernel/microcode_core.c
F: arch/x86/kernel/microcode_intel.c
INTEL I/OAT DMA DRIVER
-M: Dan Williams <dan.j.williams@intel.com>
-S: Supported
+M: Dan Williams <djbw@fb.com>
+S: Maintained
F: drivers/dma/ioat*
INTEL IOMMU (VT-d)
@@ -3608,8 +3607,8 @@ F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER
-M: Dan Williams <dan.j.williams@intel.com>
-S: Maintained
+M: Dan Williams <djbw@fb.com>
+S: Odd fixes
F: drivers/dma/iop-adma.c
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
@@ -5334,14 +5333,15 @@ PIN CONTROL SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
F: drivers/pinctrl/
+F: include/linux/pinctrl/
PIN CONTROLLER - ST SPEAR
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <viresh.linux@gmail.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: driver/pinctrl/spear/
+F: drivers/pinctrl/spear/
PKTCDVD DRIVER
M: Peter Osterlund <petero2@telia.com>
diff --git a/Makefile b/Makefile
index ddf5be952e45..9cc77acfc881 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 00bae3aad5ab..5303ab680a34 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -19,6 +19,12 @@
serial3 = &uart4;
serial4 = &uart5;
serial5 = &uart6;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
};
avic: avic-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index cd86177a3ea2..59d9789e5508 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -25,8 +25,8 @@
aips@70000000 { /* aips-1 */
spba@70000000 {
esdhc@70004000 { /* ESDHC1 */
- fsl,cd-internal;
- fsl,wp-internal;
+ fsl,cd-controller;
+ fsl,wp-controller;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 53cbaa3d4f90..aba28dc87fc8 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -17,6 +17,10 @@
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
};
tzic: tz-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index fc79cdc4b4e6..cd37165edce5 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -19,6 +19,13 @@
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
};
tzic: tz-interrupt-controller@0fffc000 {
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 3d3c64b014e6..fd57079f71a9 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -19,6 +19,13 @@
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
};
cpus {
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index ea89520b6e22..4233d9e3531d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -152,7 +152,7 @@ enum mx6q_clks {
ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
- ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2,
+ ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
clk_max
};
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
- clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1);
- clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1);
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1);
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1);
clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d56c1af..f8f7437c83b8 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
: "cc");
}
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C), "Ir" (0x40)
- : "cc");
-}
-
/*
* platform-specific code to shutdown a CPU
*
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
imx_enable_cpu(cpu, false);
- cpu_do_idle();
- cpu_leave_lowpower();
- /* We should never return from idle */
- panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+ /* spin here until hardware takes it down */
+ while (1)
+ ;
}
int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c2cdf6500f75..4e7d1182e8a3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@ void __init dma_contiguous_remap(void)
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
- return;
+ continue;
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
@@ -423,7 +423,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
unsigned int pageno;
unsigned long flags;
void *ptr = NULL;
- size_t align;
+ unsigned long align_mask;
if (!pool->vaddr) {
WARN(1, "coherent pool not initialised!\n");
@@ -435,11 +435,11 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
* small, so align them to their order in pages, minimum is a page
* size. This helps reduce fragmentation of the DMA space.
*/
- align = PAGE_SIZE << get_order(size);
+ align_mask = (1 << get_order(size)) - 1;
spin_lock_irqsave(&pool->lock, flags);
pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
- 0, count, (1 << align) - 1);
+ 0, count, align_mask);
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
@@ -648,12 +648,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
if (arch_is_coherent() || nommu()) {
__dma_free_buffer(page, size);
+ } else if (__free_from_pool(cpu_addr, size)) {
+ return;
} else if (!IS_ENABLED(CONFIG_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
- if (__free_from_pool(cpu_addr, size))
- return;
/*
* Non-atomic allocations cannot be freed with IRQs disabled
*/
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ada8f0fc71e4..fb96e607adcf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
-unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 6f38b6120d96..440578850ae5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
srat_num_cpus++;
}
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
unsigned long paddr, size;
@@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
/* Ignore disabled entries */
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return;
+ return -1;
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
@@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
p->size = size;
p->nid = pxm;
num_node_memblks++;
+ return 0;
}
void __init acpi_numa_arch_fixup(void)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0b0f8b8c4a26..4a469907f04a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -54,18 +54,6 @@ config ZONE_DMA
bool
default y
-config CPU_HAS_NO_BITFIELDS
- bool
-
-config CPU_HAS_NO_MULDIV64
- bool
-
-config CPU_HAS_ADDRESS_SPACES
- bool
-
-config FPU
- bool
-
config HZ
int
default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 43a9f8f1b8eb..82068349a2bb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -37,6 +37,7 @@ config M68000
bool
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
+ select CPU_HAS_NO_UNALIGNED
select GENERIC_CSUM
help
The Freescale (was Motorola) 68000 CPU is the first generation of
@@ -48,6 +49,7 @@ config M68000
config MCPU32
bool
select CPU_HAS_NO_BITFIELDS
+ select CPU_HAS_NO_UNALIGNED
help
The Freescale (was then Motorola) CPU32 is a CPU core that is
based on the 68020 processor. For the most part it is used in
@@ -376,6 +378,18 @@ config NODES_SHIFT
default "3"
depends on !SINGLE_MEMORY_CHUNK
+config CPU_HAS_NO_BITFIELDS
+ bool
+
+config CPU_HAS_NO_MULDIV64
+ bool
+
+config CPU_HAS_NO_UNALIGNED
+ bool
+
+config CPU_HAS_ADDRESS_SPACES
+ bool
+
config FPU
bool
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 0a30406b9442..f5565d6eeb8e 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
timer_handler(irq, dev_id);
- x=*(volatile unsigned char *)(timer+3);
- x=*(volatile unsigned char *)(timer+5);
+ x = *(volatile unsigned char *)(apollo_timer + 3);
+ x = *(volatile unsigned char *)(apollo_timer + 5);
return IRQ_HANDLED;
}
@@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
void dn_sched_init(irq_handler_t timer_routine)
{
/* program timer 1 */
- *(volatile unsigned char *)(timer+3)=0x01;
- *(volatile unsigned char *)(timer+1)=0x40;
- *(volatile unsigned char *)(timer+5)=0x09;
- *(volatile unsigned char *)(timer+7)=0xc4;
+ *(volatile unsigned char *)(apollo_timer + 3) = 0x01;
+ *(volatile unsigned char *)(apollo_timer + 1) = 0x40;
+ *(volatile unsigned char *)(apollo_timer + 5) = 0x09;
+ *(volatile unsigned char *)(apollo_timer + 7) = 0xc4;
/* enable IRQ of PIC B */
*(volatile unsigned char *)(pica+1)&=(~8);
#if 0
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
#endif
if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eafa2539a8ee..a74e5d95c384 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,29 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += futex.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local64.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += mutex.h
+generic-y += percpu.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += topology.h
+generic-y += types.h
generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/m68k/include/asm/MC68332.h b/arch/m68k/include/asm/MC68332.h
deleted file mode 100644
index 6bb8f02685a2..000000000000
--- a/arch/m68k/include/asm/MC68332.h
+++ /dev/null
@@ -1,152 +0,0 @@
-
-/* include/asm-m68knommu/MC68332.h: '332 control registers
- *
- * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
- *
- */
-
-#ifndef _MC68332_H_
-#define _MC68332_H_
-
-#define BYTE_REF(addr) (*((volatile unsigned char*)addr))
-#define WORD_REF(addr) (*((volatile unsigned short*)addr))
-
-#define PORTE_ADDR 0xfffa11
-#define PORTE BYTE_REF(PORTE_ADDR)
-#define DDRE_ADDR 0xfffa15
-#define DDRE BYTE_REF(DDRE_ADDR)
-#define PEPAR_ADDR 0xfffa17
-#define PEPAR BYTE_REF(PEPAR_ADDR)
-
-#define PORTF_ADDR 0xfffa19
-#define PORTF BYTE_REF(PORTF_ADDR)
-#define DDRF_ADDR 0xfffa1d
-#define DDRF BYTE_REF(DDRF_ADDR)
-#define PFPAR_ADDR 0xfffa1f
-#define PFPAR BYTE_REF(PFPAR_ADDR)
-
-#define PORTQS_ADDR 0xfffc15
-#define PORTQS BYTE_REF(PORTQS_ADDR)
-#define DDRQS_ADDR 0xfffc17
-#define DDRQS BYTE_REF(DDRQS_ADDR)
-#define PQSPAR_ADDR 0xfffc16
-#define PQSPAR BYTE_REF(PQSPAR_ADDR)
-
-#define CSPAR0_ADDR 0xFFFA44
-#define CSPAR0 WORD_REF(CSPAR0_ADDR)
-#define CSPAR1_ADDR 0xFFFA46
-#define CSPAR1 WORD_REF(CSPAR1_ADDR)
-#define CSARBT_ADDR 0xFFFA48
-#define CSARBT WORD_REF(CSARBT_ADDR)
-#define CSOPBT_ADDR 0xFFFA4A
-#define CSOPBT WORD_REF(CSOPBT_ADDR)
-#define CSBAR0_ADDR 0xFFFA4C
-#define CSBAR0 WORD_REF(CSBAR0_ADDR)
-#define CSOR0_ADDR 0xFFFA4E
-#define CSOR0 WORD_REF(CSOR0_ADDR)
-#define CSBAR1_ADDR 0xFFFA50
-#define CSBAR1 WORD_REF(CSBAR1_ADDR)
-#define CSOR1_ADDR 0xFFFA52
-#define CSOR1 WORD_REF(CSOR1_ADDR)
-#define CSBAR2_ADDR 0xFFFA54
-#define CSBAR2 WORD_REF(CSBAR2_ADDR)
-#define CSOR2_ADDR 0xFFFA56
-#define CSOR2 WORD_REF(CSOR2_ADDR)
-#define CSBAR3_ADDR 0xFFFA58
-#define CSBAR3 WORD_REF(CSBAR3_ADDR)
-#define CSOR3_ADDR 0xFFFA5A
-#define CSOR3 WORD_REF(CSOR3_ADDR)
-#define CSBAR4_ADDR 0xFFFA5C
-#define CSBAR4 WORD_REF(CSBAR4_ADDR)
-#define CSOR4_ADDR 0xFFFA5E
-#define CSOR4 WORD_REF(CSOR4_ADDR)
-#define CSBAR5_ADDR 0xFFFA60
-#define CSBAR5 WORD_REF(CSBAR5_ADDR)
-#define CSOR5_ADDR 0xFFFA62
-#define CSOR5 WORD_REF(CSOR5_ADDR)
-#define CSBAR6_ADDR 0xFFFA64
-#define CSBAR6 WORD_REF(CSBAR6_ADDR)
-#define CSOR6_ADDR 0xFFFA66
-#define CSOR6 WORD_REF(CSOR6_ADDR)
-#define CSBAR7_ADDR 0xFFFA68
-#define CSBAR7 WORD_REF(CSBAR7_ADDR)
-#define CSOR7_ADDR 0xFFFA6A
-#define CSOR7 WORD_REF(CSOR7_ADDR)
-#define CSBAR8_ADDR 0xFFFA6C
-#define CSBAR8 WORD_REF(CSBAR8_ADDR)
-#define CSOR8_ADDR 0xFFFA6E
-#define CSOR8 WORD_REF(CSOR8_ADDR)
-#define CSBAR9_ADDR 0xFFFA70
-#define CSBAR9 WORD_REF(CSBAR9_ADDR)
-#define CSOR9_ADDR 0xFFFA72
-#define CSOR9 WORD_REF(CSOR9_ADDR)
-#define CSBAR10_ADDR 0xFFFA74
-#define CSBAR10 WORD_REF(CSBAR10_ADDR)
-#define CSOR10_ADDR 0xFFFA76
-#define CSOR10 WORD_REF(CSOR10_ADDR)
-
-#define CSOR_MODE_ASYNC 0x0000
-#define CSOR_MODE_SYNC 0x8000
-#define CSOR_MODE_MASK 0x8000
-#define CSOR_BYTE_DISABLE 0x0000
-#define CSOR_BYTE_UPPER 0x4000
-#define CSOR_BYTE_LOWER 0x2000
-#define CSOR_BYTE_BOTH 0x6000
-#define CSOR_BYTE_MASK 0x6000
-#define CSOR_RW_RSVD 0x0000
-#define CSOR_RW_READ 0x0800
-#define CSOR_RW_WRITE 0x1000
-#define CSOR_RW_BOTH 0x1800
-#define CSOR_RW_MASK 0x1800
-#define CSOR_STROBE_DS 0x0400
-#define CSOR_STROBE_AS 0x0000
-#define CSOR_STROBE_MASK 0x0400
-#define CSOR_DSACK_WAIT(x) (wait << 6)
-#define CSOR_DSACK_FTERM (14 << 6)
-#define CSOR_DSACK_EXTERNAL (15 << 6)
-#define CSOR_DSACK_MASK 0x03c0
-#define CSOR_SPACE_CPU 0x0000
-#define CSOR_SPACE_USER 0x0010
-#define CSOR_SPACE_SU 0x0020
-#define CSOR_SPACE_BOTH 0x0030
-#define CSOR_SPACE_MASK 0x0030
-#define CSOR_IPL_ALL 0x0000
-#define CSOR_IPL_PRIORITY(x) (x << 1)
-#define CSOR_IPL_MASK 0x000e
-#define CSOR_AVEC_ON 0x0001
-#define CSOR_AVEC_OFF 0x0000
-#define CSOR_AVEC_MASK 0x0001
-
-#define CSBAR_ADDR(x) ((addr >> 11) << 3)
-#define CSBAR_ADDR_MASK 0xfff8
-#define CSBAR_BLKSIZE_2K 0x0000
-#define CSBAR_BLKSIZE_8K 0x0001
-#define CSBAR_BLKSIZE_16K 0x0002
-#define CSBAR_BLKSIZE_64K 0x0003
-#define CSBAR_BLKSIZE_128K 0x0004
-#define CSBAR_BLKSIZE_256K 0x0005
-#define CSBAR_BLKSIZE_512K 0x0006
-#define CSBAR_BLKSIZE_1M 0x0007
-#define CSBAR_BLKSIZE_MASK 0x0007
-
-#define CSPAR_DISC 0
-#define CSPAR_ALT 1
-#define CSPAR_CS8 2
-#define CSPAR_CS16 3
-#define CSPAR_MASK 3
-
-#define CSPAR0_CSBOOT(x) (x << 0)
-#define CSPAR0_CS0(x) (x << 2)
-#define CSPAR0_CS1(x) (x << 4)
-#define CSPAR0_CS2(x) (x << 6)
-#define CSPAR0_CS3(x) (x << 8)
-#define CSPAR0_CS4(x) (x << 10)
-#define CSPAR0_CS5(x) (x << 12)
-
-#define CSPAR1_CS6(x) (x << 0)
-#define CSPAR1_CS7(x) (x << 2)
-#define CSPAR1_CS8(x) (x << 4)
-#define CSPAR1_CS9(x) (x << 6)
-#define CSPAR1_CS10(x) (x << 8)
-
-#endif
diff --git a/arch/m68k/include/asm/apollodma.h b/arch/m68k/include/asm/apollodma.h
deleted file mode 100644
index 954adc851adb..000000000000
--- a/arch/m68k/include/asm/apollodma.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_APOLLO_DMA_H
-#define _ASM_APOLLO_DMA_H
-
-#include <asm/apollohw.h> /* need byte IO */
-#include <linux/spinlock.h> /* And spinlocks */
-#include <linux/delay.h>
-
-
-#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val))
-#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE)))
-
-/*
- * NOTES about DMA transfers:
- *
- * controller 1: channels 0-3, byte operations, ports 00-1F
- * controller 2: channels 4-7, word operations, ports C0-DF
- *
- * - ALL registers are 8 bits only, regardless of transfer size
- * - channel 4 is not used - cascades 1 into 2.
- * - channels 0-3 are byte - addresses/counts are for physical bytes
- * - channels 5-7 are word - addresses/counts are for physical words
- * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- * - transfer count loaded to registers is 1 less than actual count
- * - controller 2 offsets are all even (2x offsets for controller 1)
- * - page registers for 5-7 don't use data bit 0, represent 128K pages
- * - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- * Address mapping for channels 0-3:
- *
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Address mapping for channels 5-7:
- *
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS 8
-
-/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */
-#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */
-#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */
-#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */
-#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */
-#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */
-#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */
-#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */
-
-#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */
-#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */
-#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */
-#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */
-#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */
-#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */
-#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */
-#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */
-
-#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */
-#define DMA_ADDR_1 (IO_DMA1_BASE+0x02)
-#define DMA_ADDR_2 (IO_DMA1_BASE+0x04)
-#define DMA_ADDR_3 (IO_DMA1_BASE+0x06)
-#define DMA_ADDR_4 (IO_DMA2_BASE+0x00)
-#define DMA_ADDR_5 (IO_DMA2_BASE+0x04)
-#define DMA_ADDR_6 (IO_DMA2_BASE+0x08)
-#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C)
-
-#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */
-#define DMA_CNT_1 (IO_DMA1_BASE+0x03)
-#define DMA_CNT_2 (IO_DMA1_BASE+0x05)
-#define DMA_CNT_3 (IO_DMA1_BASE+0x07)
-#define DMA_CNT_4 (IO_DMA2_BASE+0x02)
-#define DMA_CNT_5 (IO_DMA2_BASE+0x06)
-#define DMA_CNT_6 (IO_DMA2_BASE+0x0A)
-#define DMA_CNT_7 (IO_DMA2_BASE+0x0E)
-
-#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT 0x10
-
-#define DMA_8BIT 0
-#define DMA_16BIT 1
-#define DMA_BUSMASTER 2
-
-extern spinlock_t dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
- else
- dma_outb(dmanr & 3, DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr | 4, DMA1_MASK_REG);
- else
- dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(0, DMA1_CLEAR_FF_REG);
- else
- dma_outb(0, DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
- if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
- else
- dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
-}
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- if (dmanr <= 3) {
- dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
- dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- }
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--;
- if (dmanr <= 3) {
- dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
- dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
- : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
- /* using short to get 16-bit wrap around */
- unsigned short count;
-
- count = 1 + dma_inb(io_port);
- count += dma_inb(io_port) << 8;
-
- return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-/* These are in arch/m68k/apollo/dma.c: */
-extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type);
-extern void dma_unmap_page(unsigned short dma_addr);
-
-#endif /* _ASM_APOLLO_DMA_H */
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h
index a1373b9aa281..635ef4f89010 100644
--- a/arch/m68k/include/asm/apollohw.h
+++ b/arch/m68k/include/asm/apollohw.h
@@ -98,7 +98,7 @@ extern u_long timer_physaddr;
#define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr))
#define pica (IO_BASE + pica_physaddr)
#define picb (IO_BASE + picb_physaddr)
-#define timer (IO_BASE + timer_physaddr)
+#define apollo_timer (IO_BASE + timer_physaddr)
#define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000))
#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/m68k/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h
deleted file mode 100644
index c79c5e892305..000000000000
--- a/arch/m68k/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M68K_CPUTIME_H
-#define __M68K_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M68K_CPUTIME_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 9c09becfd4c9..12d8fe4f1d30 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops)
extern void __bad_udelay(void);
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
/*
* The simpler m68k and ColdFire processors do not have a 32*32->64
* multiply instruction. So we need to handle them a little differently.
diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/m68k/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/m68k/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h
deleted file mode 100644
index 0d4e188d6ef6..000000000000
--- a/arch/m68k/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_ERRNO_H
-#define _M68K_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _M68K_ERRNO_H */
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/m68k/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/m68k/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/m68k/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/m68k/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
deleted file mode 100644
index 3413cc1390ec..000000000000
--- a/arch/m68k/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_KMAP_TYPES_H
-#define __ASM_M68K_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/m68k/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h
deleted file mode 100644
index 6c259263e1f0..000000000000
--- a/arch/m68k/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_LOCAL_H
-#define _ASM_M68K_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* _ASM_M68K_LOCAL_H */
diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/m68k/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/m68k/include/asm/mac_mouse.h b/arch/m68k/include/asm/mac_mouse.h
deleted file mode 100644
index 39a5c292eaee..000000000000
--- a/arch/m68k/include/asm/mac_mouse.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_MAC_MOUSE_H
-#define _ASM_MAC_MOUSE_H
-
-/*
- * linux/include/asm-m68k/mac_mouse.h
- * header file for Macintosh ADB mouse driver
- * 27-10-97 Michael Schmitz
- * copied from:
- * header file for Atari Mouse driver
- * by Robert de Vries (robert@and.nl) on 19Jul93
- */
-
-struct mouse_status {
- char buttons;
- short dx;
- short dy;
- int ready;
- int active;
- wait_queue_head_t wait;
- struct fasync_struct *fasyncptr;
-};
-
-#endif
diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h
deleted file mode 100644
index 319899c47a2c..000000000000
--- a/arch/m68k/include/asm/mcfmbus.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/****************************************************************************/
-
-/*
- * mcfmbus.h -- Coldfire MBUS support defines.
- *
- * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de)
- */
-
-/****************************************************************************/
-
-
-#ifndef mcfmbus_h
-#define mcfmbus_h
-
-
-#define MCFMBUS_BASE 0x280
-#define MCFMBUS_IRQ_VECTOR 0x19
-#define MCFMBUS_IRQ 0x1
-#define MCFMBUS_CLK 0x3f
-#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/
-#define MCFMBUS_ADDRESS 0x01
-
-
-/*
-* Define the 5307 MBUS register set addresses
-*/
-
-#define MCFMBUS_MADR 0x00
-#define MCFMBUS_MFDR 0x04
-#define MCFMBUS_MBCR 0x08
-#define MCFMBUS_MBSR 0x0C
-#define MCFMBUS_MBDR 0x10
-
-
-#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/
-
-#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/
-
-/*
-* Define bit flags in Control Register
-*/
-
-#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */
-#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */
-#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */
-#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */
-#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */
-#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */
-
-/*
-* Define bit flags in Status Register
-*/
-
-#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */
-#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */
-#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */
-#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */
-#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */
-#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */
-#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */
-
-/*
-* Define bit flags in DATA I/O Register
-*/
-
-#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */
-
-#define MBUSIOCSCLOCK 1
-#define MBUSIOCGCLOCK 2
-#define MBUSIOCSADDR 3
-#define MBUSIOCGADDR 4
-#define MBUSIOCSSLADDR 5
-#define MBUSIOCGSLADDR 6
-#define MBUSIOCSSUBADDR 7
-#define MBUSIOCGSUBADDR 8
-
-#endif
diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/m68k/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/m68k/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h
deleted file mode 100644
index 0859d048faf5..000000000000
--- a/arch/m68k/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_PERCPU_H
-#define __ASM_M68K_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_M68K_PERCPU_H */
diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h
deleted file mode 100644
index e7d35019f337..000000000000
--- a/arch/m68k/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_RESOURCE_H
-#define _M68K_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _M68K_RESOURCE_H */
diff --git a/arch/m68k/include/asm/sbus.h b/arch/m68k/include/asm/sbus.h
deleted file mode 100644
index bfe3ba147f2e..000000000000
--- a/arch/m68k/include/asm/sbus.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * some sbus structures and macros to make usage of sbus drivers possible
- */
-
-#ifndef __M68K_SBUS_H
-#define __M68K_SBUS_H
-
-struct sbus_dev {
- struct {
- unsigned int which_io;
- unsigned int phys_addr;
- } reg_addrs[1];
-};
-
-/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
-/* No SBUS on the Sun3, kludge -- sam */
-
-static inline void _sbus_writeb(unsigned char val, unsigned long addr)
-{
- *(volatile unsigned char *)addr = val;
-}
-
-static inline unsigned char _sbus_readb(unsigned long addr)
-{
- return *(volatile unsigned char *)addr;
-}
-
-static inline void _sbus_writel(unsigned long val, unsigned long addr)
-{
- *(volatile unsigned long *)addr = val;
-
-}
-
-extern inline unsigned long _sbus_readl(unsigned long addr)
-{
- return *(volatile unsigned long *)addr;
-}
-
-
-#define sbus_readb(a) _sbus_readb((unsigned long)a)
-#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a)
-#define sbus_readl(a) _sbus_readl((unsigned long)a)
-#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a)
-
-#endif
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
deleted file mode 100644
index 312505452a1e..000000000000
--- a/arch/m68k/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SCATTERLIST_H
-#define _M68K_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
deleted file mode 100644
index 5277e52715ec..000000000000
--- a/arch/m68k/include/asm/sections.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_M68K_SECTIONS_H
-#define _ASM_M68K_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-extern char _sbss[], _ebss[];
-
-#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/include/asm/shm.h b/arch/m68k/include/asm/shm.h
deleted file mode 100644
index fa56ec84a126..000000000000
--- a/arch/m68k/include/asm/shm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _M68K_SHM_H
-#define _M68K_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
- currently out in swap space (see also mm/swap.c):
- bits 0-1 (PAGE_PRESENT) is = 0
- bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
- bits 31..9 are used like this:
- bits 15..9 (SHM_ID) the id of the shared memory segment
- bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
- (actually only bits 25..16 get used since SHMMAX is so low)
- bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
- others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT 9
-#else
-#define SHM_ID_SHIFT 7
-#endif
-#define _SHM_ID_BITS 7
-#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS 15
-#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _M68K_SHM_H */
diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h
deleted file mode 100644
index 851d3d784b53..000000000000
--- a/arch/m68k/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SIGINFO_H
-#define _M68K_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h
deleted file mode 100644
index 08d93f14e061..000000000000
--- a/arch/m68k/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_STATFS_H
-#define _M68K_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _M68K_STATFS_H */
diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h
deleted file mode 100644
index ca173e9f26ff..000000000000
--- a/arch/m68k/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_TOPOLOGY_H
-#define _ASM_M68K_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_M68K_TOPOLOGY_H */
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
deleted file mode 100644
index 89705adcbd52..000000000000
--- a/arch/m68k/include/asm/types.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _M68K_TYPES_H
-#define _M68K_TYPES_H
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-#include <asm-generic/int-ll64.h>
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-
-#define BITS_PER_LONG 32
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_TYPES_H */
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
index f4043ae63db1..2b3ca0bf7a0d 100644
--- a/arch/m68k/include/asm/unaligned.h
+++ b/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
#define _ASM_M68K_UNALIGNED_H
-#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000)
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
@@ -12,7 +12,7 @@
#else
/*
- * The m68k can do unaligned accesses itself.
+ * The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
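Switching the guard from a CPU-family list to CONFIG_CPU_HAS_NO_UNALIGNED only changes which backend is pulled in; callers keep using the same accessors either way. A minimal sketch, assuming the usual <asm/unaligned.h> API:

#include <asm/unaligned.h>

/* Read a big-endian u32 at an arbitrary byte offset in a buffer.
 * Resolves to a plain load on 68020+ and to byte shifts on
 * ColdFire/68000, where unaligned loads are not available. */
static u32 read_be32_field(const u8 *buf, size_t off)
{
	return get_unaligned_be32(buf + off);
}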
diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/m68k/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 7dc186b7a85f..71fb29938dba 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
- pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
- "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
- (int) &_sdata, (int) &_edata,
- (int) &_sbss, (int) &_ebss);
- pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
- (int) &_ebss, (int) memory_start,
- (int) memory_start, (int) memory_end);
+ pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
+ _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
+ pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+ __bss_stop, memory_start, memory_start, memory_end);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8dc16f8..9a5932ec3689 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
goto bad_access;
}
- mem_value = *mem;
+ /*
+ * No need to check for EFAULT; we know that the page is
+ * present and writable.
+ */
+ __get_user(mem_value, mem);
if (mem_value == oldval)
- *mem = newval;
+ __put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
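Because the page has already been faulted in and the pte lock is held, the user word is now read and written with __get_user()/__put_user() rather than by dereferencing the user pointer directly. A hedged sketch of the resulting pattern (the return values are deliberately ignored for the reason given in the comment above):

/* Compare-and-exchange one user word; the caller guarantees the page is
 * present and writable (sketch, not the full syscall). */
static int user_cmpxchg(int __user *mem, int oldval, int newval)
{
	int cur;

	__get_user(cur, mem);		/* access through the user mapping */
	if (cur == oldval)
		__put_user(newval, mem);
	return cur;
}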
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 40e02d9c38b4..06a763f49fd3 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -78,9 +78,7 @@ SECTIONS {
__init_end = .;
}
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_end = .;
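The _sbss/_ebss markers dropped here (and in the other linker scripts in this patch) duplicated symbols that the generic BSS_SECTION() macro already emits, which is why the rest of the series moves every user over to __bss_start/__bss_stop. A minimal sketch of how nommu startup code can clear BSS with the generic symbols, assuming the asm-generic declarations:

#include <linux/string.h>
#include <asm/sections.h>	/* extern char __bss_start[], __bss_stop[]; */

static void __init clear_bss(void)
{
	/* BSS_SECTION() in the generic linker script defines both symbols. */
	memset(__bss_start, 0, __bss_stop - __bss_start);
}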
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 63407c836826..d0993594f558 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,9 +31,7 @@ SECTIONS
RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_edata = .; /* End of data section */
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index ad0f46d64c0b..8080469ee6c1 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,9 +44,7 @@ __init_begin = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_end = . ;
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 79e928a525d0..ee5f0b1b5c5d 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
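CONFIG_CPU_HAS_NO_MULDIV64 covers the cores that lack a 32x32->64 multiply instruction, so muldi3 has to build the wide product from 16-bit halves (that is what the SI_TYPE_SIZE/__BITS4 machinery is for). The technique in plain C, as an illustrative sketch rather than the kernel's actual macro expansion:

#include <stdint.h>

/* Multiply two 32-bit values into a 64-bit product by splitting each
 * operand into 16-bit halves, so no hardware multiply wider than
 * 16x16 -> 32 is needed. */
static uint64_t mul32x32(uint32_t a, uint32_t b)
{
	uint32_t ah = a >> 16, al = a & 0xffff;
	uint32_t bh = b >> 16, bl = b & 0xffff;

	uint32_t lo = al * bl;		/* each partial product fits in 32 bits */
	uint32_t m1 = al * bh;
	uint32_t m2 = ah * bl;
	uint32_t hi = ah * bh;

	return (uint64_t)lo + (((uint64_t)m1 + m2) << 16) + ((uint64_t)hi << 32);
}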
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index f77f258dce3a..282f9de68966 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -104,7 +104,7 @@ void __init print_memmap(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_stext, _etext),
MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(_sbss, _ebss));
+ MLK_ROUNDUP(__bss_start, __bss_stop));
}
void __init mem_init(void)
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 345ec0d83e3d..688e3664aea0 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -91,7 +91,7 @@ void __init mem_init(void)
totalram_pages = free_all_bootmem();
codek = (_etext - _stext) >> 10;
- datak = (_ebss - _sdata) >> 10;
+ datak = (__bss_stop - _sdata) >> 10;
initk = (__init_begin - __init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
index f632fdcb93e9..537d3245b539 100644
--- a/arch/m68k/platform/68328/head-de2.S
+++ b/arch/m68k/platform/68328/head-de2.S
@@ -60,8 +60,8 @@ _start:
* Move ROM filesystem above bss :-)
*/
- moveal #_sbss, %a0 /* romfs at the start of bss */
- moveal #_ebss, %a1 /* Set up destination */
+ moveal #__bss_start, %a0 /* romfs at the start of bss */
+ moveal #__bss_stop, %a1 /* Set up destination */
movel %a0, %a2 /* Copy of bss start */
movel 8(%a0), %d1 /* Get size of ROMFS */
@@ -84,8 +84,8 @@ _start:
* Initialize BSS segment to 0
*/
- lea _sbss, %a0
- lea _ebss, %a1
+ lea __bss_start, %a0
+ lea __bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
2: cmpal %a0, %a1
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index 2ebfd6420818..45a9dad29e3d 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -110,7 +110,7 @@ L0:
movel #CONFIG_VECTORBASE, %d7
addl #16, %d7
moveal %d7, %a0
- moveal #_ebss, %a1
+ moveal #__bss_stop, %a1
lea %a1@(512), %a2
DBG_PUTC('C')
@@ -138,8 +138,8 @@ LD1:
DBG_PUTC('E')
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -150,7 +150,7 @@ L1:
DBG_PUTC('F')
/* Copy command line from end of bss to command line */
- moveal #_ebss, %a0
+ moveal #__bss_stop, %a0
moveal #command_line, %a1
lea %a1@(512), %a2
@@ -165,7 +165,7 @@ L3:
movel #_sdata, %d0
movel %d0, _rambase
- movel #_ebss, %d0
+ movel #__bss_stop, %d0
movel %d0, _ramstart
movel %a4, %d0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
index 7f1aeeacb219..5189ef926098 100644
--- a/arch/m68k/platform/68328/head-ram.S
+++ b/arch/m68k/platform/68328/head-ram.S
@@ -76,8 +76,8 @@ pclp3:
beq pclp3
#endif /* DEBUG */
moveal #0x007ffff0, %ssp
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 >= %a1 */
L1:
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index a5ff96d0295f..3dff98ba2e97 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr
cmpal %a1, %a2
bhi 1b
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
1:
@@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr
movel #_sdata, %d0
movel %d0, _rambase
- movel #_ebss, %d0
+ movel #__bss_stop, %d0
movel %d0, _ramstart
movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
movel %d0, _ramend
diff --git a/arch/m68k/platform/68360/head-ram.S b/arch/m68k/platform/68360/head-ram.S
index 8eb94fb6b971..acd213170d80 100644
--- a/arch/m68k/platform/68360/head-ram.S
+++ b/arch/m68k/platform/68360/head-ram.S
@@ -219,8 +219,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -234,7 +234,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #_ebss, _ramstart
+ move.l #__bss_stop, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/68360/head-rom.S b/arch/m68k/platform/68360/head-rom.S
index 97510e55b802..dfc756d99886 100644
--- a/arch/m68k/platform/68360/head-rom.S
+++ b/arch/m68k/platform/68360/head-rom.S
@@ -13,7 +13,7 @@
*/
.global _stext
-.global _sbss
+.global __bss_start
.global _start
.global _rambase
@@ -229,8 +229,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -244,7 +244,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #_ebss, _ramstart
+ move.l #__bss_stop, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 4e0c9eb3bd1f..b88f5716f357 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -230,8 +230,8 @@ _vstart:
/*
* Move ROM filesystem above bss :-)
*/
- lea _sbss,%a0 /* get start of bss */
- lea _ebss,%a1 /* set up destination */
+ lea __bss_start,%a0 /* get start of bss */
+ lea __bss_stop,%a1 /* set up destination */
movel %a0,%a2 /* copy of bss start */
movel 8(%a0),%d0 /* get size of ROMFS */
@@ -249,7 +249,7 @@ _copy_romfs:
bne _copy_romfs
#else /* CONFIG_ROMFS_FS */
- lea _ebss,%a1
+ lea __bss_stop,%a1
movel %a1,_ramstart
#endif /* CONFIG_ROMFS_FS */
@@ -257,8 +257,8 @@ _copy_romfs:
/*
* Zero out the bss region.
*/
- lea _sbss,%a0 /* get start of bss */
- lea _ebss,%a1 /* get end of bss */
+ lea __bss_start,%a0 /* get start of bss */
+ lea __bss_stop,%a1 /* get end of bss */
clrl %d0 /* set value */
_clear_bss:
movel %d0,(%a0)+ /* clear each word */
diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c
index d8e6349336b4..eeba067d565f 100644
--- a/arch/m68k/sun3/prom/init.c
+++ b/arch/m68k/sun3/prom/init.c
@@ -22,57 +22,13 @@ int prom_root_node;
struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the
- * routines in the prom library. It returns 0 on success, 1 on
- * failure. It gets passed the pointer to the PROM vector.
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
*/
-extern void prom_meminit(void);
-extern void prom_ranges_init(void);
-
void __init prom_init(struct linux_romvec *rp)
{
romvec = rp;
-#ifndef CONFIG_SUN3
- switch(romvec->pv_romvers) {
- case 0:
- prom_vers = PROM_V0;
- break;
- case 2:
- prom_vers = PROM_V2;
- break;
- case 3:
- prom_vers = PROM_V3;
- break;
- case 4:
- prom_vers = PROM_P1275;
- prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
- prom_halt();
- break;
- default:
- prom_printf("PROMLIB: Bad PROM version %d\n",
- romvec->pv_romvers);
- prom_halt();
- break;
- };
-
- prom_rev = romvec->pv_plugin_revision;
- prom_prev = romvec->pv_printrev;
- prom_nodeops = romvec->pv_nodeops;
-
- prom_root_node = prom_getsibling(0);
- if((prom_root_node == 0) || (prom_root_node == -1))
- prom_halt();
-
- if((((unsigned long) prom_nodeops) == 0) ||
- (((unsigned long) prom_nodeops) == -1))
- prom_halt();
-
- prom_meminit();
-
- prom_ranges_init();
-#endif
-// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
-// romvec->pv_romvers, prom_rev);
/* Initialization successful. */
return;
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 4487e150b455..c07ed5d2a820 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
extern char _etext[], _stext[];
-# ifdef CONFIG_MTD_UCLINUX
-extern char *_ebss;
-# endif
-
extern u32 _fdt_start[], _fdt_end[];
# endif /* !__ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bb4907c828dc..2b25bcf05c00 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -21,9 +21,6 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
-extern char *_ebss;
-EXPORT_SYMBOL_GPL(_ebss);
-
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 16d8dfd9094b..4da971d4392f 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
/* Move ROMFS out of BSS before clearing it */
if (romfs_size > 0) {
- memmove(&_ebss, (int *)romfs_base, romfs_size);
+ memmove(&__bss_stop, (int *)romfs_base, romfs_size);
klimit += romfs_size;
}
#endif
@@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
BUG_ON(romfs_size < 0); /* What else can we do? */
printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
- romfs_size, romfs_base, (unsigned)&_ebss);
+ romfs_size, romfs_base, (unsigned)&__bss_stop);
printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 109e9d86ade4..936d01a689d7 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -131,7 +131,6 @@ SECTIONS {
*(COMMON)
. = ALIGN (4) ;
__bss_stop = . ;
- _ebss = . ;
}
. = ALIGN(PAGE_SIZE);
_end = .;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 76de6b68487c..107610e01a29 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -124,6 +124,7 @@ config S390
select GENERIC_TIME_VSYSCALL
select GENERIC_CLOCKEVENTS
select KTIME_SCALAR if 32BIT
+ select HAVE_ARCH_SECCOMP_FILTER
config SCHED_OMIT_FRAME_POINTER
def_bool y
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 0fb34027d3f6..a60d085ddb4d 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,13 +4,11 @@
#ifdef CONFIG_64BIT
#define SECTION_SIZE_BITS 28
-#define MAX_PHYSADDR_BITS 46
#define MAX_PHYSMEM_BITS 46
#else
#define SECTION_SIZE_BITS 25
-#define MAX_PHYSADDR_BITS 31
#define MAX_PHYSMEM_BITS 31
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fb214dd9b7e0..fe7b99759e12 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,6 +12,7 @@
#ifndef _ASM_SYSCALL_H
#define _ASM_SYSCALL_H 1
+#include <linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <asm/ptrace.h>
@@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->orig_gpr2 = args[0];
}
+static inline int syscall_get_arch(struct task_struct *task,
+ struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ return AUDIT_ARCH_S390;
+#endif
+ return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+}
#endif /* _ASM_SYSCALL_H */
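syscall_get_arch() gives generic audit/seccomp code a way to tell which ABI a (possibly 31-bit compat) task is using, which the HAVE_ARCH_SECCOMP_FILTER selection added earlier in this patch depends on. A hedged sketch of the kind of consumer this enables, using the signature introduced in this hunk:

#include <linux/audit.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>

/* Log which syscall ABI 'task' is currently using (sketch). */
static void report_syscall_arch(struct task_struct *task, struct pt_regs *regs)
{
	int arch = syscall_get_arch(task, regs);

	pr_info("syscall arch: %s\n",
		arch == AUDIT_ARCH_S390X ? "s390x (64-bit)" : "s390 (31-bit)");
}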
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index d1225089a4bb..f606d935f495 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
return -EFAULT;
if (a.offset & ~PAGE_MASK)
return -EINVAL;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
@@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index e835d6d5b7fd..2d82cfcbce5b 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper)
llgfr %r6,%r6 # unsigned long
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
- jg sys_process_vm_readv
+ jg compat_sys_process_vm_readv
ENTRY(compat_sys_process_vm_writev_wrapper)
lgfr %r2,%r2 # compat_pid_t
@@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
llgfr %r6,%r6 # unsigned long
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
- jg sys_process_vm_writev
+ jg compat_sys_process_vm_writev
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index f4eb37680b91..e4be113fbac6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
long ret = 0;
/* Do the secure computing check first. */
- secure_computing_strict(regs->gprs[2]);
+ if (secure_computing(regs->gprs[2])) {
+ /* seccomp failures shouldn't expose any additional code. */
+ ret = -1;
+ goto out;
+ }
/*
* The sysc_tracesys code in entry.S stored the system
@@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
regs->gprs[2], regs->orig_gpr2,
regs->gprs[3], regs->gprs[4],
regs->gprs[5]);
+out:
return ret ?: regs->gprs[2];
}
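Unlike secure_computing_strict(), the filter-aware secure_computing() can deny the syscall, and the arch entry hook then has to skip the call entirely instead of falling through to the tracing path; that is what the early return through the new out label does. The general shape of such an entry hook, as a sketch independent of the s390 register layout:

/* Syscall-entry hook with seccomp filter support (sketch). */
static long syscall_trace_enter(struct pt_regs *regs, long syscall_nr)
{
	if (secure_computing(syscall_nr))
		return -1;	/* denied by seccomp: do not run the syscall */

	/* ... ptrace and audit bookkeeping for an allowed syscall ... */
	return syscall_nr;
}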
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index b4a29eee41b8..d0964d22adb5 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
- if (current->personality == PER_LINUX32 && personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
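Personality values keep the personality ID in the low bits and flag bits such as ADDR_LIMIT_32BIT above them, and personality() masks out just the ID. The old code compared and assigned whole values, so any task carrying a flag bit fell through the PER_LINUX32 special case and had its flags clobbered; the new code tests only the ID and sets or clears the PER_LINUX32 bit while preserving everything else. A small illustration, assuming the usual definitions from <linux/personality.h>:

#include <linux/personality.h>

static void personality_example(void)
{
	unsigned int pers = PER_LINUX32 | ADDR_LIMIT_32BIT;

	/* Old test: false, the flag bit makes pers != PER_LINUX32. */
	bool old_match = (pers == PER_LINUX32);

	/* Fixed test: true, personality() strips the flag bits. */
	bool new_match = (personality(pers) == PER_LINUX32);

	(void)old_match;
	(void)new_match;
}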
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 4c171f13b0e8..b22565623142 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -335,7 +335,7 @@ static int dmae_irq_init(void)
for (n = 0; n < NR_DMAE; n++) {
int i = request_irq(get_dma_error_irq(n), dma_err,
- IRQF_SHARED, dmae_name[n], NULL);
+ IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
if (unlikely(i < 0)) {
printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
return i;
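For a line registered with IRQF_SHARED, request_irq() requires a non-NULL dev_id (the same cookie is what free_irq() later uses to pick the right handler off the shared line), so passing NULL here could never work; the name string doubles as a convenient unique cookie. A generic sketch of the rule:

#include <linux/interrupt.h>

static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	const char *name = dev_id;	/* the cookie passed at request time */

	pr_err("%s: DMA error interrupt\n", name);
	return IRQ_HANDLED;
}

static int hook_dma_error_irq(int irq, const char *name)
{
	/* IRQF_SHARED needs a unique, non-NULL dev_id; the same pointer
	 * must be handed to free_irq(irq, (void *)name) on teardown. */
	return request_irq(irq, dma_err_handler, IRQF_SHARED,
			   name, (void *)name);
}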
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 4a5350037c8f..1b6199740e98 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -6,7 +6,6 @@
extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
-extern char _ebss[];
extern char __start_eh_frame[], __stop_eh_frame[];
#endif /* __ASM_SH_SECTIONS_H */
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 48d14498e774..2a0ca8780f0d 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -183,18 +183,30 @@ enum {
GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0,
GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK,
GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE,
- GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22,
- GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20,
- GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18,
- GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16,
- GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14,
- GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12,
- GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10,
- GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8,
- GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6,
- GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4,
- GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2,
- GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0,
+ GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22,
+ GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20,
+ GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18,
+ GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16,
+ GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14,
+ GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12,
+ GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10,
+ GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8,
+ GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6,
+ GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4,
+ GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2,
+ GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0,
+ GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22,
+ GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20,
+ GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18,
+ GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16,
+ GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14,
+ GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12,
+ GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10,
+ GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8,
+ GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6,
+ GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4,
+ GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2,
+ GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0,
GPIO_FN_LCD_M_DISP,
};
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
index f25127c46eca..039e4587dd9b 100644
--- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
@@ -758,12 +758,22 @@ enum {
DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
LCD_CLK_MARK, LCD_EXTCLK_MARK,
LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
- LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK,
- LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK,
- LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
- LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
- LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
- LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK,
+ LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK,
+ LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK,
+ LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK,
+ LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK,
+ LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK,
+ LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK,
+ LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK,
+ LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK,
+ LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK,
+ LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK,
+ LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK,
+ LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK,
+ LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK,
+ LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK,
+ LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK,
LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK,
LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK,
LCD_M_DISP_MARK,
@@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PF1_DATA, PF1MD_000),
PINMUX_DATA(BACK_MARK, PF1MD_001),
+ PINMUX_DATA(SSL10_MARK, PF1MD_011),
PINMUX_DATA(TIOC4B_MARK, PF1MD_100),
PINMUX_DATA(DACK0_MARK, PF1MD_101),
@@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PG27_DATA, PG27MD_00),
PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10),
PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11),
+ PINMUX_DATA(LCD_DE_MARK, PG27MD_11),
PINMUX_DATA(PG26_DATA, PG26MD_00),
PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10),
PINMUX_DATA(PG25_DATA, PG25MD_00),
PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10),
PINMUX_DATA(PG24_DATA, PG24MD_00),
PINMUX_DATA(LCD_CLK_MARK, PG24MD_10),
PINMUX_DATA(PG23_DATA, PG23MD_000),
- PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010),
+ PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011),
PINMUX_DATA(TXD5_MARK, PG23MD_100),
PINMUX_DATA(PG22_DATA, PG22MD_000),
- PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010),
+ PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011),
PINMUX_DATA(RXD5_MARK, PG22MD_100),
PINMUX_DATA(PG21_DATA, PG21MD_000),
PINMUX_DATA(DV_DATA7_MARK, PG21MD_001),
- PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011),
PINMUX_DATA(TXD4_MARK, PG21MD_100),
PINMUX_DATA(PG20_DATA, PG20MD_000),
PINMUX_DATA(DV_DATA6_MARK, PG20MD_001),
- PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_DATA20_PG20_MARK, PG21MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011),
PINMUX_DATA(RXD4_MARK, PG20MD_100),
PINMUX_DATA(PG19_DATA, PG19MD_000),
PINMUX_DATA(DV_DATA5_MARK, PG19MD_001),
- PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010),
+ PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010),
PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011),
PINMUX_DATA(SCK5_MARK, PG19MD_100),
PINMUX_DATA(PG18_DATA, PG18MD_000),
PINMUX_DATA(DV_DATA4_MARK, PG18MD_001),
- PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010),
+ PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010),
PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011),
PINMUX_DATA(SCK4_MARK, PG18MD_100),
@@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = {
// we're going with 2 bits
PINMUX_DATA(PG17_DATA, PG17MD_00),
PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01),
- PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10),
+ PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10),
// TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description
// we're going with 2 bits
PINMUX_DATA(PG16_DATA, PG16MD_00),
PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01),
- PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10),
+ PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10),
PINMUX_DATA(PG15_DATA, PG15MD_00),
PINMUX_DATA(D31_MARK, PG15MD_01),
- PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10),
+ PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10),
PINMUX_DATA(PINT7_PG_MARK, PG15MD_11),
PINMUX_DATA(PG14_DATA, PG14MD_00),
PINMUX_DATA(D30_MARK, PG14MD_01),
- PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10),
+ PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10),
PINMUX_DATA(PINT6_PG_MARK, PG14MD_11),
PINMUX_DATA(PG13_DATA, PG13MD_00),
PINMUX_DATA(D29_MARK, PG13MD_01),
- PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10),
+ PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10),
PINMUX_DATA(PINT5_PG_MARK, PG13MD_11),
PINMUX_DATA(PG12_DATA, PG12MD_00),
PINMUX_DATA(D28_MARK, PG12MD_01),
- PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10),
+ PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10),
PINMUX_DATA(PINT4_PG_MARK, PG12MD_11),
PINMUX_DATA(PG11_DATA, PG11MD_000),
PINMUX_DATA(D27_MARK, PG11MD_001),
- PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010),
+ PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010),
PINMUX_DATA(PINT3_PG_MARK, PG11MD_011),
PINMUX_DATA(TIOC3D_MARK, PG11MD_100),
PINMUX_DATA(PG10_DATA, PG10MD_000),
PINMUX_DATA(D26_MARK, PG10MD_001),
- PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010),
+ PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010),
PINMUX_DATA(PINT2_PG_MARK, PG10MD_011),
PINMUX_DATA(TIOC3C_MARK, PG10MD_100),
PINMUX_DATA(PG9_DATA, PG9MD_000),
PINMUX_DATA(D25_MARK, PG9MD_001),
- PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010),
+ PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010),
PINMUX_DATA(PINT1_PG_MARK, PG9MD_011),
PINMUX_DATA(TIOC3B_MARK, PG9MD_100),
PINMUX_DATA(PG8_DATA, PG8MD_000),
PINMUX_DATA(D24_MARK, PG8MD_001),
- PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010),
+ PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010),
PINMUX_DATA(PINT0_PG_MARK, PG8MD_011),
PINMUX_DATA(TIOC3A_MARK, PG8MD_100),
PINMUX_DATA(PG7_DATA, PG7MD_000),
PINMUX_DATA(D23_MARK, PG7MD_001),
- PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010),
+ PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010),
PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011),
PINMUX_DATA(TIOC2B_MARK, PG7MD_100),
PINMUX_DATA(PG6_DATA, PG6MD_000),
PINMUX_DATA(D22_MARK, PG6MD_001),
- PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010),
+ PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010),
PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011),
PINMUX_DATA(TIOC2A_MARK, PG6MD_100),
PINMUX_DATA(PG5_DATA, PG5MD_000),
PINMUX_DATA(D21_MARK, PG5MD_001),
- PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010),
+ PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010),
PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011),
PINMUX_DATA(TIOC1B_MARK, PG5MD_100),
PINMUX_DATA(PG4_DATA, PG4MD_000),
PINMUX_DATA(D20_MARK, PG4MD_001),
- PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010),
+ PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010),
PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011),
PINMUX_DATA(TIOC1A_MARK, PG4MD_100),
PINMUX_DATA(PG3_DATA, PG3MD_000),
PINMUX_DATA(D19_MARK, PG3MD_001),
- PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010),
+ PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010),
PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011),
PINMUX_DATA(TIOC0D_MARK, PG3MD_100),
PINMUX_DATA(PG2_DATA, PG2MD_000),
PINMUX_DATA(D18_MARK, PG2MD_001),
- PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010),
+ PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010),
PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011),
PINMUX_DATA(TIOC0C_MARK, PG2MD_100),
PINMUX_DATA(PG1_DATA, PG1MD_000),
PINMUX_DATA(D17_MARK, PG1MD_001),
- PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010),
+ PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010),
PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011),
PINMUX_DATA(TIOC0B_MARK, PG1MD_100),
PINMUX_DATA(PG0_DATA, PG0MD_000),
PINMUX_DATA(D16_MARK, PG0MD_001),
- PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010),
+ PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010),
PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011),
PINMUX_DATA(TIOC0A_MARK, PG0MD_100),
@@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ23_DATA, PJ23MD_000),
PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001),
- PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010),
+ PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
PINMUX_DATA(CTX1_MARK, PJ23MD_101),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
- PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010),
+ PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
PINMUX_DATA(CRX1_MARK, PJ22MD_101),
@@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
- PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010),
+ PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
PINMUX_DATA(CTX2_MARK, PJ21MD_101),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
- PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010),
+ PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
PINMUX_DATA(CRX2_MARK, PJ20MD_101),
@@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
- PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010),
+ PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010),
PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011),
PINMUX_DATA(TIOC0D_MARK, PJ19MD_100),
PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101),
@@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ18_DATA, PJ18MD_000),
PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001),
- PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010),
+ PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010),
PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011),
PINMUX_DATA(TIOC0C_MARK, PJ18MD_100),
PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101),
PINMUX_DATA(PJ17_DATA, PJ17MD_000),
PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001),
- PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010),
+ PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010),
PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011),
PINMUX_DATA(TIOC0B_MARK, PJ17MD_100),
PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101),
PINMUX_DATA(PJ16_DATA, PJ16MD_000),
PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001),
- PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010),
+ PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010),
PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011),
PINMUX_DATA(TIOC0A_MARK, PJ16MD_100),
PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101),
PINMUX_DATA(PJ15_DATA, PJ15MD_000),
PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001),
- PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010),
+ PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010),
PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011),
PINMUX_DATA(PWM2H_MARK, PJ15MD_100),
PINMUX_DATA(TXD7_MARK, PJ15MD_101),
PINMUX_DATA(PJ14_DATA, PJ14MD_000),
PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001),
- PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010),
+ PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010),
PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011),
PINMUX_DATA(PWM2G_MARK, PJ14MD_100),
PINMUX_DATA(TXD6_MARK, PJ14MD_101),
PINMUX_DATA(PJ13_DATA, PJ13MD_000),
PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001),
- PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010),
+ PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010),
PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011),
PINMUX_DATA(PWM2F_MARK, PJ13MD_100),
PINMUX_DATA(TXD5_MARK, PJ13MD_101),
PINMUX_DATA(PJ12_DATA, PJ12MD_000),
PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001),
- PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010),
+ PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010),
PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011),
PINMUX_DATA(PWM2E_MARK, PJ12MD_100),
PINMUX_DATA(SCK7_MARK, PJ12MD_101),
PINMUX_DATA(PJ11_DATA, PJ11MD_000),
PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001),
- PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010),
+ PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010),
PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011),
PINMUX_DATA(PWM2D_MARK, PJ11MD_100),
PINMUX_DATA(SCK6_MARK, PJ11MD_101),
PINMUX_DATA(PJ10_DATA, PJ10MD_000),
PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001),
- PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010),
+ PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010),
PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011),
PINMUX_DATA(PWM2C_MARK, PJ10MD_100),
PINMUX_DATA(SCK5_MARK, PJ10MD_101),
PINMUX_DATA(PJ9_DATA, PJ9MD_000),
PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001),
- PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010),
+ PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010),
PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011),
PINMUX_DATA(PWM2B_MARK, PJ9MD_100),
PINMUX_DATA(RTS5_MARK, PJ9MD_101),
PINMUX_DATA(PJ8_DATA, PJ8MD_000),
PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001),
- PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010),
+ PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010),
PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011),
PINMUX_DATA(PWM2A_MARK, PJ8MD_100),
PINMUX_DATA(CTS5_MARK, PJ8MD_101),
PINMUX_DATA(PJ7_DATA, PJ7MD_000),
PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001),
- PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010),
+ PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010),
PINMUX_DATA(SD_D2_MARK, PJ7MD_011),
PINMUX_DATA(PWM1H_MARK, PJ7MD_100),
PINMUX_DATA(PJ6_DATA, PJ6MD_000),
PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001),
- PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010),
+ PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010),
PINMUX_DATA(SD_D3_MARK, PJ6MD_011),
PINMUX_DATA(PWM1G_MARK, PJ6MD_100),
PINMUX_DATA(PJ5_DATA, PJ5MD_000),
PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001),
- PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010),
+ PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010),
PINMUX_DATA(SD_CMD_MARK, PJ5MD_011),
PINMUX_DATA(PWM1F_MARK, PJ5MD_100),
PINMUX_DATA(PJ4_DATA, PJ4MD_000),
PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001),
- PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010),
+ PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010),
PINMUX_DATA(SD_CLK_MARK, PJ4MD_011),
PINMUX_DATA(PWM1E_MARK, PJ4MD_100),
PINMUX_DATA(PJ3_DATA, PJ3MD_000),
PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001),
- PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010),
+ PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010),
PINMUX_DATA(SD_D0_MARK, PJ3MD_011),
PINMUX_DATA(PWM1D_MARK, PJ3MD_100),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001),
- PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010),
+ PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010),
PINMUX_DATA(SD_D1_MARK, PJ2MD_011),
PINMUX_DATA(PWM1C_MARK, PJ2MD_100),
PINMUX_DATA(PJ1_DATA, PJ1MD_000),
PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001),
- PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010),
+ PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010),
PINMUX_DATA(SD_WP_MARK, PJ1MD_011),
PINMUX_DATA(PWM1B_MARK, PJ1MD_100),
PINMUX_DATA(PJ0_DATA, PJ0MD_000),
PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001),
- PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010),
+ PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010),
PINMUX_DATA(SD_CD_MARK, PJ0MD_011),
PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
};
@@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
};
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7b57bf1dc855..ebe7a7d97215 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p)
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
bss_resource.start = virt_to_phys(__bss_start);
- bss_resource.end = virt_to_phys(_ebss)-1;
+ bss_resource.end = virt_to_phys(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 3896f26efa4a..2a0a596ebf67 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
EXPORT_SYMBOL(empty_zero_page);
#define DECLARE_EXPORT(name) \
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c98905f71e28..db88cbf9eafd 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -78,7 +78,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, PAGE_SIZE, 4)
- _ebss = .; /* uClinux MTD sucks */
_end = . ;
STABS_DEBUG
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 84a57761f17e..60164e65d665 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -39,7 +39,7 @@
*
* Make sure the stack pointer contains a valid address. Valid
* addresses for kernel stacks are anywhere after the bss
- * (after _ebss) and anywhere in init_thread_union (init_stack).
+ * (after __bss_stop) and anywhere in init_thread_union (init_stack).
*/
#define STACK_CHECK() \
mov #(THREAD_SIZE >> 10), r0; \
@@ -60,7 +60,7 @@
cmp/hi r2, r1; \
bf stack_panic; \
\
- /* If sp > _ebss then we're OK. */ \
+ /* If sp > __bss_stop then we're OK. */ \
mov.l .L_ebss, r1; \
cmp/hi r1, r15; \
bt 1f; \
@@ -70,7 +70,7 @@
cmp/hs r1, r15; \
bf stack_panic; \
\
- /* If sp > init_stack && sp < _ebss, not OK. */ \
+ /* If sp > init_stack && sp < __bss_stop, not OK. */ \
add r0, r1; \
cmp/hs r1, r15; \
bt stack_panic; \
@@ -292,8 +292,6 @@ stack_panic:
nop
.align 2
-.L_ebss:
- .long _ebss
.L_init_thread_union:
.long init_thread_union
.Lpanic:
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 0dc1f5786081..11c6c9603e71 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -502,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
int ret;
- if (current->personality == PER_LINUX32 &&
- personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
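
A minimal user-space sketch (not part of the patch) of why the personality() mask matters in the hunk above: the old equality test failed as soon as any flag bit such as ADDR_NO_RANDOMIZE was OR'd into current->personality, so the 32-bit persona could be dropped. The constants below follow include/uapi/linux/personality.h; the program itself is only an illustration.

/* Sketch only -- not kernel code. */
#include <stdio.h>

#define PER_MASK          0x00ff
#define PER_LINUX         0x0000
#define PER_LINUX32       0x0008
#define ADDR_NO_RANDOMIZE 0x0040000

#define personality(pers) ((pers) & PER_MASK)

int main(void)
{
	unsigned int cur = PER_LINUX32 | ADDR_NO_RANDOMIZE; /* current task */
	unsigned int req = PER_LINUX;                       /* new request  */

	/* Mask off the flag bits before comparing, then OR PER_LINUX32
	 * back in so the flags already present in 'req' are preserved. */
	if (personality(cur) == PER_LINUX32 && personality(req) == PER_LINUX)
		req |= PER_LINUX32;

	printf("resulting personality: %#x\n", req);
	return 0;
}
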
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 6026fdd1b2ed..d58edf5fefdb 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
+static long __meminitdata addr_start, addr_end;
+static int __meminitdata node_start;
+
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
unsigned long vstart = (unsigned long) start;
@@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
*vmem_pp = pte_base | __pa(block);
- printk(KERN_INFO "[%p-%p] page_structs=%lu "
- "node=%d entry=%lu/%lu\n", start, block, nr,
- node,
- addr >> VMEMMAP_CHUNK_SHIFT,
- VMEMMAP_SIZE);
+ /* check to see if we have contiguous blocks */
+ if (addr_end != addr || node_start != node) {
+ if (addr_start)
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = addr;
+ node_start = node;
+ }
+ addr_end = addr + VMEMMAP_CHUNK;
}
}
return 0;
}
+
+void __meminit vmemmap_populate_print_last(void)
+{
+ if (addr_start) {
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = 0;
+ addr_end = 0;
+ node_start = 0;
+ }
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
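
The vmemmap_populate() hunk above drops the per-chunk printk in favour of the coalescing pattern x86 already uses: remember where the current contiguous run started and print one line only when the run breaks, with vmemmap_populate_print_last() flushing the final run. A stand-alone sketch of that pattern, with made-up addresses and an ordinary printf standing in for printk:

/* Sketch only -- illustrates the range-coalescing idea, not the kernel code. */
#include <stdio.h>

static unsigned long addr_start, addr_end;
static int node_start;

static void note_chunk(unsigned long addr, unsigned long size, int node)
{
	if (addr_end != addr || node_start != node) {   /* run broken? */
		if (addr_start)
			printf(" [%lx-%lx] on node %d\n",
			       addr_start, addr_end - 1, node_start);
		addr_start = addr;
		node_start = node;
	}
	addr_end = addr + size;                         /* extend the run */
}

static void flush_last(void)                            /* "print_last" */
{
	if (addr_start) {
		printf(" [%lx-%lx] on node %d\n",
		       addr_start, addr_end - 1, node_start);
		addr_start = addr_end = 0;
		node_start = 0;
	}
}

int main(void)
{
	note_chunk(0x1000, 0x1000, 0);  /* starts a run              */
	note_chunk(0x2000, 0x1000, 0);  /* contiguous, same node     */
	note_chunk(0x8000, 0x1000, 1);  /* gap: first run is printed */
	flush_last();                   /* second run is printed     */
	return 0;
}
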
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba2657c49217..8ec3a1aa4abd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1527,7 +1527,7 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+ bool "Enable -fstack-protector buffer overflow detection"
---help---
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 441520e4174f..a3ac52b29cbf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -33,6 +33,14 @@
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCACOD 0xffff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dab39350e51e..cb4e43bce98a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; }
extern void perf_events_lapic_init(void);
/*
- * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
- * This flag is otherwise unused and ABI specified to be 0, so nobody should
- * care what we do with it.
+ * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
+ * unused and ABI specified to be 0, so nobody should care what we do with
+ * them.
+ *
+ * EXACT - the IP points to the exact instruction that triggered the
+ * event (HW bugs exempt).
+ * VM - original X86_VM_MASK; see set_linear_ip().
*/
#define PERF_EFLAGS_EXACT (1UL << 3)
+#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 95bf99de9058..1b8e5a03d942 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags;
static char temp_stack[4096];
#endif
-asmlinkage void acpi_enter_s3(void)
-{
- acpi_enter_sleep_state(3, wake_sleep_flags);
-}
/**
* acpi_suspend_lowlevel - save kernel state
*
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 5653a5791ec9..67f59f8c6956 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,7 +2,6 @@
* Variables and functions used by the code in sleep.c
*/
-#include <linux/linkage.h>
#include <asm/realmode.h>
extern unsigned long saved_video_mode;
@@ -11,7 +10,6 @@ extern long saved_magic;
extern int wakeup_pmode_return;
extern u8 wake_sleep_flags;
-extern asmlinkage void acpi_enter_s3(void);
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 72610839f03b..13ab720573e3 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,7 +74,9 @@ restore_registers:
ENTRY(do_suspend_lowlevel)
call save_processor_state
call save_registers
- call acpi_enter_s3
+ pushl $3
+ call acpi_enter_sleep_state
+ addl $4, %esp
# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 014d1d28c397..8ea5164cbd04 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel)
movq %rsi, saved_rsi
addq $8, %rsp
- call acpi_enter_s3
+ movl $3, %edi
+ xorl %eax, %eax
+ call acpi_enter_sleep_state
/* in case something went wrong, restore the machine status and go on */
jmp resume_point
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 931280ff8299..afb7ff79a29f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void)
ideal_nops = intel_nops;
#endif
}
-
+ break;
default:
#ifdef CONFIG_X86_64
ideal_nops = k8_nops;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 406eee784684..a6c64aaddf9a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
BUG_ON(!cfg->vector);
vector = cfg->vector;
- for_each_cpu(cpu, cfg->domain)
+ for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = 0;
@@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
- for_each_cpu(cpu, cfg->old_domain) {
+ for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 413c2ced887c..13017626f9a8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -55,13 +55,6 @@ static struct severity {
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
-#define MCACOD 0xffff
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
MCESEV(
NO, "Invalid",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5e095f873e3e..292d0258311c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -103,6 +103,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
static DEFINE_PER_CPU(struct work_struct, mce_work);
+static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
+
/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -650,14 +652,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
-static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ struct pt_regs *regs)
{
int i, ret = 0;
for (i = 0; i < banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if (m->status & MCI_STATUS_VAL)
+ if (m->status & MCI_STATUS_VAL) {
__set_bit(i, validp);
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
+ }
if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
ret = 1;
}
@@ -1040,7 +1046,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*final = m;
memset(valid_banks, 0, sizeof(valid_banks));
- no_way_out = mce_no_way_out(&m, &msg, valid_banks);
+ no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
barrier();
@@ -1418,6 +1424,34 @@ static void __mcheck_cpu_init_generic(void)
}
}
+/*
+ * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
+ * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
+ * Vol 3B Table 15-20). But this confuses both the code that determines
+ * whether the machine check occurred in kernel or user mode, and also
+ * the severity assessment code. Pretend that EIPV was set, and take the
+ * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
+ */
+static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+{
+ if (bank != 0)
+ return;
+ if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
+ return;
+ if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
+ MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
+ MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
+ MCACOD)) !=
+ (MCI_STATUS_UC|MCI_STATUS_EN|
+ MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
+ MCI_STATUS_AR|MCACOD_INSTR))
+ return;
+
+ m->mcgstatus |= MCG_STATUS_EIPV;
+ m->ip = regs->ip;
+ m->cs = regs->cs;
+}
+
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
@@ -1515,6 +1549,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
mce_bootlog = 0;
+
+ if (c->x86 == 6 && c->x86_model == 45)
+ quirk_no_way_out = quirk_sandybridge_ifu;
}
if (monarch_timeout < 0)
monarch_timeout = 0;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 29557aa06dda..915b876edd1e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/timer.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
#include "perf_event.h"
@@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size)
return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
+static unsigned long get_segment_base(unsigned int segment)
+{
+ struct desc_struct *desc;
+ int idx = segment >> 3;
+
+ if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ if (idx > LDT_ENTRIES)
+ return 0;
+
+ if (idx > current->active_mm->context.size)
+ return 0;
+
+ desc = current->active_mm->context.ldt;
+ } else {
+ if (idx > GDT_ENTRIES)
+ return 0;
+
+ desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+ }
+
+ return get_desc_base(desc + idx);
+}
+
#ifdef CONFIG_COMPAT
#include <asm/compat.h>
@@ -1746,13 +1771,17 @@ static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
+ unsigned long ss_base, cs_base;
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
- fp = compat_ptr(regs->bp);
+ cs_base = get_segment_base(regs->cs);
+ ss_base = get_segment_base(regs->ss);
+
+ fp = compat_ptr(ss_base + regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
@@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
if (!valid_user_frame(fp, sizeof(frame)))
break;
- perf_callchain_store(entry, frame.return_address);
- fp = compat_ptr(frame.next_frame);
+ perf_callchain_store(entry, cs_base + frame.return_address);
+ fp = compat_ptr(ss_base + frame.next_frame);
}
return 1;
}
@@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ /*
+ * We don't know what to do with VM86 stacks.. ignore them for now.
+ */
+ if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
+ return;
+
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
}
}
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
+/*
+ * Deal with code segment offsets for the various execution modes:
+ *
+ * VM86 - the good olde 16 bit days, where the linear address is
+ * 20 bits and we use regs->ip + 0x10 * regs->cs.
+ *
+ * IA32 - Where we need to look at GDT/LDT segment descriptor tables
+ * to figure out what the 32bit base address is.
+ *
+ * X32 - has TIF_X32 set, but is running in x86_64
+ *
+ * X86_64 - CS,DS,SS,ES are all zero based.
+ */
+static unsigned long code_segment_base(struct pt_regs *regs)
{
- unsigned long ip;
+ /*
+ * If we are in VM86 mode, add the segment offset to convert to a
+ * linear address.
+ */
+ if (regs->flags & X86_VM_MASK)
+ return 0x10 * regs->cs;
+
+ /*
+ * For IA32 we look at the GDT/LDT segment base to convert the
+ * effective IP to a linear address.
+ */
+#ifdef CONFIG_X86_32
+ if (user_mode(regs) && regs->cs != __USER_CS)
+ return get_segment_base(regs->cs);
+#else
+ if (test_thread_flag(TIF_IA32)) {
+ if (user_mode(regs) && regs->cs != __USER32_CS)
+ return get_segment_base(regs->cs);
+ }
+#endif
+ return 0;
+}
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- ip = perf_guest_cbs->get_guest_ip();
- else
- ip = instruction_pointer(regs);
+ return perf_guest_cbs->get_guest_ip();
- return ip;
+ return regs->ip + code_segment_base(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
- if (!kernel_ip(regs->ip))
+ if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 821d53b696d1..6605a81ba339 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip)
#endif
}
+/*
+ * Not all PMUs provide the right context information to place the reported IP
+ * into full context. Specifically segment registers are typically not
+ * supplied.
+ *
+ * Assuming the address is a linear address (it is for IBS), we fake the CS and
+ * vm86 mode using the known zero-based code segment and 'fix up' the registers
+ * to reflect this.
+ *
+ * Intel PEBS/LBR appear to typically provide the effective address, nothing
+ * much we can do about that but pray and treat it like a linear address.
+ */
+static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
+ if (regs->flags & X86_VM_MASK)
+ regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
+ regs->ip = ip;
+}
+
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
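
set_linear_ip() above leans on a small eflags trick: bit 5 (PERF_EFLAGS_VM here) is specified to be zero in the ABI, so when the sampled flags had X86_VM_MASK set, a single XOR clears the real VM bit and parks the information in the spare bit, where perf_callchain_user() and perf_instruction_pointer() can find it later. A user-space sketch of just that bit swap, with the bit positions taken from the x86 headers:

/* Sketch only -- demonstrates the XOR swap, not the perf code itself. */
#include <stdio.h>

#define X86_VM_MASK    (1UL << 17)  /* EFLAGS.VM                   */
#define PERF_EFLAGS_VM (1UL << 5)   /* ABI-zero bit reused by perf */

int main(void)
{
	unsigned long flags = X86_VM_MASK | 0x2;  /* sample taken in vm86 */

	/* Before the XOR, VM is set and PERF_EFLAGS_VM is clear, so one
	 * XOR with both masks clears the former and sets the latter. */
	if (flags & X86_VM_MASK)
		flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);

	printf("VM cleared: %d, PERF_EFLAGS_VM set: %d\n",
	       !(flags & X86_VM_MASK), !!(flags & PERF_EFLAGS_VM));
	return 0;
}
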
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index da9bcdcd9856..7bfb5bec8630 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,6 +13,8 @@
#include <asm/apic.h>
+#include "perf_event.h"
+
static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -536,7 +538,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
regs.flags &= ~PERF_EFLAGS_EXACT;
} else {
- instruction_pointer_set(&regs, ibs_data.regs[1]);
+ set_linear_ip(&regs, ibs_data.regs[1]);
regs.flags |= PERF_EFLAGS_EXACT;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 629ae0b7ad90..e38d97bf4259 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
- regs->ip = from;
+ set_linear_ip(regs, from);
return 1;
}
@@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} while (to < ip);
if (to == ip) {
- regs->ip = old_to;
+ set_linear_ip(regs, old_to);
return 1;
}
@@ -569,7 +569,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
- regs.ip = pebs->ip;
+ regs.flags = pebs->flags;
+ set_linear_ip(&regs, pebs->ip);
regs.bp = pebs->bp;
regs.sp = pebs->sp;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index f3851892e077..c9e5dc56630a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,7 +5,7 @@
#include "perf_event.h"
#define UNCORE_PMU_NAME_LEN 32
-#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
+#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
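
The 60LL change above is an overflow fix: NSEC_PER_SEC is a plain long, so on 32-bit builds 60 * NSEC_PER_SEC is evaluated in 32-bit arithmetic and overflows before it ever reaches the hrtimer. A small demonstration of the promotion rules (the overflow is only visible where long is 32 bits wide):

/* Sketch only -- shows the integer promotion, not the uncore driver. */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	/* If long is 32-bit, the multiply is done in 32 bits, overflows,
	 * and only the already-wrong result is widened to 64 bits. */
	long long wrapped = 60 * NSEC_PER_SEC;

	/* The LL suffix forces the multiplication itself into 64 bits. */
	long long correct = 60LL * NSEC_PER_SEC;

	printf("60 * NSEC_PER_SEC   = %lld\n", wrapped);
	printf("60LL * NSEC_PER_SEC = %lld\n", correct);
	return 0;
}
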
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 1f5f1d5d2a02..7ad683d78645 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -328,6 +328,7 @@ void fixup_irqs(void)
chip->irq_retrigger(data);
raw_spin_unlock(&desc->lock);
}
+ __this_cpu_write(vector_irq[vector], -1);
}
}
#endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 1d5d31ea686b..dc1404bf8e4b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
{
struct setup_data_node *node;
struct setup_data *data;
- int error = -ENOMEM;
+ int error;
struct dentry *d;
struct page *pg;
u64 pa_data;
@@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent)
while (pa_data) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ if (!node) {
+ error = -ENOMEM;
goto err_dir;
+ }
pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
if (PageHighMem(pg)) {
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1df8fb9e1d5d..e498b18f010c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
addr &= 1;
if (addr == 0) {
if (val & 0x10) {
+ u8 edge_irr = s->irr & ~s->elcr;
+ int i;
+ bool found = false;
+ struct kvm_vcpu *vcpu;
+
s->init4 = val & 1;
s->last_irr = 0;
s->irr &= s->elcr;
@@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x08)
pr_pic_unimpl(
"level sensitive irq not supported");
+
+ kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+ if (kvm_apic_accept_pic_intr(vcpu)) {
+ found = true;
+ break;
+ }
+
+ if (found)
+ for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+ if (edge_irr & (1 << irq))
+ pic_clear_isr(s, irq);
} else if (val & 0x08) {
if (val & 0x04)
s->poll = 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c39b60707e02..c00f03de1b79 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1488,13 +1488,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
-#else
- /*
- * The sysexit path does not restore ds/es, so we must set them to
- * a reasonable value ourselves.
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
#endif
reload_tss();
#ifdef CONFIG_X86_64
@@ -6370,6 +6363,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+#ifndef CONFIG_X86_64
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ *
+ * We can't defer this to vmx_load_host_state() since that function
+ * may be executed in interrupt context, which saves and restores segments
+ * around it, nullifying its effect.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
+
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
| (1 << VCPU_EXREG_RFLAGS)
| (1 << VCPU_EXREG_CPL)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b59508ff07..42bce48f6928 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
*/
getboottime(&boot);
+ if (kvm->arch.kvmclock_offset) {
+ struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
+ boot = timespec_sub(boot, ts);
+ }
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
wc.version = version;
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8bcb6..4ddf497ca65b 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
int node, pxm;
if (srat_disabled())
- return;
+ return -1;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
- return;
+ return -1;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- return;
+ return -1;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
- return;
+ return -1;
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
- return;
+ return -1;
}
if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
- return;
+ return -1;
}
node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
+ return 0;
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 51171aeff0dc..29aed7ac2c02 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -318,7 +318,7 @@
309 common getcpu sys_getcpu
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
-312 64 kcmp sys_kcmp
+312 common kcmp sys_kcmp
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 64effdc6da94..b2e91d40a4cb 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -194,6 +194,11 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID
* boundary violation will require three middle nodes. */
RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
+/* When we populate back during bootup, the number of pages can vary. The
+ * max we have seen is 395979, but that does not mean it can't be more.
+ * Some machines can even have 3GB I/O holes. So let's reserve enough
+ * for 4GB of I/O and E820 holes. */
+RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index ac7034129f3f..d5fdd36190cc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
static struct acpi_driver acpi_ac_driver = {
@@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device)
return result;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev)
{
struct acpi_ac *ac;
@@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
+#endif
static int acpi_ac_remove(struct acpi_device *device, int type)
{
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99ae3a6f..5de4ec72766d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
/*
* hwsleep - sleep/wake support (Legacy sleep registers)
*/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake(u8 sleep_state);
/*
* hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
*/
void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_sleep(u8 sleep_state);
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake(u8 sleep_state);
/*
* hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 48518dac5342..94996f9ae3ad 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
* FUNCTION: acpi_hw_extended_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
- * flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
*
******************************************************************************/
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_sleep(u8 sleep_state)
{
acpi_status status;
u8 sleep_type_value;
@@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
acpi_gbl_system_awake_and_running = FALSE;
- /* Optionally execute _GTS (Going To Sleep) */
-
- if (flags & ACPI_EXECUTE_GTS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
- }
-
/* Flush caches, as per ACPI specification */
ACPI_FLUSH_CPU_CACHE();
@@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_extended_wake_prep
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
{
acpi_status status;
u8 sleep_type_value;
@@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
&acpi_gbl_FADT.sleep_control);
}
- /* Optionally execute _BFS (Back From Sleep) */
-
- if (flags & ACPI_EXECUTE_BFS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
- }
return_ACPI_STATUS(AE_OK);
}
@@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake(u8 sleep_state)
{
ACPI_FUNCTION_TRACE(hw_extended_wake);
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 9960fe9ef533..3fddde056a5e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep")
* FUNCTION: acpi_hw_legacy_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
- * flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep")
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
{
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
return_ACPI_STATUS(status);
}
- /* Optionally execute _GTS (Going To Sleep) */
-
- if (flags & ACPI_EXECUTE_GTS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
- }
-
/* Get current value of PM1A control */
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_legacy_wake_prep
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
{
acpi_status status;
struct acpi_bit_register_info *sleep_type_reg_info;
@@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
}
}
- /* Optionally execute _BFS (Back From Sleep) */
-
- if (flags & ACPI_EXECUTE_BFS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
- }
return_ACPI_STATUS(status);
}
@@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_legacy_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * flags - Reserved, set to zero
*
* RETURN: Status
*
@@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake(u8 sleep_state)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f8684bfe7907..1f165a750ae2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
/*
* Dispatch table used to efficiently branch to the various sleep
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
*
******************************************************************************/
static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
{
acpi_status status;
struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
* use the extended sleep registers
*/
if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
- status = sleep_functions->extended_function(sleep_state, flags);
+ status = sleep_functions->extended_function(sleep_state);
} else {
/* Legacy sleep */
- status = sleep_functions->legacy_function(sleep_state, flags);
+ status = sleep_functions->legacy_function(sleep_state);
}
return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
* For the case where reduced-hardware-only code is being generated,
* we know that only the extended sleep registers are available
*/
- status = sleep_functions->extended_function(sleep_state, flags);
+ status = sleep_functions->extended_function(sleep_state);
return (status);
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* FUNCTION: acpi_enter_sleep_state
*
* PARAMETERS: sleep_state - Which sleep state to enter
- * flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
@@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
}
status =
- acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
+ acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* Called with interrupts DISABLED.
*
******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
status =
- acpi_hw_sleep_dispatch(sleep_state, flags,
+ acpi_hw_sleep_dispatch(sleep_state,
ACPI_WAKE_PREP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
-
- status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
+ status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index ff2c876ec412..45e3e1759fb8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct device *dev)
{
@@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 79d4c22f7a6d..314a3b84bbc7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device, int type);
static void acpi_button_notify(struct acpi_device *device, u32 event);
+#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
static struct acpi_driver acpi_button_driver = {
@@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
@@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev)
return acpi_lid_send_state(device);
return 0;
}
+#endif
static int acpi_button_add(struct acpi_device *device)
{
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 669d9ee80d16..bc36a476f1ab 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
static struct acpi_driver acpi_fan_driver = {
@@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev)
{
if (!dev)
@@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev)
return result;
}
+#endif
static int __init acpi_fan_init(void)
{
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be7b07d..cb31298ca684 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
return 0;
}
+static int __initdata parsed_numa_memblks;
+
static int __init
acpi_parse_memory_affinity(struct acpi_subtable_header * header,
const unsigned long end)
@@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
acpi_table_print_srat_entry(header);
/* let architecture-dependent part to do it */
- acpi_numa_memory_affinity_init(memory_affinity);
-
+ if (!acpi_numa_memory_affinity_init(memory_affinity))
+ parsed_numa_memblks++;
return 0;
}
@@ -304,8 +306,10 @@ int __init acpi_numa_init(void)
acpi_numa_arch_fixup();
- if (cnt <= 0)
- return cnt ?: -ENOENT;
+ if (cnt < 0)
+ return cnt;
+ else if (!parsed_numa_memblks)
+ return -ENOENT;
return 0;
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ec54014c321c..72a2c98bc429 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
- if (flags != base_flags)
- acpi_pci_osc_support(root, flags);
+ if (flags != base_flags) {
+ status = acpi_pci_osc_support(root, flags);
+ if (ACPI_FAILURE(status)) {
+ dev_info(root->bus->bridge, "ACPI _OSC support "
+ "notification failed, disabling PCIe ASPM\n");
+ pcie_no_aspm();
+ flags = base_flags;
+ }
+ }
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 215ecd097408..fc1803414629 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, power_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_power_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
static struct acpi_driver acpi_power_driver = {
@@ -775,6 +777,7 @@ static int acpi_power_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_power_resume(struct device *dev)
{
int result = 0, state;
@@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev)
return result;
}
+#endif
int __init acpi_power_init(void)
{
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index ff8e04f2fab4..bfc31cb0dd3e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
/* Normal CPU soft online event */
} else {
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_cst_has_changed(pr);
+ acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index c0b9aa5faf4c..ff0740e0a9c2 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void)
#endif
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_sbs_resume(struct device *dev)
{
struct acpi_sbs *sbs;
@@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev)
acpi_sbs_callback(sbs);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a7a9c929247..fdcdbb652915 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
#include "internal.h"
#include "sleep.h"
-u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
-static unsigned int gts, bfs;
-static int set_param_wake_flag(const char *val, struct kernel_param *kp)
-{
- int ret = param_set_int(val, kp);
-
- if (ret)
- return ret;
-
- if (kp->arg == (const char *)&gts) {
- if (gts)
- wake_sleep_flags |= ACPI_EXECUTE_GTS;
- else
- wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
- }
- if (kp->arg == (const char *)&bfs) {
- if (bfs)
- wake_sleep_flags |= ACPI_EXECUTE_BFS;
- else
- wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
- }
- return ret;
-}
-module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
-module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
static u8 sleep_states[ACPI_S_STATE_COUNT];
-static bool pwr_btn_event_pending;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+static bool pwr_btn_event_pending;
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
@@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
- status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
+ status = acpi_enter_sleep_state(acpi_state);
break;
case ACPI_STATE_S3:
@@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* This violates the spec but is required for bug compatibility. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- /* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(acpi_state);
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void)
ACPI_FLUSH_CPU_CACHE();
/* This shouldn't return. If it returns, we have a problem */
- status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
- /* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+ status = acpi_enter_sleep_state(ACPI_STATE_S4);
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
@@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void)
* enable it here.
*/
acpi_enable();
- /* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature) {
printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -892,33 +864,7 @@ static void acpi_power_off(void)
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk(KERN_DEBUG "%s called\n", __func__);
local_irq_disable();
- acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
-}
-
-/*
- * ACPI 2.0 created the optional _GTS and _BFS,
- * but industry adoption has been neither rapid nor broad.
- *
- * Linux gets into trouble when it executes poorly validated
- * paths through the BIOS, so disable _GTS and _BFS by default,
- * but do speak up and offer the option to enable them.
- */
-static void __init acpi_gts_bfs_check(void)
-{
- acpi_handle dummy;
-
- if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
- {
- printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
- printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
- "please notify linux-acpi@vger.kernel.org\n");
- }
- if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
- {
- printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
- printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
- "please notify linux-acpi@vger.kernel.org\n");
- }
+ acpi_enter_sleep_state(ACPI_STATE_S5);
}
int __init acpi_sleep_init(void)
@@ -979,6 +925,5 @@ int __init acpi_sleep_init(void)
* object can also be evaluated when the system enters S5.
*/
register_reboot_notifier(&tts_notifier);
- acpi_gts_bfs_check();
return 0;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 240a24400976..7c3f98ba4afe 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", strlen("enable"))) {
+ if (!strncmp(val, "enable", sizeof("enable") - 1)) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", strlen("disable"))) {
+ if (!strncmp(val, "disable", sizeof("disable") - 1)) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 9fe90e9fecb5..edda74a43406 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
static struct acpi_driver acpi_thermal_driver = {
@@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
@@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev)
return AE_OK;
}
+#endif
static int thermal_act(const struct dmi_system_id *d) {
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d4386019af5d..96cce6d53195 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
{
printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
dev->number);
- return error;
+ return -ENOMEM;
}
IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 869d7ff2227f..eb78e9640c4a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev)
*/
int pm_clk_create(struct device *dev)
{
- int ret = dev_pm_get_subsys_data(dev);
- return ret < 0 ? ret : 0;
+ return dev_pm_get_subsys_data(dev);
}
/**
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index a14085cc613f..39c32529b833 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -24,7 +24,6 @@
int dev_pm_get_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
- int ret = 0;
psd = kzalloc(sizeof(*psd), GFP_KERNEL);
if (!psd)
@@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev)
dev->power.subsys_data = psd;
pm_clk_init(dev);
psd = NULL;
- ret = 1;
}
spin_unlock_irq(&dev->power.lock);
@@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev)
/* kfree() verifies that its argument is nonzero. */
kfree(psd);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 11b32d2642df..a6e5672c67e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ 0, },
};
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 26823d97fd9f..9ea4627dc0c2 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
/* for these chips OTP is always available */
present = true;
break;
-
+ case BCMA_CHIP_ID_BCM43228:
+ present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
+ break;
default:
present = false;
break;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2e0e7fc1dbba..dbe6135a2abe 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3537,9 +3537,9 @@ static void drbd_cleanup(void)
}
/**
- * drbd_congested() - Callback for pdflush
+ * drbd_congested() - Callback for the flusher thread
* @congested_data: User data
- * @bdi_bits: Bits pdflush is currently interested in
+ * @bdi_bits: Bits the BDI flusher thread is currently interested in
*
* Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
*/
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 57226424690c..6f007b6c240d 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -239,16 +239,45 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9ed92ef5829b..08fc5cbb13cd 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1502,15 +1502,73 @@ static const struct intel_gtt_driver_description {
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 89682fa8801e..c4be3519a587 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
+#ifdef CONFIG_PM_SLEEP
static int tpm_tis_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev)
return tpm_pm_resume(dev);
}
+#endif
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index cdc02ac8f41a..503996a94a6a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void)
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
+ ret = -ENOMEM;
goto out_free;
}
pcch_hdr = pcch_virt_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index fcfeb3cd8d31..5084975d793c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -172,7 +172,8 @@ struct imxdma_engine {
struct device_dma_parameters dma_parms;
struct dma_device dma_device;
void __iomem *base;
- struct clk *dma_clk;
+ struct clk *dma_ahb;
+ struct clk *dma_ipg;
spinlock_t lock;
struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
struct imxdma_channel channel[IMX_DMA_CHANNELS];
@@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
return 0;
}
- imxdma->dma_clk = clk_get(NULL, "dma");
- if (IS_ERR(imxdma->dma_clk))
- return PTR_ERR(imxdma->dma_clk);
- clk_enable(imxdma->dma_clk);
+ imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imxdma->dma_ipg)) {
+ ret = PTR_ERR(imxdma->dma_ipg);
+ goto err_clk;
+ }
+
+ imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(imxdma->dma_ahb)) {
+ ret = PTR_ERR(imxdma->dma_ahb);
+ goto err_clk;
+ }
+
+ clk_prepare_enable(imxdma->dma_ipg);
+ clk_prepare_enable(imxdma->dma_ahb);
/* reset DMA module */
imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
- kfree(imxdma);
- return ret;
+ goto err_enable;
}
ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
free_irq(MX1_DMA_INT, NULL);
- kfree(imxdma);
- return ret;
+ goto err_enable;
}
}
@@ -1094,7 +1103,10 @@ err_init:
free_irq(MX1_DMA_INT, NULL);
free_irq(MX1_DMA_ERR, NULL);
}
-
+err_enable:
+ clk_disable_unprepare(imxdma->dma_ipg);
+ clk_disable_unprepare(imxdma->dma_ahb);
+err_clk:
kfree(imxdma);
return ret;
}
@@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev)
free_irq(MX1_DMA_ERR, NULL);
}
- kfree(imxdma);
+ clk_disable_unprepare(imxdma->dma_ipg);
+ clk_disable_unprepare(imxdma->dma_ahb);
+ kfree(imxdma);
return 0;
}
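The imx-dma changes above replace the bare clk_get(NULL, "dma")/clk_enable() pair with two named, device-managed clocks ("ipg" and "ahb"), switch to clk_prepare_enable(), and unwind failures through the new err_enable/err_clk labels instead of duplicated kfree()/return statements. A minimal probe-shaped sketch of that clock-handling pattern, with hypothetical foo_* names and (unlike the patch itself) the enable return values checked; it only builds inside a kernel tree:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct clk *ipg, *ahb;
        int ret;

        /* devm_*: the clk references are dropped automatically on unbind. */
        ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(ipg))
                return PTR_ERR(ipg);

        ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(ahb))
                return PTR_ERR(ahb);

        /* prepare+enable in one call; must be balanced on every exit path. */
        ret = clk_prepare_enable(ipg);
        if (ret)
                return ret;
        ret = clk_prepare_enable(ahb);
        if (ret)
                goto err_ipg;

        /* ... touch the DMA registers only while both clocks are running ... */

        return 0;

err_ipg:
        clk_disable_unprepare(ipg);
        return ret;
}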
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d52dbc6c54ab..24acd711e032 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma *tdma = tdc->tdma;
+ int ret;
dma_cookie_init(&tdc->dma_chan);
tdc->config_init = false;
- return 0;
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0)
+ dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
+ return ret;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma *tdma = tdc->tdma;
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
@@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_del(&sg_req->node);
kfree(sg_req);
}
+ clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
@@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
}
}
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ goto err_pm_disable;
+ }
+
/* Reset DMA controller */
tegra_periph_reset_assert(tdma->dma_clk);
udelay(2);
@@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+ clk_disable_unprepare(tdma->dma_clk);
+
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index a1c8754f52cf..202a99207b7d 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
resource_size_t start, len;
struct lnw_gpio *lnw;
u32 gpio_base;
- int retval = 0;
+ int retval;
int ngpio = id->driver_data;
retval = pci_enable_device(pdev);
@@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
base = ioremap_nocache(start, len);
if (!base) {
dev_err(&pdev->dev, "error mapping bar1\n");
+ retval = -EFAULT;
goto err3;
}
gpio_base = *((u32 *)base + 1);
@@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
&lnw_gpio_irq_ops, lnw);
- if (!lnw->domain)
+ if (!lnw->domain) {
+ retval = -ENOMEM;
goto err3;
+ }
lnw->reg_base = base;
lnw->chip.label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 71a838f44501..b38986285868 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset)
if (offset < 20)
return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
- return INTEL_MSIC_GPIO1HV0CTLO + offset + 20;
+ return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
}
static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4db460b6ecf7..80f44bb64a87 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
port->bgc.gc.to_irq = mxc_gpio_to_irq;
- port->bgc.gc.base = pdev->id * 32;
- port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
- port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
+ port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
+ pdev->id * 32;
err = gpiochip_add(&port->bgc.gc);
if (err)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 58a6a63a6ece..9cac88a65f78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -62,6 +62,7 @@ int pxa_last_gpio;
#ifdef CONFIG_OF
static struct irq_domain *domain;
+static struct device_node *pxa_gpio_of_node;
#endif
struct pxa_gpio_chip {
@@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
(value ? GPSR_OFFSET : GPCR_OFFSET));
}
+#ifdef CONFIG_OF_GPIO
+static int pxa_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (gpiospec->args[0] > pxa_last_gpio)
+ return -EINVAL;
+
+ if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0] % 32;
+}
+#endif
+
static int __devinit pxa_init_gpio_chip(int gpio_end,
int (*set_wake)(unsigned int, unsigned int))
{
@@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end,
c->get = pxa_gpio_get;
c->set = pxa_gpio_set;
c->to_irq = pxa_gpio_to_irq;
+#ifdef CONFIG_OF_GPIO
+ c->of_node = pxa_gpio_of_node;
+ c->of_xlate = pxa_gpio_of_xlate;
+ c->of_gpio_n_cells = 2;
+#endif
/* number of GPIOs on last bank may be less than 32 */
c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -488,6 +512,7 @@ static int pxa_gpio_nums(void)
return count;
}
+#ifdef CONFIG_OF
static struct of_device_id pxa_gpio_dt_ids[] = {
{ .compatible = "mrvl,pxa-gpio" },
{ .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
@@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
const struct irq_domain_ops pxa_irq_domain_ops = {
.map = pxa_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
-#ifdef CONFIG_OF
static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
{
int ret, nr_banks, nr_gpios, irq_base;
@@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
}
domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
&pxa_irq_domain_ops, NULL);
+ pxa_gpio_of_node = np;
return 0;
err:
iounmap(gpio_reg_base);
@@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = {
.probe = pxa_gpio_probe,
.driver = {
.name = "pxa-gpio",
- .of_match_table = pxa_gpio_dt_ids,
+ .of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
};
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 92f7b2bb79d4..ba126cc04073 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
},
}, {
.chip = {
- .base = EXYNOS5_GPC4(0),
- .ngpio = EXYNOS5_GPIO_C4_NR,
- .label = "GPC4",
- },
- }, {
- .chip = {
.base = EXYNOS5_GPD0(0),
.ngpio = EXYNOS5_GPIO_D0_NR,
.label = "GPD0",
@@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
.label = "GPY6",
},
}, {
+ .chip = {
+ .base = EXYNOS5_GPC4(0),
+ .ngpio = EXYNOS5_GPIO_C4_NR,
+ .label = "GPC4",
+ },
+ }, {
.config = &samsung_gpio_cfgs[9],
.irq_base = IRQ_EINT(0),
.chip = {
@@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void)
}
/* need to set base address for gpc4 */
- exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+ exynos5_gpios_1[20].base = gpio_base1 + 0x2E0;
/* need to set base address for gpx */
chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 424dce8e3f30..8707d4572a06 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
break;
default:
- return -ENODEV;
+ err = -ENODEV;
+ goto err_sch_gpio_core;
}
sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 66d4a28ad5a2..0303935d10e2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name,
{
const struct firmware *fw;
struct platform_device *pdev;
- u8 *fwdata = NULL, *edid;
+ u8 *fwdata = NULL, *edid, *new_edid;
int fwsize, expected;
int builtin = 0, err = 0;
int i, valid_extensions = 0;
@@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name,
"\"%s\" for connector \"%s\"\n", valid_extensions,
edid[0x7e], name, connector_name);
edid[0x7e] = valid_extensions;
- edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+ new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
GFP_KERNEL);
- if (edid == NULL) {
+ if (new_edid == NULL) {
err = -ENOMEM;
+ kfree(edid);
goto relfw_out;
}
+ edid = new_edid;
}
connector->display_info.raw_edid = edid;
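The drm_edid_load fix above is the standard krealloc() anti-leak idiom: writing the result of krealloc() straight back into edid would discard the only pointer to the original buffer if the reallocation failed, so the result goes into new_edid first and the old block is freed explicitly on failure. A standalone userspace analogue using realloc(), which fails the same way (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Grow *buf to new_len bytes without leaking it when realloc() fails. */
static int grow_buffer(unsigned char **buf, size_t new_len)
{
        unsigned char *tmp = realloc(*buf, new_len);

        if (!tmp) {
                free(*buf);     /* the old block is still ours to free */
                *buf = NULL;
                return -1;
        }
        *buf = tmp;
        return 0;
}

int main(void)
{
        unsigned char *edid = calloc(128, 1);

        if (!edid)
                return 1;
        if (grow_buffer(&edid, 2 * 128) == 0)
                printf("grown to %d bytes\n", 2 * 128);
        free(edid);
        return 0;
}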
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ed22612bc847..a24ffbe97c01 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
+ INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
+ INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
+ INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
+ INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
+ INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
+ INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
+ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index da8b01fb1bf8..a9d58d72bb4d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
- int ret;
if (dev_priv->hw_contexts_disabled)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5af631e788c8..ff2819ea0813 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non_secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
- /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
- * pipe_control writes because the gpu doesn't properly redirect them
- * through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
- reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !target_i915_obj->has_global_gtt_mapping)) {
- i915_gem_gtt_bind_object(target_i915_obj,
- target_i915_obj->cache_level);
- }
-
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9fd25a435536..ee9b68f6bc36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -361,7 +361,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->mm.gtt->needs_dmar)
+ /* don't map imported dma buf objects */
+ if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
return intel_gtt_map_memory(obj->pages,
obj->base.size >> PAGE_SHIFT,
&obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2f5388af8df9..7631807a2788 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
#include "intel_drv.h"
#include "i915_drv.h"
+#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev)
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
}
+#else
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ return;
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ return;
+}
+#endif /* CONFIG_PM */
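The i915_sysfs change above compiles the rc6 residency attributes only under CONFIG_PM and provides empty i915_setup_sysfs()/i915_teardown_sysfs() bodies otherwise, so callers stay free of #ifdefs. The same idea is often expressed as static inline stubs in a header; a sketch with hypothetical names:

struct drm_device;

#ifdef CONFIG_PM
void foo_setup_sysfs(struct drm_device *dev);
void foo_teardown_sysfs(struct drm_device *dev);
#else
/* No-op stubs: callers compile unchanged when power management is off. */
static inline void foo_setup_sysfs(struct drm_device *dev) {}
static inline void foo_teardown_sysfs(struct drm_device *dev) {}
#endif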
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f6159765f1eb..a69a3d0d3acf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
+ flag = 0;
dotclk = target * 1000;
bestppm = 1000000;
ppm = absppm = 0;
@@ -3753,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
continue;
}
- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- /* Use VBT settings if we have an eDP panel */
- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
- if (edp_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
- display_bpc = edp_bpc;
- }
- continue;
- }
-
/* Not one of the known troublemakers, check the EDID */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0a56b9ab0f58..a6c426afaa7a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+ intel_dp->want_panel_vdd = false;
+
ironlake_wait_panel_off(intel_dp);
}
@@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
-
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ ironlake_edp_panel_off(intel_dp);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
/* Switching the panel off requires vdd. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
-
intel_dp_sink_dpms(intel_dp, mode);
+ ironlake_edp_panel_off(intel_dp);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_off(encoder);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 84353559441c..132ab511b90c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -46,15 +46,16 @@
})
#define wait_for_atomic_us(COND, US) ({ \
- int i, ret__ = -ETIMEDOUT; \
- for (i = 0; i < (US); i++) { \
- if ((COND)) { \
- ret__ = 0; \
- break; \
- } \
- udelay(1); \
- } \
- ret__; \
+ unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ } \
+ ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
@@ -380,7 +381,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev,
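The wait_for_atomic_us() rewrite above switches from "loop US times around udelay(1)" to a real deadline: the old form counts iterations, which only approximates elapsed time (and over-waits whenever udelay() overshoots), while the new form computes a jiffies deadline up front, spins on the condition, and calls cpu_relax() between polls. A compilable userspace analogue of the deadline-based poll, with CLOCK_MONOTONIC standing in for jiffies and the same GNU statement-expression form as the kernel macro (illustrative only):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll COND until it is true or US microseconds of wall-clock time pass. */
#define wait_for_us(COND, US) ({                        \
        long long deadline__ = now_us() + (US);         \
        int ret__ = 0;                                  \
        while (!(COND)) {                               \
                if (now_us() > deadline__) {            \
                        ret__ = -1;                     \
                        break;                          \
                }                                       \
        }                                               \
        ret__;                                          \
})

int main(void)
{
        volatile int ready = 0;         /* never becomes true: times out */

        printf("wait_for_us returned %d\n", wait_for_us(ready, 500));
        return 0;
}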
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1991a4408cf9..b9755f6378d8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
- ret = i2c_add_adapter(&bus->adapter);
- if (ret)
- goto err;
/* By default use a conservative clock rate */
bus->reg0 = port | GMBUS_RATE_100KHZ;
@@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->force_bit = true;
intel_gpio_setup(bus, port);
+
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
}
intel_i2c_reset(dev_priv->dev);
@@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (dev_priv->gmbus == NULL)
- return;
-
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 10c7d39034e1..3df4f5fa892a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
}
-u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
@@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
- dev_priv->backlight_enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
-
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
* we don't track the backlight dpms state, hence check whether
* we have to do anything first. */
if (tmp & BLM_PWM_ENABLE)
- return;
+ goto set_level;
if (dev_priv->num_pipe == 3)
tmp &= ~BLM_PIPE_SELECT_IVB;
@@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev,
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
}
}
+
+set_level:
+ /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+ * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+ * registers are set.
+ */
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 94aabcaa3a67..58c07cdafb7e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3963,6 +3963,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Force wake wait timed out\n");
I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
DRM_ERROR("Force wake wait timed out\n");
@@ -3983,6 +3984,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Force wake wait timed out\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+ POSTING_READ(FORCEWAKE_MT);
if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
DRM_ERROR("Force wake wait timed out\n");
@@ -4018,14 +4020,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* The below doubles as a POSTING_READ */
+ POSTING_READ(FORCEWAKE);
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- /* The below doubles as a POSTING_READ */
+ POSTING_READ(FORCEWAKE_MT);
gen6_gt_check_fifodbg(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a96d53..e2a73b38abe9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
* number of bits based on the write domains has little performance
* impact.
*/
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- /*
- * Ensure that any following seqno writes only happen when the render
- * cache is indeed flushed (but only if the caller actually wants that).
- */
- if (flush_domains)
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ }
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* lower dword */
- intel_ring_emit(ring, 0); /* uppwer dword */
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
@@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- /* Initialize the ring. */
- I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26a6a4d0d078..d172e9873131 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
struct i2c_msg *msgs;
int i, ret = true;
+ /* Would be simpler to allocate both in one go ? */
buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf)
return false;
msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
- if (!msgs)
+ if (!msgs) {
+ kfree(buf);
return false;
+ }
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
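The intel_sdvo fix above plugs a leak in a two-step allocation: when the second allocation (msgs) fails, the first buffer (buf) has to be released before returning. A standalone userspace illustration of that unwind pattern (hypothetical names):

#include <stdlib.h>

struct msg { void *data; size_t len; };

/* Allocate a payload buffer plus a message array; unwind on partial failure. */
static int alloc_cmd(size_t args_len, unsigned char **bufp, struct msg **msgsp)
{
        unsigned char *buf = calloc(args_len * 2 + 2, 1);
        struct msg *msgs;

        if (!buf)
                return -1;

        msgs = calloc(args_len + 3, sizeof(*msgs));
        if (!msgs) {
                free(buf);      /* don't leak the first allocation */
                return -1;
        }

        *bufp = buf;
        *msgsp = msgs;
        return 0;
}

int main(void)
{
        unsigned char *buf;
        struct msg *msgs;

        if (alloc_cmd(8, &buf, &msgs) == 0) {
                free(msgs);
                free(buf);
        }
        return 0;
}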
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a4d7c500c97b..b69642d5d850 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
unsigned int delta, tmpdelta;
- unsigned int testr, testn, testm, testo;
+ int testr, testn, testm, testo;
unsigned int p, m, n;
- unsigned int computed;
+ unsigned int computed, vco;
int tmp;
+ const unsigned int m_div_val[] = { 1, 2, 4, 8 };
m = n = p = 0;
vcomax = 1488000;
@@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
if (delta == 0)
break;
for (testo = 5; testo < 33; testo++) {
- computed = pllreffreq * (testn + 1) /
+ vco = pllreffreq * (testn + 1) /
(testr + 1);
- if (computed < vcomin)
+ if (vco < vcomin)
continue;
- if (computed > vcomax)
+ if (vco > vcomax)
continue;
+ computed = vco / (m_div_val[testm] * (testo + 1));
if (computed > clock)
tmpdelta = computed - clock;
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 77e564667b5c..240cf962c999 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev)
}
break;
case 6: /* NV50- DP AUX */
- port->drive = entry[0];
+ port->drive = entry[0] & 0x0f;
port->sense = port->drive;
port->adapter.algo = &nouveau_dp_i2c_algo;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1cdfd6e757ce..1866dbb49979 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -731,7 +731,6 @@ nouveau_card_init(struct drm_device *dev)
case 0xa3:
case 0xa5:
case 0xa8:
- case 0xaf:
nva3_copy_create(dev);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
index cc82d799fc3b..c564c5e4c30a 100644
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
+ u32 save;
/* remove channel from playlist, will context switch if active */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
nv50_fifo_playlist_update(dev);
+ save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
/* tell any engines on this channel to unload their contexts */
nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+ nv_wr32(dev, 0x002520, save);
+
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
@@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv84_fifo_priv *priv = nv_engine(dev, engine);
int i;
+ u32 save;
/* set playlist length to zero, fifo will unload context */
nv_wr32(dev, 0x0032ec, 0);
+ save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
/* tell all connected engines to unload their contexts */
for (i = 0; i < priv->base.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
@@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
}
}
+ nv_wr32(dev, 0x002520, save);
nv_wr32(dev, 0x002140, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 7c95c44e2887..4e712b10ebdb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
nouveau_mem_exec(&exec, info->perflvl);
if (dev_priv->chipset < 0xd0)
- nv_wr32(dev, 0x611200, 0x00003300);
+ nv_wr32(dev, 0x611200, 0x00003330);
else
nv_wr32(dev, 0x62c000, 0x03030300);
}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index d0d60e1e7f95..dac525b2994e 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ch = EVO_CURS(nv_crtc->index);
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+ evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index 1855ecbd843b..e98d144e6eb9 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
+static int
+nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
+ chan = dev_priv->channels.ptr[chid];
+ if (likely(chan))
+ ret = nouveau_finish_page_flip(chan, NULL);
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
+
static void
nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
@@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000);
u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+
+ if (stat & 0x00200000) {
+ if (mthd == 0x0054) {
+ if (!nve0_fifo_page_flip(dev, chid))
+ show &= ~0x00200000;
+ }
+ }
- NV_INFO(dev, "PSUBFIFO %d:", unit);
- nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
- NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ if (show) {
+ NV_INFO(dev, "PFIFO%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+ NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+ }
nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9e6f76fec527..c6fcb5b86a45 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -259,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
/* disable crtc pair power gating before programming */
- if (ASIC_IS_DCE6(rdev))
+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -279,7 +279,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
/* power gating is per-pair */
- if (ASIC_IS_DCE6(rdev)) {
+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
struct drm_crtc *other_crtc;
struct radeon_crtc *other_radeon_crtc;
list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
@@ -1531,12 +1531,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
* crtc virtual pixel clock.
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
- if (ASIC_IS_DCE5(rdev))
- return ATOM_DCPLL;
+ if (rdev->clock.dp_extclk)
+ return ATOM_PPLL_INVALID;
else if (ASIC_IS_DCE6(rdev))
return ATOM_PPLL0;
- else if (rdev->clock.dp_extclk)
- return ATOM_PPLL_INVALID;
+ else if (ASIC_IS_DCE5(rdev))
+ return ATOM_DCPLL;
}
}
}
@@ -1635,18 +1635,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_crtc->in_mode_set = true;
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+ /* disable crtc pair power gating before programming */
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->in_mode_set = false;
}
static void atombios_crtc_disable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e585a3b947eb..e93b80a6d4e9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
- save->vga_control[0] = RREG32(D1VGA_CONTROL);
- save->vga_control[1] = RREG32(D2VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
- save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (rdev->num_crtc >= 4) {
- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
- }
- if (rdev->num_crtc >= 6) {
- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
- }
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
@@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- /* Restore video state */
- WREG32(D1VGA_CONTROL, save->vga_control[0]);
- WREG32(D2VGA_CONTROL, save->vga_control[1]);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
- }
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
- }
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
else {
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.evergreen.tile_config |= 1 << 4;
- else
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
rdev->config.evergreen.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.evergreen.tile_config |= 2 << 4;
+ break;
+ }
}
rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c16554122ccd..e44a62a07fe3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
case V_030000_SQ_TEX_DIM_1D_ARRAY:
case V_030000_SQ_TEX_DIM_2D_ARRAY:
depth = 1;
+ break;
+ case V_030000_SQ_TEX_DIM_2D_MSAA:
+ case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ surf.nsamples = 1 << llevel;
+ llevel = 0;
+ depth = 1;
+ break;
case V_030000_SQ_TEX_DIM_3D:
break;
default:
@@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->db_dirty) {
/* Check stencil buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+ if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+ G_028800_STENCIL_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_stencil(p);
if (r)
return r;
}
/* Check depth buffer */
- if (G_028800_Z_ENABLE(track->db_depth_control)) {
+ if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+ G_028800_Z_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_depth(p);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index d3bd098e4e19..79347855d9bf 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1277,6 +1277,8 @@
#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
#define C_028044_FORMAT 0xFFFFFFFE
+#define V_028044_STENCIL_INVALID 0
+#define V_028044_STENCIL_8 1
#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9945d86d9001..853800e8582f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
rdev->config.cayman.tile_config |= 1 << 4;
else {
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.cayman.tile_config |= 1 << 4;
- else
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
rdev->config.cayman.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.cayman.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.cayman.tile_config |= 2 << 4;
+ break;
+ }
}
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 637280f541a3..d79c639ae739 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
+
+/**
+ * r600_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+{
+ uint64_t clock;
+
+ mutex_lock(&rdev->gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&rdev->gpu_clock_mutex);
+ return clock;
+}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ca87f7afaf23..3dab49cb1d4a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -764,8 +764,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/* Check depth buffer */
- if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
- G_028800_Z_ENABLE(track->db_depth_control))) {
+ if (track->db_dirty &&
+ G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+ (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control))) {
r = r600_cs_track_validate_db(p);
if (r)
return r;
@@ -1557,13 +1559,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
- u32 nfaces, llevel, blevel, w0, h0, d0;
- u32 word0, word1, l0_size, mipmap_size, word2, word3;
+ u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
u32 height_align, pitch, pitch_align, depth_align;
- u32 array, barray, larray;
+ u32 barray, larray;
u64 base_align;
struct array_mode_checker array_check;
u32 format;
+ bool is_array;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -1581,12 +1584,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
word1 = radeon_get_ib_value(p, idx + 1);
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ word4 = radeon_get_ib_value(p, idx + 4);
+ word5 = radeon_get_ib_value(p, idx + 5);
+ dim = G_038000_DIM(word0);
w0 = G_038000_TEX_WIDTH(word0) + 1;
+ pitch = (G_038000_PITCH(word0) + 1) * 8;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
d0 = G_038004_TEX_DEPTH(word1);
+ format = G_038004_DATA_FORMAT(word1);
+ blevel = G_038010_BASE_LEVEL(word4);
+ llevel = G_038014_LAST_LEVEL(word5);
+ /* pitch in texels */
+ array_check.array_mode = G_038000_TILE_MODE(word0);
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = 1;
+ array_check.blocksize = r600_fmt_get_blocksize(format);
nfaces = 1;
- array = 0;
- switch (G_038000_DIM(word0)) {
+ is_array = false;
+ switch (dim) {
case V_038000_SQ_TEX_DIM_1D:
case V_038000_SQ_TEX_DIM_2D:
case V_038000_SQ_TEX_DIM_3D:
@@ -1599,29 +1618,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
- array = 1;
+ is_array = true;
break;
- case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ is_array = true;
+ /* fall through */
+ case V_038000_SQ_TEX_DIM_2D_MSAA:
+ array_check.nsamples = 1 << llevel;
+ llevel = 0;
+ break;
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
- format = G_038004_DATA_FORMAT(word1);
if (!r600_fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
}
- /* pitch in texels */
- pitch = (G_038000_PITCH(word0) + 1) * 8;
- array_check.array_mode = G_038000_TILE_MODE(word0);
- array_check.group_size = track->group_size;
- array_check.nbanks = track->nbanks;
- array_check.npipes = track->npipes;
- array_check.nsamples = 1;
- array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1647,20 +1662,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
- word2 = radeon_get_ib_value(p, idx + 2) << 8;
- word3 = radeon_get_ib_value(p, idx + 3) << 8;
-
- word0 = radeon_get_ib_value(p, idx + 4);
- word1 = radeon_get_ib_value(p, idx + 5);
- blevel = G_038010_BASE_LEVEL(word0);
- llevel = G_038014_LAST_LEVEL(word1);
if (blevel > llevel) {
dev_warn(p->dev, "texture blevel %d > llevel %d\n",
blevel, llevel);
}
- if (array == 1) {
- barray = G_038014_BASE_ARRAY(word1);
- larray = G_038014_LAST_ARRAY(word1);
+ if (is_array) {
+ barray = G_038014_BASE_ARRAY(word5);
+ larray = G_038014_LAST_ARRAY(word5);
nfaces = larray - barray + 1;
}
@@ -1677,7 +1685,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word3 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4b116ae75fc2..fd328f4c3ea8 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,9 @@
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5431af292408..99304194a65c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -300,6 +300,7 @@ struct radeon_bo_va {
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
+ struct radeon_fence *fence;
bool valid;
};
@@ -1533,6 +1534,7 @@ struct radeon_device {
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
+ struct mutex gpu_clock_mutex;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1733,11 +1735,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
-#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
-#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
-#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
/* Common functions */
/* AGP */
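The radeon.h hunk above wraps the rdev macro parameter in parentheses for the page-flip and wait wrappers. Without them, passing anything other than a plain identifier (a cast, a conditional expression, and so on) can re-parse inside the expansion. A standalone demonstration of why macro arguments get parenthesised (hypothetical struct and names):

#include <stdio.h>

struct dev { int id; };

#define ID_BAD(d)   d->id       /* unparenthesised parameter */
#define ID_GOOD(d)  (d)->id

int main(void)
{
        struct dev a = { .id = 1 }, b = { .id = 2 };
        struct dev *pa = &a, *pb = &b;
        int use_b = 1;

        /*
         * ID_BAD(use_b ? pb : pa) expands to: use_b ? pb : pa->id
         * which parses as use_b ? pb : (pa->id) -- a type mismatch the
         * compiler rejects here, and silently wrong in looser contexts.
         */
        printf("%d\n", ID_GOOD(use_b ? pb : pa));       /* prints 2 */
        return 0;
}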
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f4af24310438..18c38d14c8cd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
* rv515
*/
struct rv515_mc_save {
- u32 d1vga_control;
- u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
- u32 d1crtc_control;
- u32 d2crtc_control;
};
+
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev);
* evergreen
*/
struct evergreen_mc_save {
- u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
- u32 crtc_control[6];
};
+
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+uint64_t si_get_gpu_clock(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index b1e3820df363..f9c21f9d16bc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
};
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+ union igp_info *igp_info;
u8 frev, crev;
u16 percentage = 0, rate = 0;
/* get any igp specific overrides */
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
- igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+ igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
- switch (id) {
- case ASIC_INTERNAL_SS_ON_TMDS:
- percentage = le16_to_cpu(igp_info->usDVISSPercentage);
- rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+ switch (crev) {
+ case 6:
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+ break;
+ }
break;
- case ASIC_INTERNAL_SS_ON_HDMI:
- percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
- rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+ case 7:
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+ break;
+ }
break;
- case ASIC_INTERNAL_SS_ON_LVDS:
- percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
- rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
}
if (percentage)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 576f4f6919f2..f75247d42ffd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
return i2c;
}
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct radeon_i2c_bus_rec i2c;
+ u16 offset;
+ u8 id, blocks, clk, data;
+ int i;
+
+ i2c.valid = false;
+
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ /* gpiopad */
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+ (1 << clk), (1 << data));
+ break;
+ }
+ }
+ }
+ return i2c;
+}
+
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
@@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
- u16 offset;
- u8 id, blocks, clk, data;
- int i;
-
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
- offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
- if (offset) {
- blocks = RBIOS8(offset + 2);
- for (i = 0; i < blocks; i++) {
- id = RBIOS8(offset + 3 + (i * 5) + 0);
- if (id == 136) {
- clk = RBIOS8(offset + 3 + (i * 5) + 3);
- data = RBIOS8(offset + 3 + (i * 5) + 4);
- /* gpiopad */
- i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- (1 << clk), (1 << data));
- rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
- break;
- }
- }
- }
+ /* gpiopad */
+ i2c = radeon_combios_get_i2c_info_from_table(rdev);
+ if (i2c.valid)
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
} else if ((rdev->family == CHIP_R200) ||
(rdev->family >= CHIP_R300)) {
/* 0x68 */
@@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector = (tmp >> 12) & 0xf;
ddc_type = (tmp >> 8) & 0xf;
- ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+ if (ddc_type == 5)
+ ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+ else
+ ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8a4c49ef0cc4..b4a0db24f4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return 0;
}
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+ struct radeon_fence *fence)
+{
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_list *lobj;
+
+ if (parser->chunk_ib_idx == -1) {
+ return;
+ }
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+ return;
+ }
+
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ struct radeon_bo_va *bo_va;
+ struct radeon_bo *rbo = lobj->bo;
+
+ bo_va = radeon_bo_va(rbo, vm);
+ radeon_fence_unref(&bo_va->fence);
+ bo_va->fence = radeon_fence_ref(fence);
+ }
+}
+
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (!error)
+ if (!error) {
+ /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
+ radeon_bo_vm_fence_va(parser, parser->ib.fence);
ttm_eu_fence_buffer_objects(&parser->validated,
parser->ib.fence);
- else
+ } else {
ttm_eu_backoff_reservation(&parser->validated);
+ }
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
@@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (parser->chunk_ib_idx == -1)
return 0;
-
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 711e95ad39bf..8794744cdf1a 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
@@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
- EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 742af8244e89..d2e243867ac6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev,
atomic_set(&rdev->ih.lock, 0);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
+ mutex_init(&rdev->gpu_clock_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcea6f01ae4e..d7269f48d37c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -59,9 +59,12 @@
* 2.15.0 - add max_pipes query
* 2.16.0 - fix evergreen 2D tiled surface calculation
* 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ * 2.18.0 - r600-eg: allow "invalid" DB formats
+ * 2.19.0 - r600-eg: MSAA textures
+ * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 17
+#define KMS_DRIVER_MINOR 20
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b3720054614d..bb3b7fe05ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -EINVAL;
}
- if (bo_va->valid)
+ if (bo_va->valid && mem)
return 0;
ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
+ int r;
bo_va = radeon_bo_va(bo, vm);
if (bo_va == NULL)
return 0;
+ /* wait for va use to end */
+ while (bo_va->fence) {
+ r = radeon_fence_wait(bo_va->fence, false);
+ if (r) {
+ DRM_ERROR("error while waiting for fence: %d\n", r);
+ }
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ continue;
+ }
+ break;
+ }
+ radeon_fence_unref(&bo_va->fence);
+
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
@@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}
/**
- * radeon_vm_init - tear down a vm instance
+ * radeon_vm_fini - tear down a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
@@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_vm_unbind_locked(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
- /* remove all bo */
+ /* remove all bo at this point non are busy any more because unbind
+ * waited for the last vm fence to signal
+ */
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
+ radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
@@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
+ radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 84d045245739..1b57b0058ad6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_device *rdev = rbo->rdev;
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
- struct radeon_bo_va *bo_va, *tmp;
if (rdev->family < CHIP_CAYMAN) {
return;
}
if (radeon_bo_reserve(rbo, false)) {
+ dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
return;
}
- list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
- if (bo_va->vm == vm) {
- /* remove from this vm address space */
- mutex_lock(&vm->mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- }
- }
+ radeon_vm_bo_rmv(rdev, vm, rbo);
radeon_bo_unreserve(rbo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 1d73f16b5d97..414b4acf6947 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -29,6 +29,7 @@
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"
+#include "radeon_asic.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
@@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev,
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
- struct drm_radeon_info *info;
+ struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
- uint32_t *value_ptr;
- uint32_t value;
+ uint32_t value, *value_ptr;
+ uint64_t value64, *value_ptr64;
struct drm_crtc *crtc;
int i, found;
- info = data;
+ /* TIMESTAMP is a 64-bit value, needs special handling. */
+ if (info->request == RADEON_INFO_TIMESTAMP) {
+ if (rdev->family >= CHIP_R600) {
+ value_ptr64 = (uint64_t*)((unsigned long)info->value);
+ if (rdev->family >= CHIP_TAHITI) {
+ value64 = si_get_gpu_clock(rdev);
+ } else {
+ value64 = r600_get_gpu_clock(rdev);
+ }
+
+ if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
+ DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ }
+
value_ptr = (uint32_t *)((unsigned long)info->value);
- if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
+ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
+ }
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
@@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
- DRM_ERROR("copy_to_user\n");
+ DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index d5fd615897ec..94b4a1c12893 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
+ radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
@@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
+ radeon_crtc->in_mode_set = false;
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f380d59c5763..d56978949f34 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -275,6 +275,7 @@ struct radeon_crtc {
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
+ bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f1a4c803c1d..1cb014b571ab 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
/* remove from all vm address space */
- mutex_lock(&bo_va->vm->mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
+ radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
}
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index a12fbcc8ccb6..aa8ef491ef3c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
- save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
- save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
/* Stop all video */
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* Unlock host access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- /* Restore video state */
- WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
- WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
- WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c053f8193771..0139e227e3c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.si.tile_config |= 1 << 4;
- else
+ }
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.si.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.si.tile_config |= 2 << 4;
+ break;
+ }
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
@@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/**
+ * si_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+{
+ uint64_t clock;
+
+ mutex_lock(&rdev->gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&rdev->gpu_clock_mutex);
+ return clock;
+}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7869089e8761..ef4815c27b1c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -698,6 +698,9 @@
#define RLC_UCODE_ADDR 0xC32C
#define RLC_UCODE_DATA 0xC330
+#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 7bd65bdd15a8..291ecc145585 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
/* need to attach */
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
- return ERR_PTR(PTR_ERR(attach));
+ return ERR_CAST(attach);
sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg)) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6d1cbdfc9b2a..b64502dfa9f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev)
} else
dma_pdev = pci_dev_get(pdev);
+ /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add to the same group as function 0.
+ */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
@@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev)
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
0)));
+ /*
+ * Devices on the root bus go through the iommu. If that's not us,
+ * find the next upstream device and test ACS up to the root bus.
+ * Finding the next device may require skipping virtual buses.
+ */
while (!pci_is_root_bus(dma_pdev->bus)) {
- if (pci_acs_path_enabled(dma_pdev->bus->self,
- NULL, REQ_ACS_FLAGS))
+ struct pci_bus *bus = dma_pdev->bus;
+
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ goto root_bus;
+ }
+
+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
+root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 500e7f15f5c2..0a2ea317120a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void)
break;
}
- /* Make sure ACS will be enabled */
- pci_request_acs();
-
ret = amd_iommu_init_devices();
print_iommu_info();
@@ -1652,6 +1649,9 @@ static bool detect_ivrs(void)
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ /* Make sure ACS will be enabled during PCI probe */
+ pci_request_acs();
+
return true;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45350ff5e93c..80bad32aa463 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&priv->pgtablelock);
INIT_LIST_HEAD(&priv->clients);
- dom->geometry.aperture_start = 0;
- dom->geometry.aperture_end = ~0UL;
- dom->geometry.force_aperture = true;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = ~0UL;
+ domain->geometry.force_aperture = true;
domain->priv = priv;
return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7469b5346643..2297ec193eb4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
if (!drhd) {
printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
pci_name(pdev));
+ free_domain_mem(domain);
return NULL;
}
iommu = drhd->iommu;
@@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev)
} else
dma_pdev = pci_dev_get(pdev);
+ /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add to the same group as function 0.
+ */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
@@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev)
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
0)));
+ /*
+ * Devices on the root bus go through the iommu. If that's not us,
+ * find the next upstream device and test ACS up to the root bus.
+ * Finding the next device may require skipping virtual buses.
+ */
while (!pci_is_root_bus(dma_pdev->bus)) {
- if (pci_acs_path_enabled(dma_pdev->bus->self,
- NULL, REQ_ACS_FLAGS))
+ struct pci_bus *bus = dma_pdev->bus;
+
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ goto root_bus;
+ }
+
+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
+root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4ba325ab6262..2a4bb36bc688 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
goto out;
}
}
- dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev));
+ dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
spin_unlock(&as->client_lock);
}
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
- int i, err = -ENODEV;
+ int i, err = -EAGAIN;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
@@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
as = &smmu->as[i];
- if (!as->pdir_page) {
- err = alloc_pdir(as);
- if (!err)
- goto found;
- }
+
+ if (as->pdir_page)
+ continue;
+
+ err = alloc_pdir(as);
+ if (!err)
+ goto found;
+
if (err != -EAGAIN)
break;
}
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5405ec644db3..baf2686aa8eb 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include "isdnloop.h"
-static char *revision = "$Revision: 1.11.6.7 $";
static char *isdnloop_id = "loop0";
MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
static int __init
isdnloop_init(void)
{
- char *p;
- char rev[10];
-
- if ((p = strchr(revision, ':'))) {
- strcpy(rev, p + 1);
- p = strchr(rev, '$');
- *p = 0;
- } else
- strcpy(rev, " ??? ");
- printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
-
if (isdnloop_id)
return (isdnloop_addcard(isdnloop_id));
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 0dc8abca1407..949cabb88f1c 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
InitWin(l2);
l2->l2m.fsm = &l2fsm;
if (test_bit(FLG_LAPB, &l2->flag) ||
- test_bit(FLG_PTP, &l2->flag) ||
+ test_bit(FLG_FIXED_TEI, &l2->flag) ||
test_bit(FLG_LAPD_NET, &l2->flag))
l2->l2m.state = ST_L2_4;
else
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6157cbbf4113..363975b3c925 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig,
struct led_classdev *led_cdev;
led_cdev = list_entry(entry, struct led_classdev, trig_list);
- led_set_brightness(led_cdev, brightness);
+ __led_set_brightness(led_cdev, brightness);
}
read_unlock(&trig->leddev_list_lock);
}
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 53bd136f1ef0..0ade6ebfc914 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led,
/* scale configuration */
addr = LP8788_ISINK_CTRL;
mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
- val = cfg->scale << cfg->num;
+ val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
ret = lp8788_update_bits(led->lp, addr, mask, val);
if (ret)
return ret;
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 9ee12c28059a..771ea067e680 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
if (!cfg) {
dev_err(&pdev->dev, "missing platform data\n");
- goto err0;
+ return -ENODEV;
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454f628b..c3bb304eca07 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
+#include <asm/sections.h>
/****************************************************************************/
-extern char _ebss;
-
struct map_info uclinux_ram_map = {
.name = "RAM",
- .phys = (unsigned long)&_ebss,
+ .phys = (unsigned long)__bss_stop,
.size = 0,
};
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 545c09ed9079..cff6f023c03a 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,9 +996,7 @@ static int __init cops_module_init(void)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
cardname);
cops_dev = cops_probe(-1);
- if (IS_ERR(cops_dev))
- return PTR_ERR(cops_dev);
- return 0;
+ return PTR_RET(cops_dev);
}
static void __exit cops_module_exit(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 0910dce3996d..b5782cdf0bca 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void)
"ltpc: Autoprobing is not recommended for modules\n");
dev_ltpc = ltpc_probe();
- if (IS_ERR(dev_ltpc))
- return PTR_ERR(dev_ltpc);
- return 0;
+ return PTR_RET(dev_ltpc);
}
module_init(ltpc_module_init);
#endif
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index f0c8bd54ce29..021d69c5d9bc 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@ e100_set_network_leds(int active)
static void
e100_netpoll(struct net_device* netdev)
{
- e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+ e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 77bcd4cb4ffb..463b9ec57d80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1278,7 +1278,7 @@ struct bnx2x {
#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
#define BNX2X_FW_RX_ALIGN_END \
- max(1UL << BNX2X_RX_ALIGN_SHIFT, \
+ max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index dd451c3dd83d..02b5a343b195 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
return val != 0;
}
-/*
- * Reset the load status for the current engine.
- */
-static void bnx2x_clear_load_status(struct bnx2x *bp)
-{
- u32 val;
- u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
- BNX2X_PATH0_LOAD_CNT_MASK);
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
- val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
- REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
-}
-
static void _print_next_block(int idx, const char *blk)
{
pr_cont("%s%s", idx ? ", " : "", blk);
@@ -9384,32 +9370,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
return rc;
}
-static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
-{
- int pos;
- u32 cap;
- struct pci_dev *dev = bp->pdev;
-
- pos = pci_pcie_cap(dev);
- if (!pos)
- return false;
-
- pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
- if (!(cap & PCI_EXP_DEVCAP_FLR))
- return false;
-
- return true;
-}
-
static int __devinit bnx2x_do_flr(struct bnx2x *bp)
{
int i, pos;
u16 status;
struct pci_dev *dev = bp->pdev;
- /* probe the capability first */
- if (bnx2x_can_flr(bp))
- return -ENOTTY;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+ return -EINVAL;
+ }
+
+ /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
+ if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+ BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+ bp->common.bc_ver);
+ return -EINVAL;
+ }
pos = pci_pcie_cap(dev);
if (!pos)
@@ -9429,12 +9407,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
"transaction is not cleared; proceeding with reset anyway\n");
clear:
- if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
- BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
- bp->common.bc_ver);
- return -EINVAL;
- }
+ BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
return 0;
@@ -9454,8 +9428,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
- return bnx2x_do_flr(bp);
+ rc = bnx2x_test_firmware_version(bp, false);
+
+ if (!rc) {
+ /* fw version is good */
+ BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+ rc = bnx2x_do_flr(bp);
+ }
+
+ if (!rc) {
+ /* FLR was performed */
+ BNX2X_DEV_INFO("FLR successful\n");
+ return 0;
+ }
+
+ BNX2X_DEV_INFO("Could not FLR\n");
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
@@ -11427,9 +11414,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (!chip_is_e1x)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
- /* Reset the load counter */
- bnx2x_clear_load_status(bp);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 734fd87cd990..62f754bd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
break;
default:
+ kfree(new_cmd);
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c60de89b6669..90a903d83d87 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
if (adapter->num_rx_qs != MAX_RX_QS)
dev_info(&adapter->pdev->dev,
- "Created only %d receive queues", adapter->num_rx_qs);
+ "Created only %d receive queues\n", adapter->num_rx_qs);
return 0;
}
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade957fd..080c89093feb 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext, eecd;
+ u32 ctrl, ctrl_ext, eecd, tctl;
s32 ret_val;
/*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
- ew32(TCTL, E1000_TCTL_PSP);
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
e1e_flush();
usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
- * If the partner code word is null, stop forcing
- * and restart auto negotiation.
*/
- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+ if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b245310f17..46c3b1f9ff89 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+ struct e1000_buffer *bi)
+{
+ int i;
+ struct e1000_ps_page *ps_page;
+
+ for (i = 0; i < adapter->rx_ps_pages; i++) {
+ ps_page = &bi->ps_pages[i];
+
+ if (ps_page->page) {
+ pr_info("packet dump for ps_page %d:\n", i);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, page_address(ps_page->page),
+ PAGE_SIZE, true);
+ }
+ }
+}
+
/*
* e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
(unsigned long long)buffer_info->time_stamp,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
+ 16, 1, buffer_info->skb->data,
+ buffer_info->skb->len, true);
}
/* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(buffer_info->dma),
- adapter->rx_ps_bsize0, true);
+ e1000e_dump_ps_pages(adapter,
+ buffer_info);
}
}
break;
@@ -444,12 +460,12 @@ rx_ring_summary:
(unsigned long long)buffer_info->dma,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter))
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16,
1,
- phys_to_virt
- (buffer_info->dma),
+ buffer_info->skb->data,
adapter->rx_buffer_len,
true);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaac48c1..ba994fb4cec6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
+ /*
+ * Check for invalid size
+ */
+ if ((hw->mac.type == e1000_82576) && (size > 15)) {
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ size = 15;
+ }
+
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8;
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
} else
nvm->type = e1000_nvm_flash_hw;
- /*
- * Check for invalid size
- */
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
- size = 15;
- }
-
/* NVM Function Pointers */
switch (hw->mac.type) {
case e1000_82580:
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 10efcd88dca0..28394bea5253 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -156,8 +156,12 @@
: (0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
: (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84cad0e9..70591117051b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
if (igb_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev, "Cannot change link "
- "characteristics when SoL/IDER is active.\n");
+ dev_err(&adapter->pdev->dev,
+ "Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
if (val != (_test[pat] & write & mask)) {
- dev_err(&adapter->pdev->dev, "pattern test reg %04X "
- "failed: got 0x%08X expected 0x%08X\n",
+ dev_err(&adapter->pdev->dev,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
- dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
- " got 0x%08X expected 0x%08X\n", reg,
+ dev_err(&adapter->pdev->dev,
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
(val & mask), (write & mask));
*data = reg;
return 1;
@@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_STATUS, toggle);
after = rd32(E1000_STATUS) & toggle;
if (value != after) {
- dev_err(&adapter->pdev->dev, "failed STATUS register test "
- "got: 0x%08X expected: 0x%08X\n", after, value);
+ dev_err(&adapter->pdev->dev,
+ "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
@@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
break;
}
+ /* add small delay to avoid loopback test failure */
+ msleep(50);
+
/* force 1000, set loopback */
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
@@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
* sessions are active */
if (igb_check_reset_block(&adapter->hw)) {
dev_err(&adapter->pdev->dev,
- "Cannot do PHY loopback test "
- "when SoL/IDER is active.\n");
+ "Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
}
if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i210)) {
+ || (adapter->hw.mac.type == e1000_i211)) {
dev_err(&adapter->pdev->dev,
- "Loopback test not supported "
- "on this part at this time.\n");
+ "Loopback test not supported on this part at this time.\n");
*data = 0;
goto out;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b7c2d5050572..48cc4fb1a307 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
+ 16, 1, buffer_info->skb->data,
buffer_info->length, true);
}
}
@@ -547,18 +547,17 @@ rx_ring_summary:
(u64)buffer_info->dma,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter)) {
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->dma && buffer_info->skb) {
print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1,
- phys_to_virt(buffer_info->dma),
- IGB_RX_HDR_LEN, true);
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ IGB_RX_HDR_LEN, true);
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
- phys_to_virt(
- buffer_info->page_dma +
- buffer_info->page_offset),
+ page_address(buffer_info->page) +
+ buffer_info->page_offset,
PAGE_SIZE/2, true);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 50fc137501da..18bf08c9d7a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f32e70300770..5aba5ecdf1e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* If source MAC is equal to our own MAC and not performing
* the selftest or flb disabled - drop the packet */
if (s_mac == priv->mac &&
- (!(dev->features & NETIF_F_LOOPBACK) ||
- !priv->validate_loopback))
+ !((dev->features & NETIF_F_LOOPBACK) ||
+ priv->validate_loopback))
goto next;
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856b1334..10bba09c44ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
ring->poll_cnt = 0;
- ring->blocked = 0;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
@@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
ring->cons += txbbs_skipped;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
- /* Wakeup Tx queue if this ring stopped it */
- if (unlikely(ring->blocked)) {
- if ((u32) (ring->prod - ring->cons) <=
- ring->size - HEADROOM - MAX_DESC_TXBBS) {
- ring->blocked = 0;
- netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
- }
+ /*
+ * Wakeup Tx queue if this stopped, and at least 1 packet
+ * was completed
+ */
+ if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
}
}
@@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */
netif_tx_stop_queue(ring->tx_queue);
- ring->blocked = 1;
priv->port_stats.queue_stopped++;
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 48d0e90194cb..827b72dfce99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
"on this HCA, aborting.\n");
return -EINVAL;
}
- if (port_type[i] == MLX4_PORT_TYPE_ETH &&
- port_type[i + 1] == MLX4_PORT_TYPE_IB)
- return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5f1ab105debc..9d27e42264e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,7 +248,6 @@ struct mlx4_en_tx_ring {
u32 doorbell_qpn;
void *buf;
u16 poll_cnt;
- int blocked;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
u32 last_nr_txbb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 802498293528..34ee09bae36e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
}
/*
- * Adjust port configuration:
- * If port 1 sensed nothing and port 2 is IB, set both as IB
- * If port 2 sensed nothing and port 1 is Eth, set both as Eth
- */
- if (stype[0] == MLX4_PORT_TYPE_ETH) {
- for (i = 1; i < dev->caps.num_ports; i++)
- stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
- }
- if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
- for (i = 0; i < dev->caps.num_ports - 1; i++)
- stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
- }
-
- /*
* If sensed nothing, remain in current configuration.
*/
for (i = 0; i < dev->caps.num_ports; i++)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4069edab229e..53743f7a2ca9 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev)
"phy-mode", NULL);
if (mode && !strcmp(mode, "mii"))
return PHY_INTERFACE_MODE_MII;
- return PHY_INTERFACE_MODE_RMII;
}
-
- /* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
- return PHY_INTERFACE_MODE_MII;
-#else
return PHY_INTERFACE_MODE_RMII;
-#endif
}
static bool use_iram_for_net(struct device *dev)
{
if (dev && dev->of_node)
return of_property_read_bool(dev->of_node, "use-iram");
-
- /* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
- return true;
-#else
return false;
-#endif
}
/* Receive Status information word */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1b2b02..65a8d49106a4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
goto fail2;
}
+ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+ rc = -EINVAL;
+ goto fail3;
+ }
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+ net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f9158a714..70755c97251a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f93b561..8cba2df82b18 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring->tx_pending > EFX_MAX_DMAQ_SIZE)
return -EINVAL;
- if (ring->rx_pending < EFX_MIN_RING_SIZE ||
- ring->tx_pending < EFX_MIN_RING_SIZE) {
+ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
netif_err(efx, drv, efx->net_dev,
- "TX and RX queues cannot be smaller than %ld\n",
- EFX_MIN_RING_SIZE);
+ "RX queues cannot be smaller than %u\n",
+ EFX_RXQ_MIN_ENT);
return -EINVAL;
}
- return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+ if (txq_entries != ring->tx_pending)
+ netif_warn(efx, drv, efx->net_dev,
+ "increasing TX queue size to minimum of %u\n",
+ txq_entries);
+
+ return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7769f7..18713436b443 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
return len;
}
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for the alignment workaround */
+ if (EFX_WORKAROUND_5391(efx))
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
+
/*
* Add a socket buffer to a TX queue
*
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cd01ee7ecef1..b93245c11995 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary resources and invokes the main to init
* the net device, register the mdio bus etc.
*/
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 482648fcf0b6..98934bdf6acf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev)
}
int ixp46x_phc_index = -1;
+EXPORT_SYMBOL_GPL(ixp46x_phc_index);
static int ixp4xx_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6cee2917eb02..4a1a5f58fa73 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device)
unsigned long flags;
net_device = hv_get_drvdata(device);
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
- net_device->destroy = true;
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
-
- /* Wait for all send completions */
- wait_event(net_device->wait_drain,
- atomic_read(&net_device->num_outstanding_sends) == 0);
netvsc_disconnect_vsp(net_device);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index e5d6146937fa..1e88a1095934 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
+ struct netvsc_device *nvdev = dev->net_dev;
+ struct hv_device *hdev = nvdev->dev;
+ ulong flags;
/* Attempt to do a rndis device halt */
request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
dev->state = RNDIS_DEV_UNINITIALIZED;
cleanup:
+ spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
+ nvdev->destroy = true;
+ spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+ /* Wait for all send completions */
+ wait_event(nvdev->wait_drain,
+ atomic_read(&nvdev->num_outstanding_sends) == 0);
+
if (request)
put_rndis_request(dev, request);
return;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a561ae44a9ac..c6a0299aa9f9 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
/* If not add the 'RPOLC', we can't catch the receive interrupt.
* It's related with the HW layout and the IR transiver.
*/
- val |= IREN | RPOLC;
+ val |= UMOD_IRDA | RPOLC;
UART_PUT_GCTL(port, val);
return ret;
}
@@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev
bfin_sir_stop_rx(port);
val = UART_GET_GCTL(port);
- val &= ~(UCEN | IREN | RPOLC);
+ val &= ~(UCEN | UMOD_MASK | RPOLC);
UART_PUT_GCTL(port, val);
#ifdef CONFIG_SIR_BFIN_DMA
@@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work)
* reset all the UART.
*/
val = UART_GET_GCTL(port);
- val &= ~(IREN | RPOLC);
+ val &= ~(UMOD_MASK | RPOLC);
UART_PUT_GCTL(port, val);
SSYNC();
- val |= IREN | RPOLC;
+ val |= UMOD_IRDA | RPOLC;
UART_PUT_GCTL(port, val);
SSYNC();
/* bfin_sir_set_speed(port, self->speed); */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0737bd4d1669..0f0f9ce3a776 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
int i;
for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
- if (rcu_dereference(vlan->taps[i]) == q)
+ if (rcu_dereference_protected(vlan->taps[i],
+ lockdep_is_held(&macvtap_lock)) == q)
return i;
}
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e0cc4ef33dee..eefe49e8713c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -101,7 +101,6 @@ err:
n--;
gpio_free(s->gpio[n]);
}
- devm_kfree(&pdev->dev, s);
return r;
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1c98321b56cc..162464fe86bf 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
- rt = ip_route_output_ports(&init_net, &fl4, NULL,
+ rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
- rt = ip_route_output_ports(&init_net, &fl4, sk,
+ rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 926d4db5cb38..3a16d4fdaa05 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun)
netif_tx_lock_bh(tun->dev);
netif_carrier_off(tun->dev);
tun->tfile = NULL;
- tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 64610048ce87..7d78669000d7 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev)
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+ usb_free_urb(req);
usbpn_close(dev);
return -ENOMEM;
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f4ce5957df32..4cd582a4f625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
+ /* Dell branded MBM devices like DW5550 */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x413c,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long) &wwan_info,
+ },
+
+ /* Toshiba branded MBM devices */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x0930,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long) &wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cfa91ab7acf8..60b6a9daff7e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_QCA955X:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
+ case AR9485_DEVID_AR1111:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd0c146d81dc..ce7332c64efb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,6 +49,7 @@
#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
#define AR9300_DEVID_QCA955X 0x0038
+#define AR9485_DEVID_AR1111 0x0037
#define AR5416_AR9100_DEVID 0x000b
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 87b89d55e637..d455de9162ec 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
+ { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
{ 0 }
};
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b80352b308d5..a140165dfee0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev)
if (dev->dev->chip_id == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
+ } else if (dev->dev->chip_id == 0x5354) {
+ /* Don't allow overtaking buttons GPIOs */
+ set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */
}
- if (dev->dev->chip_id == 0x5354)
- set &= 0xff02;
+
if (0 /* FIXME: conditional unknown */ ) {
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0100);
- mask |= 0x0180;
- set |= 0x0180;
+ /* BT Coexistence Input */
+ mask |= 0x0080;
+ set |= 0x0080;
+ /* BT Coexistence Out */
+ mask |= 0x0100;
+ set |= 0x0100;
}
if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
+ /* PA is controlled by gpio 9, let ucode handle it */
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
- if (dev->dev->core_rev >= 2)
- mask |= 0x0010; /* FIXME: This is redundant. */
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
(bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
- BCMA_CC_GPIOCTL) & mask) | set);
+ BCMA_CC_GPIOCTL) & ~mask) | set);
break;
#endif
#ifdef CONFIG_B43_SSB
@@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
if (gpiodev)
ssb_write32(gpiodev, B43_GPIO_CONTROL,
(ssb_read32(gpiodev, B43_GPIO_CONTROL)
- & mask) | set);
+ & ~mask) | set);
break;
#endif
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 9a4c63f927cb..7ed7d7577024 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
{
struct brcms_c_info *wlc = wlc_cm->wlc;
struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
- const struct ieee80211_reg_rule *reg_rule;
struct txpwr_limits txpwr;
- int ret;
brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
);
/* set or restore gmode as required by regulatory */
- ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
- if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM)
brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
else
brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 9e79d47e077f..192ad5c1fcc8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(14, 2484,
IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+ IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
+ IEEE80211_CHAN_NO_OFDM)
};
static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 6fddd2785e6e..a82f46c10f5e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
*/
static bool rs_use_green(struct ieee80211_sta *sta)
{
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->ctx;
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
+ /*
+ * There's a bug somewhere in this code that causes the
+ * scaling to get stuck because GF+SGI can't be combined
+ * in SISO rates. Until we find that bug, disable GF, it
+ * has only limited benefit and we still interoperate with
+ * GF APs since we can always receive GF transmissions.
+ */
+ return false;
}
/**
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index eb5de800ed90..1c10b542ab23 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv,
netif_tx_wake_all_queues(priv->dev);
}
+ kfree(cmd);
done:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76caebaa4397..e970897f6ab5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func)
kfree(packet);
}
+ kfree(card);
lbs_deb_leave(LBS_DEB_SDIO);
}
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 58048189bd24..fe1ea43c5149 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -571,7 +571,10 @@ static int lbs_thread(void *data)
netdev_info(dev, "Timeout submitting command 0x%04x\n",
le16_to_cpu(cmdnode->cmdbuf->command));
lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
- if (priv->reset_card)
+
+ /* Reset card, but only when it isn't in the process
+ * of being shut down anyway. */
+ if (!dev->dismantle && priv->reset_card)
priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88455b1b9fe0..cb8c2aca54e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
mutex_unlock(&rt2x00dev->csr_mutex);
}
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ int i, count;
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN))
+ return 0;
+
+ rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+ rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+ rt2x00_set_field32(&reg, WLAN_EN, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+ udelay(REGISTER_BUSY_DELAY);
+
+ count = 0;
+ do {
+ /*
+ * Check PLL_LD & XTAL_RDY.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (rt2x00_get_field32(reg, PLL_LD) &&
+ rt2x00_get_field32(reg, XTAL_RDY))
+ break;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ if (i >= REGISTER_BUSY_COUNT) {
+
+ if (count >= 10)
+ return -EIO;
+
+ rt2800_register_write(rt2x00dev, 0x58, 0x018);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x418);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x618);
+ udelay(REGISTER_BUSY_DELAY);
+ count++;
+ } else {
+ count = 0;
+ }
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+ rt2x00_set_field32(&reg, WLAN_RESET, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2x00_set_field32(&reg, WLAN_RESET, 0);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+ } while (count != 0);
+
+ return 0;
+}
+
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1)
@@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
u32 reg;
+ int retval;
+
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+ if (retval)
+ return -EBUSY;
+ }
/*
* If driver doesn't wake up firmware here,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 235376e9cb04..98aa426a3564 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
return rt2800_validate_eeprom(rt2x00dev);
}
-static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
-{
- u32 reg;
- int i, count;
-
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- if (rt2x00_get_field32(reg, WLAN_EN))
- return 0;
-
- rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
- rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
- rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
- rt2x00_set_field32(&reg, WLAN_EN, 1);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-
- udelay(REGISTER_BUSY_DELAY);
-
- count = 0;
- do {
- /*
- * Check PLL_LD & XTAL_RDY.
- */
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
- if (rt2x00_get_field32(reg, PLL_LD) &&
- rt2x00_get_field32(reg, XTAL_RDY))
- break;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- if (i >= REGISTER_BUSY_COUNT) {
-
- if (count >= 10)
- return -EIO;
-
- rt2800_register_write(rt2x00dev, 0x58, 0x018);
- udelay(REGISTER_BUSY_DELAY);
- rt2800_register_write(rt2x00dev, 0x58, 0x418);
- udelay(REGISTER_BUSY_DELAY);
- rt2800_register_write(rt2x00dev, 0x58, 0x618);
- udelay(REGISTER_BUSY_DELAY);
- count++;
- } else {
- count = 0;
- }
-
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
- rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
- rt2x00_set_field32(&reg, WLAN_RESET, 1);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
- udelay(10);
- rt2x00_set_field32(&reg, WLAN_RESET, 0);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
- udelay(10);
- rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
- } while (count != 0);
-
- return 0;
-}
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
@@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
- * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
- * clk for rt3290. That avoid the MCU fail in start phase.
- */
- if (rt2x00_rt(rt2x00dev, RT3290)) {
- retval = rt2800_enable_wlan_rt3290(rt2x00dev);
-
- if (retval)
- return retval;
- }
-
- /*
* This device has multiple filters for control frames
* and has a separate filter for PS Poll frames.
*/
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f32259686b45..3f7bc5cadf9a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
{
- struct ieee80211_conf conf = { .flags = 0 };
- struct rt2x00lib_conf libconf = { .conf = &conf };
+ struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 75d3eff94296..3674d877ed7c 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void)
{
return platform_driver_register(&imx23_pinctrl_driver);
}
-arch_initcall(imx23_pinctrl_init);
+postcore_initcall(imx23_pinctrl_init);
static void __exit imx23_pinctrl_exit(void)
{
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index b973026811a2..0f5b2122b1ba 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void)
{
return platform_driver_register(&imx28_pinctrl_driver);
}
-arch_initcall(imx28_pinctrl_init);
+postcore_initcall(imx28_pinctrl_init);
static void __exit imx28_pinctrl_exit(void)
{
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 6f99769c6733..5f3e9d0221e1 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -766,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
-DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2");
+DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
DB8500_FUNC_GROUPS(usb, "usb_a_1");
DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53b0d49a7a1c..ec6ac501b23a 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
if (!nmk_gpio_chips[i]) {
dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, npct);
return -EPROBE_DEFER;
}
npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 2aae8a8978e9..7fca6ce5952b 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,7 +1217,6 @@ out_no_rsc_remap:
iounmap(spmx->gpio_virtbase);
out_no_gpio_remap:
platform_set_drvdata(pdev, NULL);
- devm_kfree(&pdev->dev, spmx);
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index a7ad8c112d91..309f5b9a70ec 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto out_no_resource;
- }
+ if (!res)
+ return -ENOENT;
upmx->phybase = res->start;
upmx->physize = resource_size(res);
@@ -1165,8 +1163,6 @@ out_no_remap:
platform_set_drvdata(pdev, NULL);
out_no_memregion:
release_mem_region(upmx->phybase, upmx->physize);
-out_no_resource:
- devm_kfree(&pdev->dev, upmx);
return ret;
}
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 2ca7dd1ab3e4..cd33add118ce 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev)
inputdev->close = cmpc_accel_close_v4;
}
+#ifdef CONFIG_PM_SLEEP
static int cmpc_accel_suspend_v4(struct device *dev)
{
struct input_dev *inputdev;
@@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev)
return 0;
}
+#endif
static int cmpc_accel_add_v4(struct acpi_device *acpi)
{
@@ -752,6 +754,7 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
return cmpc_remove_acpi_notify_device(acpi);
}
+#ifdef CONFIG_PM_SLEEP
static int cmpc_tablet_resume(struct device *dev)
{
struct input_dev *inputdev = dev_get_drvdata(dev);
@@ -761,6 +764,7 @@ static int cmpc_tablet_resume(struct device *dev)
input_report_switch(inputdev, SW_TABLET_MODE, !val);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index d2e41735a47b..7acae3f85f3b 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_fujitsu_resume(struct device *dev)
{
fujitsu_reset();
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index d9ab6f64dcec..777c7e3dda51 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int hdaps_resume(struct device *dev)
{
return hdaps_device_init();
}
+#endif
static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index f4d91154ad67..6b9af989632b 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_suspend(struct device *dev)
{
/* make sure the device is off when we suspend */
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f64441844317..2111dbb7e1e3 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,9 @@
#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
+#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device);
+#endif
static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
@@ -753,6 +755,7 @@ err_bluetooth:
return retval;
}
+#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device)
{
u8 data;
@@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device)
return 0;
}
+#endif
static int __init msi_laptop_input_setup(void)
{
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 24480074bcf0..8e8caa767d6a 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_pcc_hotkey_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
static struct acpi_driver acpi_pcc_driver = {
@@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
/* kernel module interface */
+#ifdef CONFIG_PM_SLEEP
static int acpi_pcc_hotkey_resume(struct device *dev)
{
struct pcc_acpi *pcc;
@@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
}
+#endif
static int acpi_pcc_hotkey_add(struct acpi_device *device)
{
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9363969ad07a..daaddec68def 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
"(default: 0)");
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_kbd_backlight_resume(void);
+static void sony_nc_thermal_resume(void);
+#endif
static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
unsigned int handle);
static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
@@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd);
static int sony_nc_thermal_setup(struct platform_device *pd);
static void sony_nc_thermal_cleanup(struct platform_device *pd);
-static void sony_nc_thermal_resume(void);
static int sony_nc_lid_resume_setup(struct platform_device *pd);
static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
@@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
sony_nc_handles_cleanup(pd);
}
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_function_resume(void)
{
unsigned int i, result, bitmask, arg;
@@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
@@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
}
}
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_kbd_backlight_resume(void)
{
int ignore = 0;
@@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void)
(kbdbl_ctl->base + 0x200) |
(kbdbl_ctl->timeout << 0x10), &ignore);
}
+#endif
struct battery_care_control {
struct device_attribute attrs[2];
@@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd)
}
}
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_thermal_resume(void)
{
unsigned int status = sony_nc_thermal_mode_get();
@@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void)
if (status != th_handle->mode)
sony_nc_thermal_mode_set(th_handle->mode);
}
+#endif
/* resume on LID open */
struct snc_lid_resume_control {
@@ -4287,6 +4295,7 @@ err_free_resources:
return result;
}
+#ifdef CONFIG_PM_SLEEP
static int sony_pic_suspend(struct device *dev)
{
if (sony_pic_disable(to_acpi_device(dev)))
@@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev)
spic_dev.cur_ioport, spic_dev.cur_irq);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e7f73287636c..f28f36ccdcf4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
+#ifdef CONFIG_PM_SLEEP
static int tpacpi_suspend_handler(struct device *dev)
{
struct ibm_struct *ibm, *itmp;
@@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(tpacpi_pm,
tpacpi_suspend_handler, tpacpi_resume_handler);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c13ba5bac93f..5f1256d5e933 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
}
}
+#ifdef CONFIG_PM_SLEEP
static int toshiba_acpi_suspend(struct device *device)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
@@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
toshiba_acpi_suspend, toshiba_acpi_resume);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 715a43cb5e3c..5e5d6317d690 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int toshiba_bt_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
static struct acpi_driver toshiba_bt_rfkill_driver = {
@@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
toshiba_bluetooth_enable(device->handle);
}
+#ifdef CONFIG_PM_SLEEP
static int toshiba_bt_resume(struct device *dev)
{
return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
}
+#endif
static int toshiba_bt_rfkill_add(struct acpi_device *device)
{
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 849c07c13bf6..38ba39d7ca7d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
}
}
+#ifdef CONFIG_PM_SLEEP
static int ebook_switch_resume(struct device *dev)
{
return ebook_send_state(to_acpi_device(dev));
}
+#endif
static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index eb415bd76494..9592b936b71b 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
void rtc_update_irq(struct rtc_device *rtc,
unsigned long num, unsigned long events)
{
+ pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
@@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work)
mutex_lock(&rtc->ops_lock);
again:
+ pm_relax(rtc->dev.parent);
__rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
while ((next = timerqueue_getnext(&rtc->timerqueue))) {
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 132333d75408..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
- pm_wakeup_event(cmos_rtc.dev, 0);
}
spin_unlock(&rtc_lock);
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a6f76bf6e3d..b1032931a1c4 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
switch (sdias_evbuf.event_status) {
case EVSTATE_ALL_STORED:
TRACE("all stored\n");
+ break;
case EVSTATE_PART_STORED:
TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
break;
case EVSTATE_NO_DATA:
TRACE("no data\n");
+ /* fall through */
default:
pr_err("Error from SCLP while copying hsa. "
"Event status = %x\n",
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 2374468615ed..32c26d795ed0 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -324,8 +324,16 @@ int __init register_intc_controller(struct intc_desc *desc)
res = irq_create_identity_mapping(d->domain, irq);
if (unlikely(res)) {
- pr_err("can't get irq_desc for %d\n", irq);
- continue;
+ if (res == -EEXIST) {
+ res = irq_domain_associate(d->domain, irq, irq);
+ if (unlikely(res)) {
+ pr_err("domain association failure\n");
+ continue;
+ }
+ } else {
+ pr_err("can't identity map IRQ %d\n", irq);
+ continue;
+ }
}
intc_irq_xlate_set(irq, vect->enum_id, d);
@@ -345,8 +353,19 @@ int __init register_intc_controller(struct intc_desc *desc)
*/
res = irq_create_identity_mapping(d->domain, irq2);
if (unlikely(res)) {
- pr_err("can't get irq_desc for %d\n", irq2);
- continue;
+ if (res == -EEXIST) {
+ res = irq_domain_associate(d->domain,
+ irq, irq);
+ if (unlikely(res)) {
+ pr_err("domain association "
+ "failure\n");
+ continue;
+ }
+ } else {
+ pr_err("can't identity map IRQ %d\n",
+ irq);
+ continue;
+ }
}
vect2->enum_id = 0;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ee0ebacf8227..89dcf155d57e 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
- loop = 10;
+ loop = 1000;
do {
status = readl(&ehci_regs->status);
if (!(status & STS_HALT))
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index e4e2fd1b5107..202bba6c997c 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -9,3 +9,6 @@ config VHOST_NET
To compile this driver as a module, choose M here: the module will
be called vhost_net.
+if STAGING
+source "drivers/vhost/Kconfig.tcm"
+endif
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
new file mode 100644
index 000000000000..a9c6f76e3208
--- /dev/null
+++ b/drivers/vhost/Kconfig.tcm
@@ -0,0 +1,6 @@
+config TCM_VHOST
+ tristate "TCM_VHOST fabric module (EXPERIMENTAL)"
+ depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m
+ default n
+ ---help---
+ Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 72dd02050bb9..a27b053bc9ab 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := vhost.o net.o
+
+obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
new file mode 100644
index 000000000000..fb366540ed54
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.c
@@ -0,0 +1,1628 @@
+/*******************************************************************************
+ * Vhost kernel TCM fabric driver for virtio SCSI initiators
+ *
+ * (C) Copyright 2010-2012 RisingTide Systems LLC.
+ * (C) Copyright 2010-2012 IBM Corp.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
+#include <linux/virtio_scsi.h>
+
+#include "vhost.c"
+#include "vhost.h"
+#include "tcm_vhost.h"
+
+struct vhost_scsi {
+ atomic_t vhost_ref_cnt;
+ struct tcm_vhost_tpg *vs_tpg;
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[3];
+
+ struct vhost_work vs_completion_work; /* cmd completion work item */
+ struct list_head vs_completion_list; /* cmd completion queue */
+ spinlock_t vs_completion_lock; /* protects s_completion_list */
+};
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+
+static struct workqueue_struct *tcm_vhost_workqueue;
+
+/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(tcm_vhost_mutex);
+static LIST_HEAD(tcm_vhost_list);
+
+static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *tcm_vhost_get_fabric_name(void)
+{
+ return "vhost";
+}
+
+static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_fabric_proto_ident(se_tpg);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_vhost_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+}
+
+static u32 tcm_vhost_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+}
+
+static char *tcm_vhost_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_FCP:
+ return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+}
+
+static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to alocate struct tcm_vhost_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void tcm_vhost_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_vhost_nacl *nacl = container_of(se_nacl,
+ struct tcm_vhost_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+{
+ return;
+}
+
+static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void tcm_vhost_close_session(struct se_session *se_sess)
+{
+ return;
+}
+
+static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+{
+ /* Go ahead and process the write immediately */
+ target_execute_cmd(se_cmd);
+ return 0;
+}
+
+static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
+
+static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+ struct tcm_vhost_cmd, tvc_se_cmd);
+ vhost_scsi_complete_cmd(tv_cmd);
+ return 0;
+}
+
+static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+{
+ struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+ struct tcm_vhost_cmd, tvc_se_cmd);
+ vhost_scsi_complete_cmd(tv_cmd);
+ return 0;
+}
+
+static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd,
+ u32 sense_length)
+{
+ return 0;
+}
+
+static u16 tcm_vhost_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+ struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+
+ /* TODO locking against target/backend threads? */
+ transport_generic_free_cmd(se_cmd, 1);
+
+ if (tv_cmd->tvc_sgl_count) {
+ u32 i;
+ for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+
+ kfree(tv_cmd->tvc_sgl);
+ }
+
+ kfree(tv_cmd);
+}
+
+/* Dequeue a command from the completion list */
+static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
+ struct vhost_scsi *vs)
+{
+ struct tcm_vhost_cmd *tv_cmd = NULL;
+
+ spin_lock_bh(&vs->vs_completion_lock);
+ if (list_empty(&vs->vs_completion_list)) {
+ spin_unlock_bh(&vs->vs_completion_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(tv_cmd, &vs->vs_completion_list,
+ tvc_completion_list) {
+ list_del(&tv_cmd->tvc_completion_list);
+ break;
+ }
+ spin_unlock_bh(&vs->vs_completion_lock);
+ return tv_cmd;
+}
+
+/* Fill in status and signal that we are done processing this command
+ *
+ * This is scheduled in the vhost work queue so we are called with the owner
+ * process mm and can access the vring.
+ */
+static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+{
+ struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+ vs_completion_work);
+ struct tcm_vhost_cmd *tv_cmd;
+
+ while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
+ struct virtio_scsi_cmd_resp v_rsp;
+ struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+ int ret;
+
+ pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
+ tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
+
+ memset(&v_rsp, 0, sizeof(v_rsp));
+ v_rsp.resid = se_cmd->residual_count;
+ /* TODO is status_qualifier field needed? */
+ v_rsp.status = se_cmd->scsi_status;
+ v_rsp.sense_len = se_cmd->scsi_sense_length;
+ memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
+ v_rsp.sense_len);
+ ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+ if (likely(ret == 0))
+ vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+ vhost_scsi_free_cmd(tv_cmd);
+ }
+
+ vhost_signal(&vs->dev, &vs->vqs[2]);
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+ struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+
+ pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
+
+ spin_lock_bh(&vs->vs_completion_lock);
+ list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+ spin_unlock_bh(&vs->vs_completion_lock);
+
+ vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+}
+
+static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
+ struct tcm_vhost_tpg *tv_tpg,
+ struct virtio_scsi_cmd_req *v_req,
+ u32 exp_data_len,
+ int data_direction)
+{
+ struct tcm_vhost_cmd *tv_cmd;
+ struct tcm_vhost_nexus *tv_nexus;
+ struct se_portal_group *se_tpg = &tv_tpg->se_tpg;
+ struct se_session *se_sess;
+ struct se_cmd *se_cmd;
+ int sam_task_attr;
+
+ tv_nexus = tv_tpg->tpg_nexus;
+ if (!tv_nexus) {
+ pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+ return ERR_PTR(-EIO);
+ }
+ se_sess = tv_nexus->tvn_se_sess;
+
+ tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
+ if (!tv_cmd) {
+ pr_err("Unable to allocate struct tcm_vhost_cmd\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
+ tv_cmd->tvc_tag = v_req->tag;
+
+ se_cmd = &tv_cmd->tvc_se_cmd;
+ /*
+ * Locate the SAM Task Attr from virtio_scsi_cmd_req
+ */
+ sam_task_attr = v_req->task_attr;
+ /*
+ * Initialize struct se_cmd descriptor from TCM infrastructure
+ */
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len,
+ data_direction, sam_task_attr,
+ &tv_cmd->tvc_sense_buf[0]);
+
+#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */
+ if (bidi)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+#endif
+ return tv_cmd;
+}
+
+/*
+ * Map a user memory range into a scatterlist
+ *
+ * Returns the number of scatterlist entries used or -errno on error.
+ */
+static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
+ unsigned int sgl_count, void __user *ptr, size_t len, int write)
+{
+ struct scatterlist *sg = sgl;
+ unsigned int npages = 0;
+ int ret;
+
+ while (len > 0) {
+ struct page *page;
+ unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
+ unsigned int nbytes = min_t(unsigned int,
+ PAGE_SIZE - offset, len);
+
+ if (npages == sgl_count) {
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
+ BUG_ON(ret == 0); /* we should either get our page or fail */
+ if (ret < 0)
+ goto err;
+
+ sg_set_page(sg, page, nbytes, offset);
+ ptr += nbytes;
+ len -= nbytes;
+ sg++;
+ npages++;
+ }
+ return npages;
+
+err:
+ /* Put pages that we hold */
+ for (sg = sgl; sg != &sgl[npages]; sg++)
+ put_page(sg_page(sg));
+ return ret;
+}
+
+static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+ struct iovec *iov, unsigned int niov, int write)
+{
+ int ret;
+ unsigned int i;
+ u32 sgl_count;
+ struct scatterlist *sg;
+
+ /*
+ * Find out how long sglist needs to be
+ */
+ sgl_count = 0;
+ for (i = 0; i < niov; i++) {
+ sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
+ PAGE_SIZE - 1) >> PAGE_SHIFT) -
+ ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
+ }
+ /* TODO overflow checking */
+
+ sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
+ if (!sg)
+ return -ENOMEM;
+ pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
+ sg, sgl_count, IS_ERR(sg));
+ sg_init_table(sg, sgl_count);
+
+ tv_cmd->tvc_sgl = sg;
+ tv_cmd->tvc_sgl_count = sgl_count;
+
+ pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
+ for (i = 0; i < niov; i++) {
+ ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
+ iov[i].iov_len, write);
+ if (ret < 0) {
+ for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ kfree(tv_cmd->tvc_sgl);
+ tv_cmd->tvc_sgl = NULL;
+ tv_cmd->tvc_sgl_count = 0;
+ return ret;
+ }
+
+ sg += ret;
+ sgl_count -= ret;
+ }
+ return 0;
+}
+
+static void tcm_vhost_submission_work(struct work_struct *work)
+{
+ struct tcm_vhost_cmd *tv_cmd =
+ container_of(work, struct tcm_vhost_cmd, work);
+ struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+ struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
+ int rc, sg_no_bidi = 0;
+ /*
+ * Locate the struct se_lun pointer based on v_req->lun, and
+ * attach it to struct se_cmd
+ */
+ rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun);
+ if (rc < 0) {
+ pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun);
+ transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd,
+ tv_cmd->tvc_se_cmd.scsi_sense_reason, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ }
+
+ rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb);
+ if (rc == -ENOMEM) {
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ } else if (rc < 0) {
+ if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+ tcm_vhost_queue_status(se_cmd);
+ else
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ }
+
+ if (tv_cmd->tvc_sgl_count) {
+ sg_ptr = tv_cmd->tvc_sgl;
+ /*
+ * For BIDI commands, pass in the extra READ buffer
+ * to transport_generic_map_mem_to_cmd() below..
+ */
+/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
+#if 0
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ sg_bidi_ptr = NULL;
+ sg_no_bidi = 0;
+ }
+#endif
+ } else {
+ sg_ptr = NULL;
+ }
+
+ rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr,
+ tv_cmd->tvc_sgl_count, sg_bidi_ptr,
+ sg_no_bidi);
+ if (rc < 0) {
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ }
+ transport_handle_cdb_direct(se_cmd);
+}
+
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+{
+ struct vhost_virtqueue *vq = &vs->vqs[2];
+ struct virtio_scsi_cmd_req v_req;
+ struct tcm_vhost_tpg *tv_tpg;
+ struct tcm_vhost_cmd *tv_cmd;
+ u32 exp_data_len, data_first, data_num, data_direction;
+ unsigned out, in, i;
+ int head, ret;
+
+ /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+ tv_tpg = vs->vs_tpg;
+ if (unlikely(!tv_tpg)) {
+ pr_err("%s endpoint not set\n", __func__);
+ return;
+ }
+
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(&vs->dev, vq);
+
+ for (;;) {
+ head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+ ARRAY_SIZE(vq->iov), &out, &in,
+ NULL, NULL);
+ pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+ head, out, in);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+ vhost_disable_notify(&vs->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+/* FIXME: BIDI operation */
+ if (out == 1 && in == 1) {
+ data_direction = DMA_NONE;
+ data_first = 0;
+ data_num = 0;
+ } else if (out == 1 && in > 1) {
+ data_direction = DMA_FROM_DEVICE;
+ data_first = out + 1;
+ data_num = in - 1;
+ } else if (out > 1 && in == 1) {
+ data_direction = DMA_TO_DEVICE;
+ data_first = 1;
+ data_num = out - 1;
+ } else {
+ vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
+ out, in);
+ break;
+ }
+
+ /*
+ * Check for a sane resp buffer so we can report errors to
+ * the guest.
+ */
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes\n", vq->iov[out].iov_len);
+ break;
+ }
+
+ if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
+ " bytes\n", vq->iov[0].iov_len);
+ break;
+ }
+ pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
+ " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
+ ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
+ sizeof(v_req));
+ if (unlikely(ret)) {
+ vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
+ break;
+ }
+
+ exp_data_len = 0;
+ for (i = 0; i < data_num; i++)
+ exp_data_len += vq->iov[data_first + i].iov_len;
+
+ tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
+ exp_data_len, data_direction);
+ if (IS_ERR(tv_cmd)) {
+ vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
+ PTR_ERR(tv_cmd));
+ break;
+ }
+ pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
+ ": %d\n", tv_cmd, exp_data_len, data_direction);
+
+ tv_cmd->tvc_vhost = vs;
+
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes, out: %d, in: %d\n",
+ vq->iov[out].iov_len, out, in);
+ break;
+ }
+
+ tv_cmd->tvc_resp = vq->iov[out].iov_base;
+
+ /*
+ * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
+ * that will be used by tcm_vhost_new_cmd_map() and down into
+ * target_setup_cmd_from_cdb()
+ */
+ memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+ /*
+ * Check that the received CDB size does not exceed our
+ * hardcoded max for tcm_vhost
+ */
+ /* TODO what if cdb was too small for varlen cdb header? */
+ if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
+ TCM_VHOST_MAX_CDB_SIZE)) {
+ vq_err(vq, "Received SCSI CDB with command_size: %d that"
+ " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+ scsi_command_size(tv_cmd->tvc_cdb),
+ TCM_VHOST_MAX_CDB_SIZE);
+ break; /* TODO */
+ }
+ tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+
+ pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+ tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
+
+ if (data_direction != DMA_NONE) {
+ ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
+ &vq->iov[data_first], data_num,
+ data_direction == DMA_TO_DEVICE);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to map iov to sgl\n");
+ break; /* TODO */
+ }
+ }
+
+ /*
+ * Save the descriptor from vhost_get_vq_desc() to be used to
+ * complete the virtio-scsi request in TCM callback context via
+ * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+ */
+ tv_cmd->tvc_vq_desc = head;
+ /*
+ * Dispatch tv_cmd descriptor for cmwq execution in process
+ * context provided by tcm_vhost_workqueue. This also ensures
+ * tv_cmd is executed on the same kworker CPU as this vhost
+ * thread to gain positive L2 cache locality effects.
+ */
+ INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
+ queue_work(tcm_vhost_workqueue, &tv_cmd->work);
+ }
+
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
+{
+ pr_err("%s: The handling func for control queue.\n", __func__);
+}
+
+static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+{
+ pr_err("%s: The handling func for event queue.\n", __func__);
+}
+
+static void vhost_scsi_handle_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+ vhost_scsi_handle_vq(vs);
+}
+
+/*
+ * Called from vhost_scsi_ioctl() context to walk the list of available
+ * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ */
+static int vhost_scsi_set_endpoint(
+ struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+{
+ struct tcm_vhost_tport *tv_tport;
+ struct tcm_vhost_tpg *tv_tpg;
+ int index;
+
+ mutex_lock(&vs->dev.mutex);
+ /* Verify that ring has been setup correctly. */
+ for (index = 0; index < vs->dev.nvqs; ++index) {
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(&vs->vqs[index])) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ }
+
+ if (vs->vs_tpg) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EEXIST;
+ }
+ mutex_unlock(&vs->dev.mutex);
+
+ mutex_lock(&tcm_vhost_mutex);
+ list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ if (!tv_tpg->tpg_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ continue;
+ }
+ if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ continue;
+ }
+ tv_tport = tv_tpg->tport;
+
+ if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
+ (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
+ atomic_inc(&tv_tpg->tv_tpg_vhost_count);
+ smp_mb__after_atomic_inc();
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ mutex_unlock(&tcm_vhost_mutex);
+
+ mutex_lock(&vs->dev.mutex);
+ vs->vs_tpg = tv_tpg;
+ atomic_inc(&vs->vhost_ref_cnt);
+ smp_mb__after_atomic_inc();
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+ }
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ }
+ mutex_unlock(&tcm_vhost_mutex);
+ return -EINVAL;
+}
+
+static int vhost_scsi_clear_endpoint(
+ struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+{
+ struct tcm_vhost_tport *tv_tport;
+ struct tcm_vhost_tpg *tv_tpg;
+ int index;
+
+ mutex_lock(&vs->dev.mutex);
+ /* Verify that ring has been setup correctly. */
+ for (index = 0; index < vs->dev.nvqs; ++index) {
+ if (!vhost_vq_access_ok(&vs->vqs[index])) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ }
+
+ if (!vs->vs_tpg) {
+ mutex_unlock(&vs->dev.mutex);
+ return -ENODEV;
+ }
+ tv_tpg = vs->vs_tpg;
+ tv_tport = tv_tpg->tport;
+
+ if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
+ (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
+ mutex_unlock(&vs->dev.mutex);
+ pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+ " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+ tv_tport->tport_name, tv_tpg->tport_tpgt,
+ t->vhost_wwpn, t->vhost_tpgt);
+ return -EINVAL;
+ }
+ atomic_dec(&tv_tpg->tv_tpg_vhost_count);
+ vs->vs_tpg = NULL;
+ mutex_unlock(&vs->dev.mutex);
+
+ return 0;
+}
+
+static int vhost_scsi_open(struct inode *inode, struct file *f)
+{
+ struct vhost_scsi *s;
+ int r;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+ INIT_LIST_HEAD(&s->vs_completion_list);
+ spin_lock_init(&s->vs_completion_lock);
+
+ s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
+ s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
+ s->vqs[2].handle_kick = vhost_scsi_handle_kick;
+ r = vhost_dev_init(&s->dev, s->vqs, 3);
+ if (r < 0) {
+ kfree(s);
+ return r;
+ }
+
+ f->private_data = s;
+ return 0;
+}
+
+static int vhost_scsi_release(struct inode *inode, struct file *f)
+{
+ struct vhost_scsi *s = f->private_data;
+
+ if (s->vs_tpg && s->vs_tpg->tport) {
+ struct vhost_scsi_target backend;
+
+ memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
+ sizeof(backend.vhost_wwpn));
+ backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
+ vhost_scsi_clear_endpoint(s, &backend);
+ }
+
+ vhost_dev_cleanup(&s->dev, false);
+ kfree(s);
+ return 0;
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+ if (features & ~VHOST_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vs->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vs->dev)) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ vs->dev.acked_features = features;
+ /* TODO possibly smp_wmb() and flush vqs */
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+}
+
+static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_scsi *vs = f->private_data;
+ struct vhost_scsi_target backend;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ u64 features;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_SCSI_SET_ENDPOINT:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+
+ return vhost_scsi_set_endpoint(vs, &backend);
+ case VHOST_SCSI_CLEAR_ENDPOINT:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+
+ return vhost_scsi_clear_endpoint(vs, &backend);
+ case VHOST_SCSI_GET_ABI_VERSION:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+
+ backend.abi_version = VHOST_SCSI_ABI_VERSION;
+
+ if (copy_to_user(argp, &backend, sizeof backend))
+ return -EFAULT;
+ return 0;
+ case VHOST_GET_FEATURES:
+ features = VHOST_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof features))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
+ return vhost_scsi_set_features(vs, features);
+ default:
+ mutex_lock(&vs->dev.mutex);
+ r = vhost_dev_ioctl(&vs->dev, ioctl, arg);
+ mutex_unlock(&vs->dev.mutex);
+ return r;
+ }
+}
+
+static const struct file_operations vhost_scsi_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_scsi_release,
+ .unlocked_ioctl = vhost_scsi_ioctl,
+ /* TODO compat ioctl? */
+ .open = vhost_scsi_open,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vhost_scsi_misc = {
+ MISC_DYNAMIC_MINOR,
+ "vhost-scsi",
+ &vhost_scsi_fops,
+};
+
+static int __init vhost_scsi_register(void)
+{
+ return misc_register(&vhost_scsi_misc);
+}
+
+static int vhost_scsi_deregister(void)
+{
+ return misc_deregister(&vhost_scsi_misc);
+}
+
+static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+{
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return "SAS";
+ case SCSI_PROTOCOL_FCP:
+ return "FCP";
+ case SCSI_PROTOCOL_ISCSI:
+ return "iSCSI";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static int tcm_vhost_port_link(
+ struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+
+ atomic_inc(&tv_tpg->tv_tpg_port_count);
+ smp_mb__after_atomic_inc();
+
+ return 0;
+}
+
+static void tcm_vhost_port_unlink(
+ struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+
+ atomic_dec(&tv_tpg->tv_tpg_port_count);
+ smp_mb__after_atomic_dec();
+}
+
+static struct se_node_acl *tcm_vhost_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct tcm_vhost_nacl *nacl;
+ u64 wwpn = 0;
+ u32 nexus_depth;
+
+ /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL); */
+ se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ nexus_depth = 1;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+ nacl->iport_wwpn = wwpn;
+
+ return se_nacl;
+}
+
+static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct tcm_vhost_nacl *nacl = container_of(se_acl,
+ struct tcm_vhost_nacl, se_node_acl);
+ core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+static int tcm_vhost_make_nexus(
+ struct tcm_vhost_tpg *tv_tpg,
+ const char *name)
+{
+ struct se_portal_group *se_tpg;
+ struct tcm_vhost_nexus *tv_nexus;
+
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ if (tv_tpg->tpg_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ pr_debug("tv_tpg->tpg_nexus already exists\n");
+ return -EEXIST;
+ }
+ se_tpg = &tv_tpg->se_tpg;
+
+ tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+ if (!tv_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+ return -ENOMEM;
+ }
+ /*
+ * Initialize the struct se_session pointer
+ */
+ tv_nexus->tvn_se_sess = transport_init_session();
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ kfree(tv_nexus);
+ return -ENOMEM;
+ }
+ /*
+ * Since we are running in 'demo mode' this call will generate a
+ * struct se_node_acl for the tcm_vhost struct se_portal_group with
+ * the SCSI Initiator port name of the passed configfs group 'name'.
+ */
+ tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ se_tpg, (unsigned char *)name);
+ if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ pr_debug("core_tpg_check_initiator_node_acl() failed"
+ " for %s\n", name);
+ transport_free_session(tv_nexus->tvn_se_sess);
+ kfree(tv_nexus);
+ return -ENOMEM;
+ }
+ /*
+ * Now register the TCM vHost virtual I_T Nexus as active with the
+ * call to __transport_register_session()
+ */
+ __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+ tv_nexus->tvn_se_sess, tv_nexus);
+ tv_tpg->tpg_nexus = tv_nexus;
+
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ return 0;
+}
+
+static int tcm_vhost_drop_nexus(
+ struct tcm_vhost_tpg *tpg)
+{
+ struct se_session *se_sess;
+ struct tcm_vhost_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ se_sess = tv_nexus->tvn_se_sess;
+ if (!se_sess) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ if (atomic_read(&tpg->tv_tpg_port_count)) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove TCM_vHost I_T Nexus with"
+ " active TPG port count: %d\n",
+ atomic_read(&tpg->tv_tpg_port_count));
+ return -EPERM;
+ }
+
+ if (atomic_read(&tpg->tv_tpg_vhost_count)) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove TCM_vHost I_T Nexus with"
+ " active TPG vhost count: %d\n",
+ atomic_read(&tpg->tv_tpg_vhost_count));
+ return -EPERM;
+ }
+
+ pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
+ " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ /*
+ * Release the SCSI I_T Nexus to the emulated vHost Target Port
+ */
+ transport_deregister_session(tv_nexus->tvn_se_sess);
+ tpg->tpg_nexus = NULL;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ kfree(tv_nexus);
+ return 0;
+}
+
+static ssize_t tcm_vhost_tpg_show_nexus(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_nexus *tv_nexus;
+ ssize_t ret;
+
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ tv_nexus = tv_tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+ return ret;
+}
+
+static ssize_t tcm_vhost_tpg_store_nexus(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
+ unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+ int ret;
+ /*
+ * Shut down the active I_T nexus if 'NULL' is passed.
+ */
+ if (!strncmp(page, "NULL", 4)) {
+ ret = tcm_vhost_drop_nexus(tv_tpg);
+ return (!ret) ? count : ret;
+ }
+ /*
+ * Otherwise make sure the passed virtual Initiator port WWN matches
+ * the fabric protocol_id set in tcm_vhost_make_tport(), and call
+ * tcm_vhost_make_nexus().
+ */
+ if (strlen(page) >= TCM_VHOST_NAMELEN) {
+ pr_err("Emulated NAA Sas Address: %s, exceeds"
+ " max: %d\n", page, TCM_VHOST_NAMELEN);
+ return -EINVAL;
+ }
+ snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+
+ ptr = strstr(i_port, "naa.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+ pr_err("Passed SAS Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_vhost_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "fc.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+ pr_err("Passed FCP Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_vhost_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[3]; /* Skip over "fc." */
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "iqn.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+ pr_err("Passed iSCSI Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_vhost_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ pr_err("Unable to locate prefix for emulated Initiator Port:"
+ " %s\n", i_port);
+ return -EINVAL;
+ /*
+ * Clear any trailing newline for the NAA WWN
+ */
+check_newline:
+ if (i_port[strlen(i_port)-1] == '\n')
+ i_port[strlen(i_port)-1] = '\0';
+
+ ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
+ &tcm_vhost_tpg_nexus.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_vhost_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_vhost_tport *tport = container_of(wwn,
+ struct tcm_vhost_tport, tport_wwn);
+
+ struct tcm_vhost_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_vhost_tpg");
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&tpg->tv_tpg_mutex);
+ INIT_LIST_HEAD(&tpg->tv_tpg_list);
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+
+ ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ mutex_lock(&tcm_vhost_mutex);
+ list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
+ mutex_unlock(&tcm_vhost_mutex);
+
+ return &tpg->se_tpg;
+}
+
+static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+
+ mutex_lock(&tcm_vhost_mutex);
+ list_del(&tpg->tv_tpg_list);
+ mutex_unlock(&tcm_vhost_mutex);
+ /*
+ * Release the virtual I_T Nexus for this vHost TPG
+ */
+ tcm_vhost_drop_nexus(tpg);
+ /*
+ * Deregister the se_tpg from TCM..
+ */
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static struct se_wwn *tcm_vhost_make_tport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_vhost_tport *tport;
+ char *ptr;
+ u64 wwpn = 0;
+ int off = 0;
+
+ /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL); */
+
+ tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+ if (!tport) {
+ pr_err("Unable to allocate struct tcm_vhost_tport");
+ return ERR_PTR(-ENOMEM);
+ }
+ tport->tport_wwpn = wwpn;
+ /*
+ * Determine the emulated Protocol Identifier and Target Port Name
+ * based on the incoming configfs directory name.
+ */
+ ptr = strstr(name, "naa.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_SAS;
+ goto check_len;
+ }
+ ptr = strstr(name, "fc.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_FCP;
+ off = 3; /* Skip over "fc." */
+ goto check_len;
+ }
+ ptr = strstr(name, "iqn.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
+ goto check_len;
+ }
+
+ pr_err("Unable to locate prefix for emulated Target Port:"
+ " %s\n", name);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+
+check_len:
+ if (strlen(name) >= TCM_VHOST_NAMELEN) {
+ pr_err("Emulated %s Address: %s, exceeds"
+ " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
+ TCM_VHOST_NAMELEN);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+ }
+ snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+
+ pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
+ " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+
+ return &tport->tport_wwn;
+}
+
+static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+{
+ struct tcm_vhost_tport *tport = container_of(wwn,
+ struct tcm_vhost_tport, tport_wwn);
+
+ pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
+ " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+ tport->tport_name);
+
+ kfree(tport);
+}
+
+static ssize_t tcm_vhost_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
+ "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_vhost, version);
+
+static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
+ &tcm_vhost_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops tcm_vhost_ops = {
+ .get_fabric_name = tcm_vhost_get_fabric_name,
+ .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
+ .tpg_get_tag = tcm_vhost_get_tag,
+ .tpg_get_default_depth = tcm_vhost_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_vhost_check_true,
+ .tpg_check_demo_mode_cache = tcm_vhost_check_true,
+ .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
+ .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
+ .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
+ .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
+ .release_cmd = tcm_vhost_release_cmd,
+ .shutdown_session = tcm_vhost_shutdown_session,
+ .close_session = tcm_vhost_close_session,
+ .sess_get_index = tcm_vhost_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_vhost_write_pending,
+ .write_pending_status = tcm_vhost_write_pending_status,
+ .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
+ .get_task_tag = tcm_vhost_get_task_tag,
+ .get_cmd_state = tcm_vhost_get_cmd_state,
+ .queue_data_in = tcm_vhost_queue_data_in,
+ .queue_status = tcm_vhost_queue_status,
+ .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len,
+ /*
+ * Setup callers for generic logic in target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_vhost_make_tport,
+ .fabric_drop_wwn = tcm_vhost_drop_tport,
+ .fabric_make_tpg = tcm_vhost_make_tpg,
+ .fabric_drop_tpg = tcm_vhost_drop_tpg,
+ .fabric_post_link = tcm_vhost_port_link,
+ .fabric_pre_unlink = tcm_vhost_port_unlink,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
+};
+
+static int tcm_vhost_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ pr_debug("TCM_VHOST fabric module %s on %s/%s"
+ " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+ utsname()->machine);
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_vhost_ops
+ */
+ fabric->tf_ops = tcm_vhost_ops;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed"
+ " for TCM_VHOST\n");
+ return ret;
+ }
+ /*
+ * Setup our local pointer to *fabric
+ */
+ tcm_vhost_fabric_configfs = fabric;
+ pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+ return 0;
+}
+
+static void tcm_vhost_deregister_configfs(void)
+{
+ if (!tcm_vhost_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
+ tcm_vhost_fabric_configfs = NULL;
+ pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+}
+
+static int __init tcm_vhost_init(void)
+{
+ int ret = -ENOMEM;
+
+ tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
+ if (!tcm_vhost_workqueue)
+ goto out;
+
+ ret = vhost_scsi_register();
+ if (ret < 0)
+ goto out_destroy_workqueue;
+
+ ret = tcm_vhost_register_configfs();
+ if (ret < 0)
+ goto out_vhost_scsi_deregister;
+
+ return 0;
+
+out_vhost_scsi_deregister:
+ vhost_scsi_deregister();
+out_destroy_workqueue:
+ destroy_workqueue(tcm_vhost_workqueue);
+out:
+ return ret;
+}
+
+static void tcm_vhost_exit(void)
+{
+ tcm_vhost_deregister_configfs();
+ vhost_scsi_deregister();
+ destroy_workqueue(tcm_vhost_workqueue);
+}
+
+MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_vhost_init);
+module_exit(tcm_vhost_exit);
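For reference, a hypothetical userspace sketch (not part of the patch) of the configfs provisioning that the fabric callbacks above expect: the directory names are parsed by tcm_vhost_make_tport() and tcm_vhost_make_tpg(), and the write to the "nexus" attribute lands in tcm_vhost_tpg_store_nexus(). The configfs mount point, the WWPN value, and the helper name are illustrative assumptions, not anything defined by this patch.

/*
 * Illustrative only: create /sys/kernel/config/target/vhost/<wwpn>/tpgt_<n>
 * and establish the I_T nexus by writing an initiator WWN to "nexus".
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int make_vhost_tpg(const char *wwpn, unsigned int tpgt)
{
	char path[256];
	int fd;

	/* /sys/kernel/config/target/vhost/<wwpn> -> tcm_vhost_make_tport() */
	snprintf(path, sizeof(path), "/sys/kernel/config/target/vhost/%s", wwpn);
	if (mkdir(path, 0755) && errno != EEXIST)
		return -1;

	/* .../tpgt_<n> -> tcm_vhost_make_tpg() */
	snprintf(path, sizeof(path),
		 "/sys/kernel/config/target/vhost/%s/tpgt_%u", wwpn, tpgt);
	if (mkdir(path, 0755) && errno != EEXIST)
		return -1;

	/* Writing an initiator WWN here reaches tcm_vhost_tpg_store_nexus(). */
	snprintf(path, sizeof(path),
		 "/sys/kernel/config/target/vhost/%s/tpgt_%u/nexus", wwpn, tpgt);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	dprintf(fd, "%s\n", wwpn);
	close(fd);
	return 0;
}

A caller would invoke something like make_vhost_tpg("naa.600140554cf3a18e", 1) before handing the endpoint to vhost-scsi via the ioctls declared in tcm_vhost.h below.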
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
new file mode 100644
index 000000000000..c983ed21e413
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.h
@@ -0,0 +1,101 @@
+#define TCM_VHOST_VERSION "v0.1"
+#define TCM_VHOST_NAMELEN 256
+#define TCM_VHOST_MAX_CDB_SIZE 32
+
+struct tcm_vhost_cmd {
+ /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
+ int tvc_vq_desc;
+ /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
+ u64 tvc_tag;
+ /* The number of scatterlists associated with this cmd */
+ u32 tvc_sgl_count;
+ /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+ u32 tvc_lun;
+ /* Pointer to the SGL formatted memory from virtio-scsi */
+ struct scatterlist *tvc_sgl;
+ /* Pointer to response */
+ struct virtio_scsi_cmd_resp __user *tvc_resp;
+ /* Pointer to vhost_scsi for our device */
+ struct vhost_scsi *tvc_vhost;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd tvc_se_cmd;
+ /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+ struct work_struct work;
+ /* Copy of the incoming SCSI command descriptor block (CDB) */
+ unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+ /* Completed commands list, serviced from vhost worker thread */
+ struct list_head tvc_completion_list;
+};
+
+struct tcm_vhost_nexus {
+ /* Pointer to TCM session for I_T Nexus */
+ struct se_session *tvn_se_sess;
+};
+
+struct tcm_vhost_nacl {
+ /* Binary World Wide unique Port Name for Vhost Initiator port */
+ u64 iport_wwpn;
+ /* ASCII formatted WWPN for SAS Initiator port */
+ char iport_name[TCM_VHOST_NAMELEN];
+ /* Returned by tcm_vhost_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_vhost_tpg {
+ /* Vhost port target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Used to track number of TPG Port/Lun links with respect to explicit I_T Nexus shutdown */
+ atomic_t tv_tpg_port_count;
+ /* Used for vhost_scsi device reference to tpg_nexus */
+ atomic_t tv_tpg_vhost_count;
+ /* list for tcm_vhost_list */
+ struct list_head tv_tpg_list;
+ /* Used to protect access for tpg_nexus */
+ struct mutex tv_tpg_mutex;
+ /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
+ struct tcm_vhost_nexus *tpg_nexus;
+ /* Pointer back to tcm_vhost_tport */
+ struct tcm_vhost_tport *tport;
+ /* Returned by tcm_vhost_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+struct tcm_vhost_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* Binary World Wide unique Port Name for Vhost Target port */
+ u64 tport_wwpn;
+ /* ASCII formatted WWPN for Vhost Target port */
+ char tport_name[TCM_VHOST_NAMELEN];
+ /* Returned by tcm_vhost_make_tport() */
+ struct se_wwn tport_wwn;
+};
+
+/*
+ * As per request from MST, keep TCM_VHOST related ioctl defines out of
+ * linux/vhost.h (user-space) for now.
+ */
+
+#include <linux/vhost.h>
+
+/*
+ * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
+ *
+ * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
+ * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
+ */
+
+#define VHOST_SCSI_ABI_VERSION 0
+
+struct vhost_scsi_target {
+ int abi_version;
+ unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
+ unsigned short vhost_tpgt;
+};
+
+/* VHOST_SCSI specific defines */
+#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
+#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
+#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
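A minimal userspace sketch of the ioctl sequence dispatched by vhost_scsi_ioctl() above, using the VHOST_SCSI_* numbers and struct vhost_scsi_target just defined. The /dev/vhost-scsi path, the header name, and the idea that userspace carries its own copy of these definitions (with TRANSPORT_IQN_LEN supplied) are assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vhost_scsi_user.h"	/* hypothetical local copy of the defines above */

static int vhost_scsi_attach(const char *wwpn, unsigned short tpgt)
{
	struct vhost_scsi_target backend;
	int fd;

	fd = open("/dev/vhost-scsi", O_RDWR);	/* misc device registered by the module */
	if (fd < 0)
		return -1;

	/* Check the ABI revision before issuing any other vhost-scsi ioctl. */
	memset(&backend, 0, sizeof(backend));
	if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &backend) < 0 ||
	    backend.abi_version > VHOST_SCSI_ABI_VERSION)
		goto err;

	/* Bind this vhost instance to the configfs-provisioned WWPN/TPGT. */
	memset(&backend, 0, sizeof(backend));
	snprintf((char *)backend.vhost_wwpn, sizeof(backend.vhost_wwpn), "%s", wwpn);
	backend.vhost_tpgt = tpgt;
	if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
		goto err;

	return fd;	/* kept open for the lifetime of the endpoint */
err:
	close(fd);
	return -1;
}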
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 181fa8158a8b..858c9714b2f3 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
*/
struct zorro_bus {
- struct list_head devices; /* list of devices on this bus */
struct device dev;
};
@@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
- INIT_LIST_HEAD(&bus->devices);
bus->dev.parent = &pdev->dev;
dev_set_name(&bus->dev, "zorro");
error = device_register(&bus->dev);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68ecef95..8c0e56d92938 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -94,25 +94,21 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
{
struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
struct list_head *next;
- struct dentry *p, *q;
+ struct dentry *q;
spin_lock(&sbi->lookup_lock);
+ spin_lock(&root->d_lock);
- if (prev == NULL) {
- spin_lock(&root->d_lock);
+ if (prev)
+ next = prev->d_u.d_child.next;
+ else {
prev = dget_dlock(root);
next = prev->d_subdirs.next;
- p = prev;
- goto start;
}
- p = prev;
- spin_lock(&p->d_lock);
-again:
- next = p->d_u.d_child.next;
-start:
+cont:
if (next == &root->d_subdirs) {
- spin_unlock(&p->d_lock);
+ spin_unlock(&root->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
@@ -121,16 +117,15 @@ start:
q = list_entry(next, struct dentry, d_u.d_child);
spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
- /* Negative dentry - try next */
- if (!simple_positive(q)) {
- spin_unlock(&p->d_lock);
- lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
- p = q;
- goto again;
+ /* Already gone or negative dentry (under construction) - try next */
+ if (q->d_count == 0 || !simple_positive(q)) {
+ spin_unlock(&q->d_lock);
+ next = q->d_u.d_child.next;
+ goto cont;
}
dget_dlock(q);
spin_unlock(&q->d_lock);
- spin_unlock(&p->d_lock);
+ spin_unlock(&root->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
diff --git a/fs/bio.c b/fs/bio.c
index 73922abba832..5eaa70c9d96e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1312,7 +1312,7 @@ EXPORT_SYMBOL(bio_copy_kern);
* Note that this code is very hard to test under normal circumstances because
* direct-io pins the pages with get_user_pages(). This makes
* is_page_cache_freeable return false, and the VM will not clean the pages.
- * But other code (eg, pdflush) could clean the pages if they are mapped
+ * But other code (eg, flusher threads) could clean the pages if they are mapped
* pagecache.
*
* Simply disabling the call to bio_set_pages_dirty() is a good way to test the
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 83baec24946d..6e8f416773d4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -324,7 +324,8 @@ static noinline int add_async_extent(struct async_cow *cow,
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that pdflush sent them down.
+ * are written in the same order that the flusher thread sent them
+ * down.
*/
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index bc2f6ffff3cf..7bb755677a22 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -664,10 +664,6 @@ static noinline int btrfs_mksubvol(struct path *parent,
struct dentry *dentry;
int error;
- error = mnt_want_write(parent->mnt);
- if (error)
- return error;
-
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, parent->dentry, namelen);
@@ -703,7 +699,6 @@ out_dput:
dput(dentry);
out_unlock:
mutex_unlock(&dir->i_mutex);
- mnt_drop_write(parent->mnt);
return error;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 643335a4fe3c..051c7fe551dd 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
- * for pdflush to find them
+ * for the flusher thread to find them
*/
if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
filemap_fdatawrite_range(inode->i_mapping, start, end);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8c6e61d6eed5..f2eb24c477a3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -100,10 +100,6 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
}
-/* NOTE:
- * We move write_super stuff at umount in order to avoid deadlock
- * for umount hold all lock.
- */
static void save_error_info(struct btrfs_fs_info *fs_info)
{
__save_error_info(fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b8708f994e67..e86ae04abe6a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1744,10 +1744,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->fs_devices = root->fs_info->fs_devices;
- /*
- * we don't want write_supers to jump in here with our device
- * half setup
- */
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 5badb0c039de..1562c27a2fab 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -37,15 +37,12 @@
#define EXOFS_DBGMSG2(M...) do {} while (0)
-enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
-
unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
- unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+ unsigned pages = min_t(unsigned, expected_pages,
+ layout->max_io_length / PAGE_SIZE);
- /* TODO: easily support bio chaining */
- pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
return pages;
}
@@ -101,7 +98,8 @@ static void _pcol_reset(struct page_collect *pcol)
* it might not end here. don't be left with nothing
*/
if (!pcol->expected_pages)
- pcol->expected_pages = MAX_PAGES_KMALLOC;
+ pcol->expected_pages =
+ exofs_max_io_pages(&pcol->sbi->layout, ~0);
}
static int pcol_try_alloc(struct page_collect *pcol)
@@ -389,6 +387,8 @@ static int readpage_strip(void *data, struct page *page)
size_t len;
int ret;
+ BUG_ON(!PageLocked(page));
+
/* FIXME: Just for debugging, will be removed */
if (PageUptodate(page))
EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@ -572,8 +572,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
if (!pcol->that_locked_page ||
(pcol->that_locked_page->index != index)) {
- struct page *page = find_get_page(pcol->inode->i_mapping, index);
+ struct page *page;
+ loff_t i_size = i_size_read(pcol->inode);
+
+ if (offset >= i_size) {
+ *uptodate = true;
+ EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
+ return ZERO_PAGE(0);
+ }
+ page = find_get_page(pcol->inode->i_mapping, index);
if (!page) {
page = find_or_create_page(pcol->inode->i_mapping,
index, GFP_NOFS);
@@ -602,12 +610,13 @@ static void __r4w_put_page(void *priv, struct page *page)
{
struct page_collect *pcol = priv;
- if (pcol->that_locked_page != page) {
+ if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
EXOFS_DBGMSG("index=0x%lx\n", page->index);
page_cache_release(page);
return;
}
- EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
+ EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
+ ZERO_PAGE(0) == page ? -1 : page->index);
}
static const struct _ore_r4w_op _r4w_op = {
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 24a49d47e935..1585db1aa365 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
- bio, per_dev->length);
+ osd_req_write(or, _ios_obj(ios, cur_comp),
+ per_dev->offset, bio, per_dev->length);
ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
(ios->si.unit_off + ios->length >
ios->layout->stripe_unit));
- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
+ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
per_dev->offset,
ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(ios->length), per_dev->dev);
} else {
- osd_req_set_attributes(or, _ios_obj(ios, dev));
+ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
ios->out_attr_len, dev);
}
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 433783624d10..dde41a75c7c8 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -400,8 +400,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
ret = ore_write(ios);
if (unlikely(ret))
EXOFS_ERR("%s: ore_write failed.\n", __func__);
- else
- sb->s_dirt = 0;
unlock_super(sb);
@@ -412,14 +410,6 @@ out:
return ret;
}
-static void exofs_write_super(struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- exofs_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
-}
-
static void _exofs_print_device(const char *msg, const char *dev_path,
struct osd_dev *od, u64 pid)
{
@@ -952,7 +942,6 @@ static const struct super_operations exofs_sops = {
.write_inode = exofs_write_inode,
.evict_inode = exofs_evict_inode,
.put_super = exofs_put_super,
- .write_super = exofs_write_super,
.sync_fs = exofs_sync_fs,
.statfs = exofs_statfs,
};
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9a4a5c48b1c9..a07597307fd1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3459,14 +3459,6 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
- *
- * Is this efficient/effective? Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O. But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out. One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory. It has the desired
- * effect.
*/
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index ff9bcdc5b0d5..8c892e93d8e7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -64,11 +64,6 @@ static int ext3_freeze(struct super_block *sb);
/*
* Wrappers for journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
*/
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -90,12 +85,6 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
return journal_start(journal, nblocks);
}
-/*
- * The only special thing we need to do here is to make sure that all
- * journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
int __ext3_journal_stop(const char *where, handle_t *handle)
{
struct super_block *sb;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6324f74e0342..dff171c3a123 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1970,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* This function can get called via...
* - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
- * - shrink_page_list via pdflush (no journal handle)
+ * - shrink_page_list via kswapd/direct reclaim (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have page with
@@ -4589,14 +4589,6 @@ static int ext4_expand_extra_isize(struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
- *
- * Is this efficient/effective? Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O. But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out. One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory. It has the desired
- * effect.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d76ec8277d3f..3e0851e4f468 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -326,11 +326,6 @@ static void ext4_put_nojournal(handle_t *handle)
/*
* Wrappers for jbd2_journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -356,12 +351,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
return jbd2_journal_start(journal, nblocks);
}
-/*
- * The only special thing we need to do here is to make sure that all
- * jbd2_journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 93d8d6c9494d..aba15f1b7ad2 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -703,13 +703,16 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
- if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
+ /*
+ * In auto invalidate mode, always update attributes on read.
+ * Otherwise, only update if we attempt to read past EOF (to ensure
+ * i_size is up to date).
+ */
+ if (fc->auto_inval_data ||
+ (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
int err;
- /*
- * If trying to read past EOF, make sure the i_size
- * attribute is up-to-date.
- */
err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
if (err)
return err;
@@ -1700,7 +1703,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
size_t n;
u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
- for (n = 0; n < count; n++) {
+ for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
return -ENOMEM;
max -= iov->iov_len;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 771fb6322c07..e24dd74e3068 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -484,6 +484,9 @@ struct fuse_conn {
/** Is fallocate not implemented by fs? */
unsigned no_fallocate:1;
+ /** Use enhanced/automatic page cache invalidation. */
+ unsigned auto_inval_data:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1cd61652018c..ce0a2838ccd0 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -197,6 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
loff_t oldsize;
+ struct timespec old_mtime;
spin_lock(&fc->lock);
if (attr_version != 0 && fi->attr_version > attr_version) {
@@ -204,15 +205,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
+ old_mtime = inode->i_mtime;
fuse_change_attributes_common(inode, attr, attr_valid);
oldsize = inode->i_size;
i_size_write(inode, attr->size);
spin_unlock(&fc->lock);
- if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
- truncate_pagecache(inode, oldsize, attr->size);
- invalidate_inode_pages2(inode->i_mapping);
+ if (S_ISREG(inode->i_mode)) {
+ bool inval = false;
+
+ if (oldsize != attr->size) {
+ truncate_pagecache(inode, oldsize, attr->size);
+ inval = true;
+ } else if (fc->auto_inval_data) {
+ struct timespec new_mtime = {
+ .tv_sec = attr->mtime,
+ .tv_nsec = attr->mtimensec,
+ };
+
+ /*
+ * Auto inval mode also checks and invalidates if mtime
+ * has changed.
+ */
+ if (!timespec_equal(&old_mtime, &new_mtime))
+ inval = true;
+ }
+
+ if (inval)
+ invalidate_inode_pages2(inode->i_mapping);
}
}
@@ -834,6 +855,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->big_writes = 1;
if (arg->flags & FUSE_DONT_MASK)
fc->dont_mask = 1;
+ if (arg->flags & FUSE_AUTO_INVAL_DATA)
+ fc->auto_inval_data = 1;
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -859,7 +882,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
- FUSE_FLOCK_LOCKS;
+ FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+ FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3a56c8d94de0..22255d96b27e 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
+ * potentially cause a busy-wait loop from the flusher thread and kswapd
* activity, but those code paths have their own higher-level
* throttling.
*/
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 5fd51a5833ff..b7ec224910c5 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -236,10 +236,10 @@ out:
* hfs_mdb_commit()
*
* Description:
- * This updates the MDB on disk (look also at hfs_write_super()).
+ * This updates the MDB on disk.
* It does not check, if the superblock has been modified, or
* if the filesystem has been mounted read-only. It is mainly
- * called by hfs_write_super() and hfs_btree_extend().
+ * called by hfs_sync_fs() and flush_mdb().
* Input Variable(s):
* struct hfs_mdb *mdb: Pointer to the hfs MDB
* int backup;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 425c2f2cf170..09357508ec9a 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If ext3_write_super() recently started a commit, then we
- * have to wait for completion of that transaction
+ * If commit has been started, then we have to wait for
+ * completion of that transaction.
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9a3c4c85594..8625da27eccf 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If ext3_write_super() recently started a commit, then we
- * have to wait for completion of that transaction
+ * If commit has been started, then we have to wait for
+ * completion of that transaction.
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 6522cac6057c..6a10812711c1 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -676,17 +676,13 @@ static const struct super_operations nilfs_sops = {
.alloc_inode = nilfs_alloc_inode,
.destroy_inode = nilfs_destroy_inode,
.dirty_inode = nilfs_dirty_inode,
- /* .write_inode = nilfs_write_inode, */
- /* .drop_inode = nilfs_drop_inode, */
.evict_inode = nilfs_evict_inode,
.put_super = nilfs_put_super,
- /* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
.freeze_fs = nilfs_freeze,
.unfreeze_fs = nilfs_unfreeze,
.statfs = nilfs_statfs,
.remount_fs = nilfs_remount,
- /* .umount_begin */
.show_options = nilfs_show_options
};
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 6eee4177807b..be1267a34cea 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -107,8 +107,6 @@ struct the_nilfs {
* used for
* - loading the latest checkpoint exclusively.
* - allocating a new full segment.
- * - protecting s_dirt in the super_block struct
- * (see nilfs_write_super) and the following fields.
*/
struct buffer_head *ns_sbh[2];
struct nilfs_super_block *ns_sbp[2];
diff --git a/fs/open.c b/fs/open.c
index f3d96e7e7b19..bc132e167d2d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -717,7 +717,7 @@ cleanup_all:
* here, so just reset the state.
*/
file_reset_write(f);
- mnt_drop_write(f->f_path.mnt);
+ __mnt_drop_write(f->f_path.mnt);
}
}
cleanup_file:
diff --git a/fs/super.c b/fs/super.c
index b05cf47463d0..0902cfa6a12e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -537,46 +537,6 @@ void drop_super(struct super_block *sb)
EXPORT_SYMBOL(drop_super);
/**
- * sync_supers - helper for periodic superblock writeback
- *
- * Call the write_super method if present on all dirty superblocks in
- * the system. This is for the periodic writeback used by most older
- * filesystems. For data integrity superblock writeback use
- * sync_filesystems() instead.
- *
- * Note: check the dirty flag before waiting, so we don't
- * hold up the sync while mounting a device. (The newly
- * mounted device won't need syncing.)
- */
-void sync_supers(void)
-{
- struct super_block *sb, *p = NULL;
-
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
- if (sb->s_op->write_super && sb->s_dirt) {
- sb->s_count++;
- spin_unlock(&sb_lock);
-
- down_read(&sb->s_umount);
- if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
- sb->s_op->write_super(sb);
- up_read(&sb->s_umount);
-
- spin_lock(&sb_lock);
- if (p)
- __put_super(p);
- p = sb;
- }
- }
- if (p)
- __put_super(p);
- spin_unlock(&sb_lock);
-}
-
-/**
* iterate_supers - call function for all active superblocks
* @f: function to call
* @arg: argument to pass to it
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 35389ca2d267..7bd6e72afd11 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -37,11 +37,11 @@
*
* A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
* implement. However, this is not true for 'ubifs_writepage()', which may be
- * called with @i_mutex unlocked. For example, when pdflush is doing background
- * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
- * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
- * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
- * we are only guaranteed that the page is locked.
+ * called with @i_mutex unlocked. For example, when the flusher thread is doing
+ * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
+ * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
+ * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
+ * 'ubifs_writepage()' we are only guaranteed that the page is locked.
*
* Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
* read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1c766c39c038..c3fa6c5327a3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
mutex_lock(&ui->ui_mutex);
/*
* Due to races between write-back forced by budgeting
- * (see 'sync_some_inodes()') and pdflush write-back, the inode may
+ * (see 'sync_some_inodes()') and background write-back, the inode may
* have already been synchronized, do not do this again. This might
* also happen if it was synchronized in an VFS operation, e.g.
* 'ubifs_link()'.
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 2c744c7a5b3d..26a92fc28a59 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -491,11 +491,11 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
acpi_status acpi_leave_sleep_state(u8 sleep_state);
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 3af87de6a68c..3d00bd5bd7e3 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -803,7 +803,7 @@ typedef u8 acpi_adr_space_type;
/* Sleep function dispatch */
-typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
+typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state);
struct acpi_sleep_functions {
ACPI_SLEEP_FUNCTION legacy_function;
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 7ff5c99b1638..c78bb997e2c6 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -213,9 +213,12 @@
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 58056865b8e9..dc3a8cd7db8a 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -964,6 +964,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
+/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
+#define RADEON_INFO_TIMESTAMP 0x11
struct drm_radeon_info {
uint32_t request;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index d9a754474878..fa217607c582 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -391,6 +391,7 @@ header-y += v4l2-dv-timings.h
header-y += v4l2-mediabus.h
header-y += v4l2-subdev.h
header-y += veth.h
+header-y += vfio.h
header-y += vhost.h
header-y += videodev2.h
header-y += virtio_9p.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3ad510b25283..4f2a76224509 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -96,7 +96,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
-void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c97c6b9cd38e..2a9a9abc9126 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -124,7 +124,6 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 3c80885fa829..d323a4b4143c 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -89,6 +89,12 @@
#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
+#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001
+#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002
+#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004
+#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008
+#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010
+#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020
#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */
#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
diff --git a/include/linux/can.h b/include/linux/can.h
index 018055efc034..e52958d7c2d1 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -74,20 +74,21 @@ struct can_frame {
/*
* defined bits for canfd_frame.flags
*
- * As the default for CAN FD should be to support the high data rate in the
- * payload section of the frame (HDR) and to support up to 64 byte in the
- * data section (EDL) the bits are only set in the non-default case.
- * Btw. as long as there's no real implementation for CAN FD network driver
- * these bits are only preliminary.
+ * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
+ * be set in the CAN frame bitstream on the wire. The EDL bit switch turns
+ * the CAN controllers bitstream processor into the CAN FD mode which creates
+ * two new options within the CAN FD frame specification:
*
- * RX: NOHDR/NOEDL - info about received CAN FD frame
- * ESI - bit from originating CAN controller
- * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller
- * ESI - bit is set by local CAN controller
+ * Bit Rate Switch - to indicate a second bitrate is/was used for the payload
+ * Error State Indicator - represents the error state of the transmitting node
+ *
+ * As the CANFD_ESI bit is internally generated by the transmitting CAN
+ * controller, only the CANFD_BRS bit is relevant for real CAN controllers when
+ * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
+ * sense for virtual CAN interfaces to test applications with echoed frames.
*/
-#define CANFD_NOHDR 0x01 /* frame without high data rate */
-#define CANFD_NOEDL 0x02 /* frame without extended data length */
-#define CANFD_ESI 0x04 /* error state indicator */
+#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
+#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
/**
* struct canfd_frame - CAN flexible data rate frame structure
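Under the renamed flags, a sender that wants the second (data phase) bitrate simply sets CANFD_BRS in the frame it writes. A minimal sketch over a raw CAN socket follows; the "can0" interface name and payload are placeholders, and CAN_RAW_FD_FRAMES from <linux/can/raw.h> is assumed to be available to enable CAN FD frames on the socket.

#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int send_canfd_brs(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct canfd_frame cf = { .can_id = 0x123, .len = 16, .flags = CANFD_BRS };
	struct ifreq ifr;
	int s, enable = 1, ret;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return -1;

	/* Allow struct canfd_frame (CANFD_MTU) on this raw socket. */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));

	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}

	memset(cf.data, 0xaa, cf.len);	/* 16 payload bytes, sent at the data bitrate */
	ret = write(s, &cf, CANFD_MTU) == CANFD_MTU ? 0 : -1;
	close(s);
	return ret;
}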
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 38dba16c4176..aa110476a95b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1491,7 +1491,6 @@ struct sb_writers {
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
- unsigned char s_dirt;
unsigned char s_blocksize_bits;
unsigned long s_blocksize;
loff_t s_maxbytes; /* Max file size */
@@ -1861,7 +1860,6 @@ struct super_operations {
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
- void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -2397,7 +2395,6 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
-extern void sync_supers(void);
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index af961d6f7ab1..642928cf57b4 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -306,9 +306,10 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type,
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
- u64 count, struct pt_regs *regs, void *head)
+ u64 count, struct pt_regs *regs, void *head,
+ struct task_struct *task)
{
- perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
+ perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
#endif
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9303348965fb..d8c713e148e3 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -57,6 +57,9 @@
*
* 7.19
* - add FUSE_FALLOCATE
+ *
+ * 7.20
+ * - add FUSE_AUTO_INVAL_DATA
*/
#ifndef _LINUX_FUSE_H
@@ -88,7 +91,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 19
+#define FUSE_KERNEL_MINOR_VERSION 20
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -163,10 +166,19 @@ struct fuse_file_lock {
/**
* INIT request/reply flags
*
+ * FUSE_ASYNC_READ: asynchronous read requests
* FUSE_POSIX_LOCKS: remote locking for POSIX file locks
+ * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported)
+ * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem
* FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
+ * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB
* FUSE_DONT_MASK: don't apply umask to file mode on create operations
+ * FUSE_SPLICE_WRITE: kernel supports splice write on the device
+ * FUSE_SPLICE_MOVE: kernel supports splice move on the device
+ * FUSE_SPLICE_READ: kernel supports splice read on the device
* FUSE_FLOCK_LOCKS: remote locking for BSD style file locks
+ * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories
+ * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -175,7 +187,12 @@ struct fuse_file_lock {
#define FUSE_EXPORT_SUPPORT (1 << 4)
#define FUSE_BIG_WRITES (1 << 5)
#define FUSE_DONT_MASK (1 << 6)
+#define FUSE_SPLICE_WRITE (1 << 7)
+#define FUSE_SPLICE_MOVE (1 << 8)
+#define FUSE_SPLICE_READ (1 << 9)
#define FUSE_FLOCK_LOCKS (1 << 10)
+#define FUSE_HAS_IOCTL_DIR (1 << 11)
+#define FUSE_AUTO_INVAL_DATA (1 << 12)
/**
* CUSE INIT request/reply flags
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f30971858..305f23cd7cff 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -22,7 +22,7 @@
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 54d6d690073c..7e83370e6fd2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -20,6 +20,7 @@
#define __LINUX_IOMMU_H
#include <linux/errno.h>
+#include <linux/types.h>
#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
@@ -30,6 +31,7 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct notifier_block;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 379e433e15e0..879db26ec401 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -369,6 +369,7 @@ struct ipv6_pinfo {
__u8 rcv_tclass;
__u32 dst_cookie;
+ __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 553fb66da130..216b0ba109d7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -349,6 +349,7 @@ enum {
IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
};
/* This include will go away once we isolated irq_desc usage to core code */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 265e2c3cbd1c..82680541576d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -39,9 +39,6 @@
# error Invalid value of HZ.
#endif
-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-
/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
* improve accuracy by shifting LSH bits, hence calculating:
* (NOM << LSH) / DEN
@@ -54,18 +51,30 @@
#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+#ifdef CLOCK_TICK_RATE
+/* LATCH is used in the interval timer and ftape setup. */
+# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+/*
+ * HZ is the requested value. However the CLOCK_TICK_RATE may not allow
+ * for exactly HZ. So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy)
+ */
+# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))
+#else
+# define SHIFTED_HZ (HZ << 8)
+#endif
-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8))
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
-/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
+/*
+ * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and
+ * a value TUSEC for TICK_USEC (can be set bij adjtimex)
+ */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8))
/* some arch's have a small-data section that can be accessed register-relative
* but that can only take up to, say, 4-byte variables. jiffies being part of
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 064725854db8..42d9e863a313 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -75,8 +75,6 @@ extern const char *kdb_diemsg;
#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
-#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when
- * kdb is off */
#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
* kdb is disabled */
#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eb06e58bed0b..a9db4f33407f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1300,6 +1300,8 @@ struct net_device {
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
+#define GSO_MAX_SEGS 65535
+ u16 gso_max_segs;
#ifdef CONFIG_DCB
/* Data Center Bridging netlink ops */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 76c5c8b724a7..7602ccb3f40e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1272,7 +1272,8 @@ static inline bool perf_paranoid_kernel(void)
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
- struct hlist_head *head, int rctx);
+ struct hlist_head *head, int rctx,
+ struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c147e7024f11..b8c86648a2f9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -334,14 +334,6 @@ static inline void lockup_detector_init(void)
}
#endif
-#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND)
-void lockup_detector_bootcpu_resume(void);
-#else
-static inline void lockup_detector_bootcpu_resume(void)
-{
-}
-#endif
-
#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
diff --git a/include/linux/security.h b/include/linux/security.h
index 4e5a73cdbbef..3dea6a9d568f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1242,8 +1242,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself
* to the @parent process for tracing.
- * The parent process will still have to undergo the ptrace_access_check
- * checks before it is allowed to trace this one.
* @parent contains the task_struct structure for debugger process.
* Return 0 if permission is granted.
* @capget:
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 99bc88b1fc02..7c5ceb20e03a 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -232,7 +232,7 @@ struct timex {
* estimated error = NTP dispersion.
*/
extern unsigned long tick_usec; /* USER_HZ period (usec) */
-extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
+extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
extern void ntp_init(void);
extern void ntp_clear(void);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e91cd43394df..fec12d667211 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
+ | 1*SD_PREFER_SIBLING \
, \
.last_balance = jiffies, \
.balance_interval = 1, \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index c66fe3332d83..50c3e8fa06a8 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -104,7 +104,6 @@ static inline void wait_on_inode(struct inode *inode)
wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
-
/*
* mm/page-writeback.c
*/
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 493fa0c79005..3d254e10ff30 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,7 @@ enum ieee80211_band {
* is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
* is not permitted.
+ * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -104,6 +105,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_RADAR = 1<<3,
IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ IEEE80211_CHAN_NO_OFDM = 1<<6,
};
#define IEEE80211_CHAN_NO_HT40 \
diff --git a/include/net/codel.h b/include/net/codel.h
index 550debfc2403..389cf621161d 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
}
}
} else if (drop) {
+ u32 delta;
+
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
@@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
* assume that the drop rate that controlled the queue on the
* last cycle is a good starting point to control it now.
*/
- if (codel_time_before(now - vars->drop_next,
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
16 * params->interval)) {
- vars->count = (vars->count - vars->lastcount) | 1;
+ vars->count = delta;
/* we dont care if rec_inv_sqrt approximation
* is not very precise :
* Next Newton steps will correct it quadratically.
diff --git a/include/net/dst.h b/include/net/dst.h
index baf597890064..621e3513ef5e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,7 +110,7 @@ struct dst_entry {
};
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[RTAX_MAX];
+extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
#define __DST_METRICS_PTR(Y) \
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5ee66f517b4f..ba1d3615acbb 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -39,6 +39,7 @@ struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
+ void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83b567fe1941..613cfa401672 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -249,13 +249,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
return flags;
}
-static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
-
- dst_hold(dst);
- sk->sk_rx_dst = dst;
- inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-}
-
#endif /* _INET_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index bd5e444a19ce..5a5d84d3d2c6 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -120,7 +120,7 @@ extern struct sk_buff *__ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork);
-extern int ip_send_skb(struct sk_buff *skb);
+extern int ip_send_skb(struct net *net, struct sk_buff *skb);
extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
extern void ip_flush_pending_frames(struct sock *sk);
extern struct sk_buff *ip_make_skb(struct sock *sk,
diff --git a/include/net/sock.h b/include/net/sock.h
index b3730239bf18..72132aef53fc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,6 +218,7 @@ struct cg_proto;
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
+ * @sk_gso_max_segs: Maximum number of GSO segments
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -338,6 +339,7 @@ struct sock {
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
+ u16 sk_gso_max_segs;
int sk_rcvlowat;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e19124b84cd2..1f000ffe7075 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,6 +464,7 @@ extern int tcp_disconnect(struct sock *sk, int flags);
void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d9509eb29b80..62b619e82a90 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -213,6 +213,9 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct tasklet_hrtimer mtimer;
+ /* used to fix curlft->add_time when changing date */
+ long saved_tmo;
+
/* Last used time */
unsigned long lastused;
@@ -238,6 +241,7 @@ static inline struct net *xs_net(struct xfrm_state *x)
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER 1
+#define XFRM_SOFT_EXPIRE 2
enum {
XFRM_STATE_VOID,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ea7a2035456d..5a8671e8a67f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -73,6 +73,9 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
+ )
+ TP_perf_assign(
+ __perf_task(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -325,6 +328,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,
)
TP_perf_assign(
__perf_count(delay);
+ __perf_task(tsk);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6bc2faaf261..a763888a36f9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -712,6 +712,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef __perf_count
#define __perf_count(c) __count = (c)
+#undef __perf_task
+#define __perf_task(t) __task = (t)
+
#undef TP_perf_assign
#define TP_perf_assign(args...) args
@@ -725,6 +728,7 @@ perf_trace_##call(void *__data, proto) \
struct ftrace_raw_##call *entry; \
struct pt_regs __regs; \
u64 __addr = 0, __count = 1; \
+ struct task_struct *__task = NULL; \
struct hlist_head *head; \
int __entry_size; \
int __data_size; \
@@ -752,7 +756,7 @@ perf_trace_##call(void *__data, proto) \
\
head = this_cpu_ptr(event_call->perf_events); \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, &__regs, head); \
+ __count, &__regs, head, __task); \
}
/*
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8b68ce78ff17..be7b33b73d30 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -12,6 +12,7 @@
#include <linux/kdb.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/hardirq.h>
#include "kdb_private.h"
#include "../debug_core.h"
@@ -52,6 +53,9 @@ int kdb_stub(struct kgdb_state *ks)
if (atomic_read(&kgdb_setting_breakpoint))
reason = KDB_REASON_KEYBOARD;
+ if (in_nmi())
+ reason = KDB_REASON_NMI;
+
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
reason = KDB_REASON_BREAK;
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index bb9520f0f6ff..0a69d2adc4f3 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -715,9 +715,6 @@ kdb_printit:
/* check for having reached the LINES number of printed lines */
if (kdb_nextline == linecount) {
char buf1[16] = "";
-#if defined(CONFIG_SMP)
- char buf2[32];
-#endif
/* Watch out for recursion here. Any routine that calls
* kdb_printf will come back through here. And kdb_read
@@ -732,14 +729,6 @@ kdb_printit:
if (moreprompt == NULL)
moreprompt = "more> ";
-#if defined(CONFIG_SMP)
- if (strchr(moreprompt, '%')) {
- sprintf(buf2, moreprompt, get_cpu());
- put_cpu();
- moreprompt = buf2;
- }
-#endif
-
kdb_input_flush();
c = console_drivers;
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 1f91413edb87..31df1706b9a9 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -139,11 +139,10 @@ static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
static char *__env[] = {
#if defined(CONFIG_SMP)
"PROMPT=[%d]kdb> ",
- "MOREPROMPT=[%d]more> ",
#else
"PROMPT=kdb> ",
- "MOREPROMPT=more> ",
#endif
+ "MOREPROMPT=more> ",
"RADIX=16",
"MDCOUNT=8", /* lines of md output */
KDB_PLATFORM_ENV,
@@ -1236,18 +1235,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*cmdbuf = '\0';
*(cmd_hist[cmd_head]) = '\0';
- if (KDB_FLAG(ONLY_DO_DUMP)) {
- /* kdb is off but a catastrophic error requires a dump.
- * Take the dump and reboot.
- * Turn on logging so the kdb output appears in the log
- * buffer in the dump.
- */
- const char *setargs[] = { "set", "LOGGING", "1" };
- kdb_set(2, setargs);
- kdb_reboot(0, NULL);
- /*NOTREACHED*/
- }
-
do_full_getstr:
#if defined(CONFIG_SMP)
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 6581a040f399..98d4597f43d6 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -153,7 +153,8 @@ put_callchain_entry(int rctx)
put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
int rctx;
struct perf_callchain_entry *entry;
@@ -178,6 +179,12 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
}
if (regs) {
+ /*
+ * Disallow cross-task user callchains.
+ */
+ if (event->ctx->task && event->ctx->task != current)
+ goto exit_put;
+
perf_callchain_store(entry, PERF_CONTEXT_USER);
perf_callchain_user(entry, regs);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f1cf0edeb39a..b7935fcec7d9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4039,7 +4039,7 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
- data->callchain = perf_callchain(regs);
+ data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
@@ -5209,7 +5209,8 @@ static int perf_tp_event_match(struct perf_event *event,
}
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
- struct pt_regs *regs, struct hlist_head *head, int rctx)
+ struct pt_regs *regs, struct hlist_head *head, int rctx,
+ struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
@@ -5228,6 +5229,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
perf_swevent_event(event, count, &data, regs);
}
+ /*
+ * If we got specified a target task, also iterate its context and
+ * deliver this event there too.
+ */
+ if (task && task != current) {
+ struct perf_event_context *ctx;
+ struct trace_entry *entry = record;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
+ if (!ctx)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+ if (event->attr.config != entry->type)
+ continue;
+ if (perf_tp_event_match(event, &data, regs))
+ perf_swevent_event(event, count, &data, regs);
+ }
+unlock:
+ rcu_read_unlock();
+ }
+
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f90afc..a096c19f2c2a 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -101,7 +101,8 @@ __output_copy(struct perf_output_handle *handle,
}
/* Callchain handling */
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
diff --git a/kernel/futex.c b/kernel/futex.c
index e2b0fb9a0b3b..3717e7b306e0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
- * complete the acquisition of the rt_mutex prior to returning to userspace.
- * This ensures the rt_mutex maintains an owner when it has waiters; without
- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
- * need to.
+ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
+ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
+ * without one, the pi logic would not know which task to boost/deboost, if
+ * there was a need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct futex_q q = futex_q_init;
int res, ret;
+ if (uaddr == uaddr2)
+ return -EINVAL;
+
if (!bitset)
return -EINVAL;
@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
- WARN_ON(!&q.pi_state);
+ WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
- if (rt_mutex_owner(pi_mutex) == current)
+ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a8e8f059627..4c69326aa773 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -944,6 +944,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
/*
+ * Drivers are often written to work w/o knowledge about the
+ * underlying irq chip implementation, so a request for a
+ * threaded irq without a primary hard irq context handler
+ * requires the ONESHOT flag to be set. Some irq chips like
+ * MSI based interrupts are per se one shot safe. Check the
+ * chip flags, so we can avoid the unmask dance at the end of
+ * the threaded handler for those.
+ */
+ if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
+ new->flags &= ~IRQF_ONESHOT;
+
+ /*
* The following block of code has to be executed atomically
*/
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1017,7 +1029,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
new->thread_mask = 1 << ffz(thread_mask);
- } else if (new->handler == irq_default_primary_handler) {
+ } else if (new->handler == irq_default_primary_handler &&
+ !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
/*
* The interrupt was requested with handler = NULL, so
* we use the default primary handler for it. But it
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1da39ea248fd..c8b7446b27df 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -178,9 +178,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
- /* Kick the lockup detector */
- lockup_detector_bootcpu_resume();
-
Enable_cpus:
enable_nonboot_cpus();
diff --git a/kernel/printk.c b/kernel/printk.c
index 6a76ab9d4476..66a2ea37b576 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
+ prev = msg->flags;
idx = log_next(idx);
seq++;
}
@@ -1046,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
+ prev = msg->flags;
idx = log_next(idx);
seq++;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d325c4b2dcbb..82ad284f823b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4340,9 +4340,7 @@ recheck:
*/
if (unlikely(policy == p->policy && (!rt_policy(policy) ||
param->sched_priority == p->rt_priority))) {
-
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
return 0;
}
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586fdf660..23aa789c53ee 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
{
- int idx = 0;
- int task_pri = convert_prio(p->prio);
+ int idx = 0;
+ int task_pri = convert_prio(p->prio);
if (task_pri >= MAX_RT_PRIO)
return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
*/
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
- int *currpri = &cp->cpu_to_pri[cpu];
- int oldpri = *currpri;
- int do_mb = 0;
+ int *currpri = &cp->cpu_to_pri[cpu];
+ int oldpri = *currpri;
+ int do_mb = 0;
newpri = convert_prio(newpri);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952..d0cc03b3e70b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
int new_dst_cpu;
enum cpu_idle_type idle;
long imbalance;
+ /* The set of CPUs under consideration for load-balancing */
+ struct cpumask *cpus;
+
unsigned int flags;
unsigned int loop;
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
- int local_group, const struct cpumask *cpus,
- int *balance, struct sg_lb_stats *sgs)
+ int local_group, int *balance, struct sg_lb_stats *sgs)
{
unsigned long nr_running, max_nr_running, min_nr_running;
unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
max_nr_running = 0;
min_nr_running = ~0UL;
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+ for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env,
- const struct cpumask *cpus,
- int *balance, struct sd_lb_stats *sds)
+ int *balance, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(env, sg, load_idx, local_group,
- cpus, balance, &sgs);
+ update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
if (local_group && !(*balance))
return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* to restore balance.
*
* @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
*
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
{
struct sd_lb_stats sds;
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
* Compute the various statistics relavent for load balancing at
* this level.
*/
- update_sd_lb_stats(env, cpus, balance, &sds);
+ update_sd_lb_stats(env, balance, &sds);
/*
* this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static struct rq *find_busiest_queue(struct lb_env *env,
- struct sched_group *group,
- const struct cpumask *cpus)
+ struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- if (!cpumask_test_cpu(i, cpus))
+ if (!cpumask_test_cpu(i, env->cpus))
continue;
rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.dst_grpmask = sched_group_cpus(sd->groups),
.idle = idle,
.loop_break = sched_nr_migrate_break,
+ .cpus = cpus,
};
cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
schedstat_inc(sd, lb_count[idle]);
redo:
- group = find_busiest_group(&env, cpus, balance);
+ group = find_busiest_group(&env, balance);
if (*balance == 0)
goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
goto out_balanced;
}
- busiest = find_busiest_queue(&env, group, cpus);
+ busiest = find_busiest_queue(&env, group);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154e0408..46da0537c10b 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -37,7 +37,7 @@
* requested HZ value. It is also not recommended
* for "tick-less" systems.
*/
-#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
+#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ))
/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
* conversion, the .shift value could be zero. However
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b7fbadc5c973..24174b4d669b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -28,7 +28,7 @@ DEFINE_SPINLOCK(ntp_lock);
/* USER_HZ period (usecs): */
unsigned long tick_usec = TICK_USEC;
-/* ACTHZ period (nsecs): */
+/* SHIFTED_HZ period (nsecs): */
unsigned long tick_nsec;
static u64 tick_length;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f045cc50832d..e16af197a2bc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -65,14 +65,14 @@ struct timekeeper {
* used instead.
*/
struct timespec wall_to_monotonic;
- /* time spent in suspend */
- struct timespec total_sleep_time;
- /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
- struct timespec raw_time;
/* Offset clock monotonic -> clock realtime */
ktime_t offs_real;
+ /* time spent in suspend */
+ struct timespec total_sleep_time;
/* Offset clock monotonic -> clock boottime */
ktime_t offs_boot;
+ /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+ struct timespec raw_time;
/* Seqlock for all timekeeper values */
seqlock_t lock;
};
@@ -108,13 +108,38 @@ static struct timespec tk_xtime(struct timekeeper *tk)
static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
tk->xtime_sec = ts->tv_sec;
- tk->xtime_nsec = ts->tv_nsec << tk->shift;
+ tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
tk->xtime_sec += ts->tv_sec;
- tk->xtime_nsec += ts->tv_nsec << tk->shift;
+ tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+}
+
+static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
+{
+ struct timespec tmp;
+
+ /*
+ * Verify consistency of: offset_real = -wall_to_monotonic
+ * before modifying anything
+ */
+ set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
+ -tk->wall_to_monotonic.tv_nsec);
+ WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
+ tk->wall_to_monotonic = wtm;
+ set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
+ tk->offs_real = timespec_to_ktime(tmp);
+}
+
+static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
+{
+ /* Verify consistency before modifying */
+ WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
+
+ tk->total_sleep_time = t;
+ tk->offs_boot = timespec_to_ktime(t);
}
/**
@@ -217,14 +242,6 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
return nsec + arch_gettimeoffset();
}
-static void update_rt_offset(struct timekeeper *tk)
-{
- struct timespec tmp, *wtm = &tk->wall_to_monotonic;
-
- set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
- tk->offs_real = timespec_to_ktime(tmp);
-}
-
/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
@@ -234,12 +251,10 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp)
tk->ntp_error = 0;
ntp_clear();
}
- update_rt_offset(tk);
xt = tk_xtime(tk);
update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
}
-
/**
* timekeeping_forward_now - update clock to the current time
*
@@ -277,18 +292,19 @@ static void timekeeping_forward_now(struct timekeeper *tk)
*/
void getnstimeofday(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs = 0;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- ts->tv_sec = timekeeper.xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(&timekeeper);
+ ts->tv_sec = tk->xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(tk);
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
timespec_add_ns(ts, nsecs);
}
@@ -296,19 +312,18 @@ EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned int seq;
s64 secs, nsecs;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
- secs = timekeeper.xtime_sec +
- timekeeper.wall_to_monotonic.tv_sec;
- nsecs = timekeeping_get_ns(&timekeeper) +
- timekeeper.wall_to_monotonic.tv_nsec;
+ seq = read_seqbegin(&tk->lock);
+ secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -327,18 +342,19 @@ EXPORT_SYMBOL_GPL(ktime_get);
*/
void ktime_get_ts(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec tomono;
unsigned int seq;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
- ts->tv_sec = timekeeper.xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(&timekeeper);
- tomono = timekeeper.wall_to_monotonic;
+ seq = read_seqbegin(&tk->lock);
+ ts->tv_sec = tk->xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(tk);
+ tomono = tk->wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
@@ -358,22 +374,23 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
*/
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs_raw, nsecs_real;
WARN_ON_ONCE(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- *ts_raw = timekeeper.raw_time;
- ts_real->tv_sec = timekeeper.xtime_sec;
+ *ts_raw = tk->raw_time;
+ ts_real->tv_sec = tk->xtime_sec;
ts_real->tv_nsec = 0;
- nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
- nsecs_real = timekeeping_get_ns(&timekeeper);
+ nsecs_raw = timekeeping_get_ns_raw(tk);
+ nsecs_real = timekeeping_get_ns(tk);
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -406,28 +423,28 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(const struct timespec *tv)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec ts_delta, xt;
unsigned long flags;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
- xt = tk_xtime(&timekeeper);
+ xt = tk_xtime(tk);
ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
- timekeeper.wall_to_monotonic =
- timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
+ tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
- tk_set_xtime(&timekeeper, tv);
+ tk_set_xtime(tk, tv);
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -436,7 +453,6 @@ int do_settimeofday(const struct timespec *tv)
}
EXPORT_SYMBOL(do_settimeofday);
-
/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
* @tv: pointer to the timespec variable containing the offset
@@ -445,23 +461,23 @@ EXPORT_SYMBOL(do_settimeofday);
*/
int timekeeping_inject_offset(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
- tk_xtime_add(&timekeeper, ts);
- timekeeper.wall_to_monotonic =
- timespec_sub(timekeeper.wall_to_monotonic, *ts);
+ tk_xtime_add(tk, ts);
+ tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -477,23 +493,24 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
*/
static int change_clocksource(void *data)
{
+ struct timekeeper *tk = &timekeeper;
struct clocksource *new, *old;
unsigned long flags;
new = (struct clocksource *) data;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
if (!new->enable || new->enable(new) == 0) {
- old = timekeeper.clock;
- tk_setup_internals(&timekeeper, new);
+ old = tk->clock;
+ tk_setup_internals(tk, new);
if (old->disable)
old->disable(old);
}
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
return 0;
}
@@ -507,7 +524,9 @@ static int change_clocksource(void *data)
*/
void timekeeping_notify(struct clocksource *clock)
{
- if (timekeeper.clock == clock)
+ struct timekeeper *tk = &timekeeper;
+
+ if (tk->clock == clock)
return;
stop_machine(change_clocksource, clock, NULL);
tick_clock_notify();
@@ -536,35 +555,36 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
*/
void getrawmonotonic(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
- nsecs = timekeeping_get_ns_raw(&timekeeper);
- *ts = timekeeper.raw_time;
+ seq = read_seqbegin(&tk->lock);
+ nsecs = timekeeping_get_ns_raw(tk);
+ *ts = tk->raw_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
-
/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
int ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+ ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
return ret;
}
@@ -574,15 +594,16 @@ int timekeeping_valid_for_hres(void)
*/
u64 timekeeping_max_deferment(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- ret = timekeeper.clock->max_idle_ns;
+ ret = tk->clock->max_idle_ns;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
return ret;
}
@@ -622,46 +643,43 @@ void __attribute__((weak)) read_boot_clock(struct timespec *ts)
*/
void __init timekeeping_init(void)
{
+ struct timekeeper *tk = &timekeeper;
struct clocksource *clock;
unsigned long flags;
- struct timespec now, boot;
+ struct timespec now, boot, tmp;
read_persistent_clock(&now);
read_boot_clock(&boot);
- seqlock_init(&timekeeper.lock);
+ seqlock_init(&tk->lock);
ntp_init();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
- tk_setup_internals(&timekeeper, clock);
+ tk_setup_internals(tk, clock);
- tk_set_xtime(&timekeeper, &now);
- timekeeper.raw_time.tv_sec = 0;
- timekeeper.raw_time.tv_nsec = 0;
+ tk_set_xtime(tk, &now);
+ tk->raw_time.tv_sec = 0;
+ tk->raw_time.tv_nsec = 0;
if (boot.tv_sec == 0 && boot.tv_nsec == 0)
- boot = tk_xtime(&timekeeper);
-
- set_normalized_timespec(&timekeeper.wall_to_monotonic,
- -boot.tv_sec, -boot.tv_nsec);
- update_rt_offset(&timekeeper);
- timekeeper.total_sleep_time.tv_sec = 0;
- timekeeper.total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ boot = tk_xtime(tk);
+
+ set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
+ tk_set_wall_to_mono(tk, tmp);
+
+ tmp.tv_sec = 0;
+ tmp.tv_nsec = 0;
+ tk_set_sleep_time(tk, tmp);
+
+ write_sequnlock_irqrestore(&tk->lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
-static void update_sleep_time(struct timespec t)
-{
- timekeeper.total_sleep_time = t;
- timekeeper.offs_boot = timespec_to_ktime(t);
-}
-
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec delta value
@@ -677,13 +695,11 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
"sleep delta value!\n");
return;
}
-
tk_xtime_add(tk, delta);
- tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
- update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
+ tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
+ tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}
-
/**
* timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values
* @delta: pointer to a timespec delta value
@@ -696,6 +712,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
*/
void timekeeping_inject_sleeptime(struct timespec *delta)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec ts;
@@ -704,21 +721,20 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
- __timekeeping_inject_sleeptime(&timekeeper, delta);
+ __timekeeping_inject_sleeptime(tk, delta);
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
}
-
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
*
@@ -728,6 +744,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
*/
static void timekeeping_resume(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec ts;
@@ -735,18 +752,18 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
- __timekeeping_inject_sleeptime(&timekeeper, &ts);
+ __timekeeping_inject_sleeptime(tk, &ts);
}
/* re-base the last cycle value */
- timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
- timekeeper.ntp_error = 0;
+ tk->clock->cycle_last = tk->clock->read(tk->clock);
+ tk->ntp_error = 0;
timekeeping_suspended = 0;
- timekeeping_update(&timekeeper, false);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ timekeeping_update(tk, false);
+ write_sequnlock_irqrestore(&tk->lock, flags);
touch_softlockup_watchdog();
@@ -758,14 +775,15 @@ static void timekeeping_resume(void)
static int timekeeping_suspend(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec delta, delta_delta;
static struct timespec old_delta;
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now(&timekeeper);
+ write_seqlock_irqsave(&tk->lock, flags);
+ timekeeping_forward_now(tk);
timekeeping_suspended = 1;
/*
@@ -774,7 +792,7 @@ static int timekeeping_suspend(void)
* try to compensate so the difference in system time
* and persistent_clock time stays close to constant.
*/
- delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
+ delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
delta_delta = timespec_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
@@ -787,7 +805,7 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -898,27 +916,29 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
* the error. This causes the likely below to be unlikely.
*
* The proper fix is to avoid rounding up by using
- * the high precision timekeeper.xtime_nsec instead of
+ * the high precision tk->xtime_nsec instead of
* xtime.tv_nsec everywhere. Fixing this will take some
* time.
*/
if (likely(error <= interval))
adj = 1;
else
- adj = timekeeping_bigadjust(tk, error, &interval,
- &offset);
- } else if (error < -interval) {
- /* See comment above, this is just switched for the negative */
- error >>= 2;
- if (likely(error >= -interval)) {
- adj = -1;
- interval = -interval;
- offset = -offset;
- } else
- adj = timekeeping_bigadjust(tk, error, &interval,
- &offset);
- } else
- return;
+ adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+ } else {
+ if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
+ error >>= 2;
+ if (likely(error >= -interval)) {
+ adj = -1;
+ interval = -interval;
+ offset = -offset;
+ } else {
+ adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+ }
+ } else {
+ goto out_adjust;
+ }
+ }
if (unlikely(tk->clock->maxadj &&
(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
@@ -981,6 +1001,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
tk->xtime_nsec -= offset;
tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+out_adjust:
/*
* It may be possible that when we entered this function, xtime_nsec
* was very small. Further, if we're slightly speeding the clocksource
@@ -1003,7 +1024,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
-
/**
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
@@ -1024,15 +1044,21 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
/* Figure out if its a leap sec and apply if needed */
leap = second_overflow(tk->xtime_sec);
- tk->xtime_sec += leap;
- tk->wall_to_monotonic.tv_sec -= leap;
- if (leap)
- clock_was_set_delayed();
+ if (unlikely(leap)) {
+ struct timespec ts;
+
+ tk->xtime_sec += leap;
+
+ ts.tv_sec = leap;
+ ts.tv_nsec = 0;
+ tk_set_wall_to_mono(tk,
+ timespec_sub(tk->wall_to_monotonic, ts));
+ clock_was_set_delayed();
+ }
}
}
-
/**
* logarithmic_accumulation - shifted accumulation of cycles
*
@@ -1076,7 +1102,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
return offset;
}
-
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
@@ -1084,21 +1109,22 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
static void update_wall_time(void)
{
struct clocksource *clock;
+ struct timekeeper *tk = &timekeeper;
cycle_t offset;
int shift = 0, maxshift;
unsigned long flags;
s64 remainder;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
goto out;
- clock = timekeeper.clock;
+ clock = tk->clock;
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
- offset = timekeeper.cycle_interval;
+ offset = tk->cycle_interval;
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
@@ -1111,19 +1137,19 @@ static void update_wall_time(void)
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
- shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+ shift = ilog2(offset) - ilog2(tk->cycle_interval);
shift = max(0, shift);
/* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
- while (offset >= timekeeper.cycle_interval) {
- offset = logarithmic_accumulation(&timekeeper, offset, shift);
- if(offset < timekeeper.cycle_interval<<shift)
+ while (offset >= tk->cycle_interval) {
+ offset = logarithmic_accumulation(tk, offset, shift);
+ if (offset < tk->cycle_interval<<shift)
shift--;
}
/* correct the clock when NTP error is too big */
- timekeeping_adjust(&timekeeper, offset);
+ timekeeping_adjust(tk, offset);
/*
@@ -1135,21 +1161,21 @@ static void update_wall_time(void)
* the vsyscall implementations are converted to use xtime_nsec
* (shifted nanoseconds), this can be killed.
*/
- remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
- timekeeper.xtime_nsec -= remainder;
- timekeeper.xtime_nsec += 1 << timekeeper.shift;
- timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
+ remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
+ tk->xtime_nsec -= remainder;
+ tk->xtime_nsec += 1 << tk->shift;
+ tk->ntp_error += remainder << tk->ntp_error_shift;
/*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
- accumulate_nsecs_to_secs(&timekeeper);
+ accumulate_nsecs_to_secs(tk);
- timekeeping_update(&timekeeper, false);
+ timekeeping_update(tk, false);
out:
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
}
@@ -1166,18 +1192,18 @@ out:
*/
void getboottime(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec boottime = {
- .tv_sec = timekeeper.wall_to_monotonic.tv_sec +
- timekeeper.total_sleep_time.tv_sec,
- .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
- timekeeper.total_sleep_time.tv_nsec
+ .tv_sec = tk->wall_to_monotonic.tv_sec +
+ tk->total_sleep_time.tv_sec,
+ .tv_nsec = tk->wall_to_monotonic.tv_nsec +
+ tk->total_sleep_time.tv_nsec
};
set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
-
/**
* get_monotonic_boottime - Returns monotonic time since boot
* @ts: pointer to the timespec to be set
@@ -1189,19 +1215,20 @@ EXPORT_SYMBOL_GPL(getboottime);
*/
void get_monotonic_boottime(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec tomono, sleep;
unsigned int seq;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
- ts->tv_sec = timekeeper.xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(&timekeeper);
- tomono = timekeeper.wall_to_monotonic;
- sleep = timekeeper.total_sleep_time;
+ seq = read_seqbegin(&tk->lock);
+ ts->tv_sec = tk->xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(tk);
+ tomono = tk->wall_to_monotonic;
+ sleep = tk->total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
@@ -1231,31 +1258,38 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
*/
void monotonic_to_bootbased(struct timespec *ts)
{
- *ts = timespec_add(*ts, timekeeper.total_sleep_time);
+ struct timekeeper *tk = &timekeeper;
+
+ *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
- return timekeeper.xtime_sec;
+ struct timekeeper *tk = &timekeeper;
+
+ return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return tk_xtime(&timekeeper);
+ struct timekeeper *tk = &timekeeper;
+
+ return tk_xtime(tk);
}
struct timespec current_kernel_time(void)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec now;
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- now = tk_xtime(&timekeeper);
- } while (read_seqretry(&timekeeper.lock, seq));
+ now = tk_xtime(tk);
+ } while (read_seqretry(&tk->lock, seq));
return now;
}
@@ -1263,15 +1297,16 @@ EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec now, mono;
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- now = tk_xtime(&timekeeper);
- mono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ now = tk_xtime(tk);
+ mono = tk->wall_to_monotonic;
+ } while (read_seqretry(&tk->lock, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1300,14 +1335,15 @@ void do_timer(unsigned long ticks)
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
struct timespec *wtom, struct timespec *sleep)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
- *xtim = tk_xtime(&timekeeper);
- *wtom = timekeeper.wall_to_monotonic;
- *sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ seq = read_seqbegin(&tk->lock);
+ *xtim = tk_xtime(tk);
+ *wtom = tk->wall_to_monotonic;
+ *sleep = tk->total_sleep_time;
+ } while (read_seqretry(&tk->lock, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1321,19 +1357,20 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
*/
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
+ struct timekeeper *tk = &timekeeper;
ktime_t now;
unsigned int seq;
u64 secs, nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- secs = timekeeper.xtime_sec;
- nsecs = timekeeping_get_ns(&timekeeper);
+ secs = tk->xtime_sec;
+ nsecs = timekeeping_get_ns(tk);
- *offs_real = timekeeper.offs_real;
- *offs_boot = timekeeper.offs_boot;
- } while (read_seqretry(&timekeeper.lock, seq));
+ *offs_real = tk->offs_real;
+ *offs_boot = tk->offs_boot;
+ } while (read_seqretry(&tk->lock, seq));
now = ktime_add_ns(ktime_set(secs, 0), nsecs);
now = ktime_sub(now, *offs_real);
@@ -1346,19 +1383,19 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
*/
ktime_t ktime_get_monotonic_offset(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
struct timespec wtom;
do {
- seq = read_seqbegin(&timekeeper.lock);
- wtom = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ seq = read_seqbegin(&tk->lock);
+ wtom = tk->wall_to_monotonic;
+ } while (read_seqretry(&tk->lock, seq));
return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
-
/**
* xtime_update() - advances the timekeeping infrastructure
* @ticks: number of ticks that have elapsed since the last call.
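The timekeeping hunks above repeatedly snapshot several fields inside a read_seqbegin()/read_seqretry() loop, so readers never block the writer and simply retry if an update raced with them. For reference, a small user-space sketch of the same retry idiom (illustrative only, not kernel code; the type and function names are made up, and a production seqlock needs stricter memory ordering than shown here):

#include <stdatomic.h>
#include <stdio.h>

struct sample_clock {
        atomic_uint seq;        /* even: stable; odd: writer in progress */
        long sec;
        long nsec;
};

static unsigned int read_begin(struct sample_clock *c)
{
        unsigned int s;

        while ((s = atomic_load(&c->seq)) & 1u)
                ;               /* writer in progress, wait */
        return s;
}

static int read_retry(struct sample_clock *c, unsigned int s)
{
        return atomic_load(&c->seq) != s;
}

static void clock_read(struct sample_clock *c, long *sec, long *nsec)
{
        unsigned int s;

        do {
                s = read_begin(c);
                *sec = c->sec;          /* copy every field inside the loop */
                *nsec = c->nsec;
        } while (read_retry(c, s));     /* a writer raced with us: try again */
}

static void clock_write(struct sample_clock *c, long sec, long nsec)
{
        atomic_fetch_add(&c->seq, 1);   /* -> odd: readers will spin/retry */
        c->sec = sec;
        c->nsec = nsec;
        atomic_fetch_add(&c->seq, 1);   /* -> even: snapshot is stable again */
}

int main(void)
{
        struct sample_clock c = { 0 };
        long sec, nsec;

        clock_write(&c, 1, 500000000);
        clock_read(&c, &sec, &nsec);
        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}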
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fee3752ae8f6..8a6d2ee2086c 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -281,7 +281,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
head = this_cpu_ptr(event_function.perf_events);
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
- 1, &regs, head);
+ 1, &regs, head, NULL);
#undef ENTRY_SIZE
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b31d3d5699fe..1a2117043bb1 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+ perf_trace_buf_submit(entry, size, rctx,
+ entry->ip, 1, regs, head, NULL);
}
/* Kretprobe profile handler */
@@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
+ perf_trace_buf_submit(entry, size, rctx,
+ entry->ret_ip, 1, regs, head, NULL);
}
#endif /* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 96fc73369099..60e4d7875672 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -532,7 +532,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
(unsigned long *)&rec->args);
head = this_cpu_ptr(sys_data->enter_event->perf_events);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -608,7 +608,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->ret = syscall_get_return_value(current, regs);
head = this_cpu_ptr(sys_data->exit_event->perf_events);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2b36ac68549e..03003cd7dd96 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
out:
preempt_enable();
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 69add8a9da68..4b1dfba70f7c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -575,7 +575,7 @@ out:
/*
* Create/destroy watchdog threads as CPUs come and go:
*/
-static int
+static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
@@ -610,27 +610,10 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __cpuinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
-#ifdef CONFIG_SUSPEND
-/*
- * On exit from suspend we force an offline->online transition on the boot CPU
- * so that the PMU state that was lost while in suspended state gets set up
- * properly for the boot CPU. This information is required for restarting the
- * NMI watchdog.
- */
-void lockup_detector_bootcpu_resume(void)
-{
- void *cpu = (void *)(long)smp_processor_id();
-
- cpu_callback(&cpu_nfb, CPU_DEAD_FROZEN, cpu);
- cpu_callback(&cpu_nfb, CPU_UP_PREPARE_FROZEN, cpu);
- cpu_callback(&cpu_nfb, CPU_ONLINE_FROZEN, cpu);
-}
-#endif
-
void __init lockup_detector_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6b4718e2ee34..b41823cc05e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -39,12 +39,6 @@ DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
-static struct task_struct *sync_supers_tsk;
-static struct timer_list sync_supers_timer;
-
-static int bdi_sync_supers(void *);
-static void sync_supers_timer_fn(unsigned long);
-
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
if (wb1 < wb2) {
@@ -250,12 +244,6 @@ static int __init default_bdi_init(void)
{
int err;
- sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
- BUG_ON(IS_ERR(sync_supers_tsk));
-
- setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
- bdi_arm_supers_timer();
-
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
@@ -270,46 +258,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
return wb_has_dirty_io(&bdi->wb);
}
-/*
- * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
- * or we risk deadlocking on ->s_umount. The longer term solution would be
- * to implement sync_supers_bdi() or similar and simply do it from the
- * bdi writeback thread individually.
- */
-static int bdi_sync_supers(void *unused)
-{
- set_user_nice(current, 0);
-
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
-
- /*
- * Do this periodically, like kupdated() did before.
- */
- sync_supers();
- }
-
- return 0;
-}
-
-void bdi_arm_supers_timer(void)
-{
- unsigned long next;
-
- if (!dirty_writeback_interval)
- return;
-
- next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
- mod_timer(&sync_supers_timer, round_jiffies_up(next));
-}
-
-static void sync_supers_timer_fn(unsigned long unused)
-{
- wake_up_process(sync_supers_tsk);
- bdi_arm_supers_timer();
-}
-
static void wakeup_timer_fn(unsigned long data)
{
struct backing_dev_info *bdi = (struct backing_dev_info *)data;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e5363f34e025..5ad5ce23c1e0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1532,7 +1532,6 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
- bdi_arm_supers_timer();
return 0;
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc49d2cd..fc866f2e4528 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
goto out;
- if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
- goto out;
-
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+ goto out;
+
next_gw = batadv_gw_get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a438f4b582fc..99dd8f75b3ff 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
del:
list_del(&entry->list);
kfree(entry);
+ kfree(tt_change_node);
event_removed = true;
goto unlock;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 0cb3fe8d8e72..a39354ee1432 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@ rollback:
*/
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
+ char *new_ifalias;
+
ASSERT_RTNL();
if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
return 0;
}
- dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
- if (!dev->ifalias)
+ new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+ if (!new_ifalias)
return -ENOMEM;
+ dev->ifalias = new_ifalias;
strlcpy(dev->ifalias, alias, len+1);
return len;
@@ -2134,6 +2137,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
__be16 protocol = skb->protocol;
netdev_features_t features = skb->dev->features;
+ if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+ features &= ~NETIF_F_GSO_MASK;
+
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
@@ -5986,6 +5992,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_segs = GSO_MAX_SEGS;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
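The dev_set_alias() hunk above is the standard realloc-into-a-temporary fix: assigning krealloc()'s return value straight to dev->ifalias would lose (and leak) the old buffer when the allocation fails. The same pattern in a standalone sketch, using plain realloc() and a hypothetical set_alias() helper rather than the kernel function:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Grow *alias to hold name; on failure the old buffer stays valid. */
static int set_alias(char **alias, const char *name)
{
        size_t len = strlen(name);
        char *new_alias;

        new_alias = realloc(*alias, len + 1);   /* never assign straight to *alias */
        if (!new_alias)
                return -ENOMEM;
        *alias = new_alias;
        memcpy(*alias, name, len + 1);
        return 0;
}

int main(void)
{
        char *alias = NULL;
        int err;

        err = set_alias(&alias, "uplink0");
        if (!err)
                err = set_alias(&alias, "uplink0.100");
        free(alias);
        return err ? 1 : 0;
}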
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..56d63612e1e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+ /* This initializer is needed to force the linker to place this variable
+ * into the const section; otherwise it might end up in the bss section.
+ * We really want to avoid false sharing on this variable, and to catch
+ * any writes to it.
+ */
+ [RTAX_MAX] = 0xdeadbeef,
+};
+
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
int initial_ref, int initial_obsolete, unsigned short flags)
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
sk->sk_gso_max_size = dst->dev->gso_max_size;
+ sk->sk_gso_max_segs = dst->dev->gso_max_segs;
}
}
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f0cdb30921c0..57bd978483e1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head)
static inline void free_leaf(struct leaf *l)
{
- call_rcu_bh(&l->rcu, __leaf_free_rcu);
+ call_rcu(&l->rcu, __leaf_free_rcu);
}
static inline void free_leaf_info(struct leaf_info *leaf)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52d18c1..147ccc3e93db 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (neigh) {
+ if (!IS_ERR(neigh)) {
int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
@@ -1366,9 +1366,8 @@ out:
return skb;
}
-int ip_send_skb(struct sk_buff *skb)
+int ip_send_skb(struct net *net, struct sk_buff *skb)
{
- struct net *net = sock_net(skb->sk);
int err;
err = ip_local_out(skb);
@@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
return 0;
/* Netfilter gets the whole, not-fragmented skb. */
- return ip_send_skb(skb);
+ return ip_send_skb(sock_net(sk), skb);
}
/*
@@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
+ skb_orphan(nskb);
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
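The ip_finish_output2() fix matters because __neigh_create() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so "if (neigh)" lets error values through. A self-contained sketch of that convention, re-implementing the ERR_PTR()/IS_ERR()/PTR_ERR() helpers in user space purely for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095

/* Encode a small negative errno value in the top of the pointer range. */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct neighbour { int refcnt; };

static struct neighbour *neigh_create(int fail)
{
        static struct neighbour n;

        if (fail)
                return ERR_PTR(-ENOBUFS);       /* an error, but not NULL */
        return &n;
}

int main(void)
{
        struct neighbour *neigh = neigh_create(1);

        if (!IS_ERR(neigh))                     /* correct check */
                printf("got neighbour\n");
        else
                printf("error %ld\n", PTR_ERR(neigh));

        /* A buggy "if (neigh)" test would be true here even though
         * neigh_create() failed, and the caller would go on to
         * dereference an error-encoded pointer.
         */
        return 0;
}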
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c035251beb07..e4ba974f143c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,7 +70,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
@@ -80,7 +79,6 @@
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
@@ -88,11 +86,9 @@
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
-#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7e6eeae49c0..2109ff4a1daf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
old_size_goal + mss_now > xmit_size_goal)) {
xmit_size_goal = old_size_goal;
} else {
- tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+ tp->xmit_size_goal_segs =
+ min_t(u16, xmit_size_goal / mss_now,
+ sk->sk_gso_max_segs);
xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
}
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16e336e..1432cdb0644c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
- left * tp->mss_cache < sk->sk_gso_max_size)
+ left * tp->mss_cache < sk->sk_gso_max_size &&
+ left < sk->sk_gso_max_segs)
return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fd2bc9e3c64..85308b90df80 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
+ if (unlikely(sk->sk_rx_dst == NULL))
+ inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
* The code loosely follows the one in the famous
@@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
tcp_set_state(sk, TCP_ESTABLISHED);
if (skb != NULL) {
- inet_sk_rx_dst_set(sk, skb);
+ icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
security_inet_conn_established(sk, skb);
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 42b2a6a73092..767823764016 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1627,9 +1627,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_rx_dst = NULL;
}
}
- if (unlikely(sk->sk_rx_dst == NULL))
- inet_sk_rx_dst_set(sk, skb);
-
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1872,10 +1869,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+EXPORT_SYMBOL(inet_sk_rx_dst_set);
+
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
+ .sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 2288a6399e1e..0abe67bb4d3a 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net)
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
+ unsigned int i;
+
+ for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
+ struct tcp_metrics_block *tm, *next;
+
+ tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
+ while (tm) {
+ next = rcu_dereference_protected(tm->tcpm_next, 1);
+ kfree(tm);
+ tm = next;
+ }
+ }
kfree(net->ipv4.tcp_metrics_hash);
}
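The tcp_net_metrics_exit() hunk walks every hash bucket and frees each chained entry, always reading the next pointer before kfree(). The same save-next-then-free idiom in plain C, with made-up structure names and malloc()/free() standing in for the RCU-protected kernel objects:

#include <stdlib.h>

struct metrics_block {
        struct metrics_block *next;
        /* ... cached metrics would live here ... */
};

struct bucket {
        struct metrics_block *chain;
};

/* Free every entry in every bucket; read ->next before freeing. */
static void metrics_hash_free(struct bucket *hash, unsigned int nbuckets)
{
        unsigned int i;

        for (i = 0; i < nbuckets; i++) {
                struct metrics_block *tm = hash[i].chain;

                while (tm) {
                        struct metrics_block *next = tm->next;

                        free(tm);
                        tm = next;
                }
                hash[i].chain = NULL;
        }
}

int main(void)
{
        struct bucket hash[4] = { { NULL } };
        struct metrics_block *a = calloc(1, sizeof(*a));
        struct metrics_block *b = calloc(1, sizeof(*b));

        if (!a || !b) {
                free(a);
                free(b);
                return 1;
        }
        a->next = b;
        hash[0].chain = a;
        metrics_hash_free(hash, 4);
        return 0;
}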
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 232a90c3ec86..d9c9dcef2de3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct tcp_sock *oldtp = tcp_sk(sk);
struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
- inet_sk_rx_dst_set(newsk, skb);
+ newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
/* TCP Cookie Transactions require space for the cookie pair,
* as it differs for each connection. There is no need to
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..20dfd892c86f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void)
* We can't xmit new skbs from this context, as we might already
* hold qdisc lock.
*/
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
* when we would be allowed to send the split-due-to-Nagle skb fully.
*/
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
- unsigned int mss_now, unsigned int cwnd)
+ unsigned int mss_now, unsigned int max_segs)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 needed, window, cwnd_len;
+ u32 needed, window, max_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
- cwnd_len = mss_now * cwnd;
+ max_len = mss_now * max_segs;
- if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
- return cwnd_len;
+ if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+ return max_len;
needed = min(skb->len, window);
- if (cwnd_len <= needed)
- return cwnd_len;
+ if (max_len <= needed)
+ return max_len;
return needed - needed % mss_now;
}
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
limit = min(send_win, cong_win);
/* If a full-sized TSO skb can be sent, do it. */
- if (limit >= sk->sk_gso_max_size)
+ if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+ sk->sk_gso_max_segs * tp->mss_cache))
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
- cwnd_quota);
+ min_t(unsigned int,
+ cwnd_quota,
+ sk->sk_gso_max_segs));
if (skb->len > limit &&
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b4c3582a991f..6f6d1aca3c3d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
uh->check = CSUM_MANGLED_0;
send:
- err = ip_send_skb(skb);
+ err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c66b90f71c9b..bb9ce2b2f377 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ struct dst_entry *dst = sk->sk_rx_dst;
+
sock_rps_save_rxhash(sk, skb);
+ if (dst) {
+ if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+ dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+ dst_release(dst);
+ sk->sk_rx_dst = NULL;
+ }
+ }
+
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
struct dst_entry *dst = sk->sk_rx_dst;
struct inet_sock *icsk = inet_sk(sk);
if (dst)
- dst = dst_check(dst, 0);
+ dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
if (dst &&
- icsk->rx_dst_ifindex == inet6_iif(skb))
+ icsk->rx_dst_ifindex == skb->skb_iif)
skb_dst_set_noref(skb, dst);
}
}
@@ -1719,10 +1729,23 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ if (rt->rt6i_node)
+ inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
static const struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = inet6_csk_xmit,
.send_check = tcp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
+ .sk_rx_dst_set = inet6_sk_rx_dst_set,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct ipv6hdr),
@@ -1754,6 +1777,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
+ .sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),
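The IPv6 fast-path changes cache the input route in sk->sk_rx_dst together with the receiving ifindex and a fib-tree serial number (rx_dst_cookie), and drop the cache whenever either stops matching. A generic sketch of that cache-plus-validity-cookie pattern (hypothetical types, no refcounting or RCU):

#include <stdio.h>

struct route {
        unsigned int gen;       /* bumped whenever the routing table changes */
        int oif;
};

struct sock_cache {
        struct route *rt;       /* cached route, may be stale */
        unsigned int cookie;    /* route->gen captured at cache time */
        int ifindex;            /* interface the cache was built for */
};

static void cache_store(struct sock_cache *c, struct route *rt, int ifindex)
{
        c->rt = rt;
        c->cookie = rt->gen;
        c->ifindex = ifindex;
}

/* Return the cached route only if both the interface and the generation
 * cookie still match; otherwise invalidate the cache and force a re-lookup.
 */
static struct route *cache_get(struct sock_cache *c, int ifindex)
{
        if (!c->rt)
                return NULL;
        if (c->ifindex != ifindex || c->rt->gen != c->cookie) {
                c->rt = NULL;           /* stale entry: drop it */
                return NULL;
        }
        return c->rt;
}

int main(void)
{
        struct route rt = { .gen = 1, .oif = 2 };
        struct sock_cache c = { 0 };

        cache_store(&c, &rt, 2);
        printf("hit:  %p\n", (void *)cache_get(&c, 2));
        rt.gen++;                       /* routing table changed */
        printf("miss: %p\n", (void *)cache_get(&c, 2));
        return 0;
}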
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d8924b9c..6828e39ec2ec 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(skb);
+ kfree_skb(nskb);
goto out;
}
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(skb);
+ kfree_skb(nskb);
goto out;
}
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(skb);
+ kfree_skb(nskb);
goto out;
}
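All three llc_station fixes correct an error path that freed the received skb instead of the locally allocated reply (nskb). The underlying rule is: on failure, release what the function allocated itself, not the buffer it was handed. A compact user-space sketch with a hypothetical send_reply() helper:

#include <stdlib.h>
#include <string.h>

struct buf {
        size_t len;
        unsigned char data[64];
};

static int fill_reply(struct buf *reply, const struct buf *req)
{
        if (req->len > sizeof(reply->data))
                return -1;              /* stand-in for a PDU build failure */
        memcpy(reply->data, req->data, req->len);
        reply->len = req->len;
        return 0;
}

/* The caller owns 'req'; this function owns the reply it allocates. */
static int send_reply(const struct buf *req)
{
        struct buf *reply = malloc(sizeof(*reply));

        if (!reply)
                return -1;
        if (fill_reply(reply, req)) {
                free(reply);            /* free the reply, never 'req' */
                return -1;
        }
        /* On success the reply would be handed to the lower layer, which
         * then owns it; freeing it here stands in for that hand-off.
         */
        free(reply);
        return 0;
}

int main(void)
{
        struct buf req = { .len = 4, .data = "ping" };

        return send_reply(&req) ? 1 : 0;
}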
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6fac18c0423f..85572353a7e3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+ del_timer_sync(&sdata->u.mesh.mesh_path_timer);
/*
* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
@@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
ieee80211_configure_filter(local);
+
+ sdata->u.mesh.timers_running = 0;
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cef0c9e79aba..a4a5acdbaa4d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
del_timer_sync(&sdata->u.mgd.timer);
del_timer_sync(&sdata->u.mgd.chswitch_timer);
+
+ sdata->u.mgd.timers_running = 0;
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcaee5d12839..839dd9737989 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
if (local->scan_req != local->int_scan_req)
cfg80211_scan_done(local->scan_req, aborted);
local->scan_req = NULL;
- local->scan_sdata = NULL;
+ rcu_assign_pointer(local->scan_sdata, NULL);
local->scanning = 0;
local->scan_channel = NULL;
@@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
kfree(local->sched_scan_ies.ie[i]);
drv_sched_scan_stop(local, sdata);
- rcu_assign_pointer(local->sched_scan_sdata, NULL);
}
out:
mutex_unlock(&local->mtx);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ceaca7c134a0..8ac890a1a4c0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po,
default:
WARN(1, "TPACKET version not supported\n");
BUG();
- return 0;
+ return NULL;
}
}
@@ -1936,7 +1936,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
if (likely(po->tx_ring.pg_vec)) {
ph = skb_shinfo(skb)->destructor_arg;
- BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
atomic_dec(&po->tx_ring.pending);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb8256442..05d60859d8e3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
struct tcf_common *pc;
int ret = 0;
int err;
+#ifdef CONFIG_GACT_PROB
+ struct tc_gact_p *p_parm = NULL;
+#endif
if (nla == NULL)
return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
#ifndef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB] != NULL)
return -EOPNOTSUPP;
+#else
+ if (tb[TCA_GACT_PROB]) {
+ p_parm = nla_data(tb[TCA_GACT_PROB]);
+ if (p_parm->ptype >= MAX_RAND)
+ return -EINVAL;
+ }
#endif
pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
spin_lock_bh(&gact->tcf_lock);
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
- if (tb[TCA_GACT_PROB] != NULL) {
- struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+ if (p_parm) {
gact->tcfg_paction = p_parm->paction;
gact->tcfg_pval = p_parm->pval;
gact->tcfg_ptype = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+ if (gact->tcfg_ptype)
action = gact_rand[gact->tcfg_ptype](gact);
else
action = gact->tcf_action;
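The act_gact change validates p_parm->ptype against MAX_RAND once at configuration time, which is what allows the hot path to drop the per-packet "gact_rand[ptype] != NULL" test. The general shape, bounds-checking an externally supplied index before it is stored and then indexing a function table without further checks, sketched with made-up handler names:

#include <stdio.h>

enum { RAND_NONE, RAND_NETRAND, RAND_DETERM, MAX_RAND };

typedef int (*rand_fn)(int seed);

static int rand_none(int seed)    { (void)seed; return 0; }
static int rand_netrand(int seed) { return (int)((unsigned)seed * 1103515245u + 12345u); }
static int rand_determ(int seed)  { return seed; }

static const rand_fn rand_tbl[MAX_RAND] = {
        [RAND_NONE]    = rand_none,
        [RAND_NETRAND] = rand_netrand,
        [RAND_DETERM]  = rand_determ,
};

struct policy { int ptype; };

/* Configuration time: reject out-of-range types before they are stored. */
static int policy_init(struct policy *p, int ptype)
{
        if (ptype < 0 || ptype >= MAX_RAND)
                return -1;              /* -EINVAL equivalent */
        p->ptype = ptype;
        return 0;
}

/* Fast path: the table lookup needs no NULL or bounds test any more. */
static int policy_apply(const struct policy *p, int seed)
{
        return rand_tbl[p->ptype](seed);
}

int main(void)
{
        struct policy p;

        if (policy_init(&p, RAND_DETERM))
                return 1;
        printf("%d\n", policy_apply(&p, 7));
        return 0;
}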
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
err2:
kfree(tname);
err1:
- kfree(pc);
+ if (ret == ACT_P_CREATED) {
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
+ }
return err;
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- kfree(pc);
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
return -ENOMEM;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
d = to_defact(pc);
ret = alloc_defdata(d, defdata);
if (ret < 0) {
- kfree(pc);
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
return ret;
}
d->tcf_action = parm->action;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3df18c..e4723d31fdd5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@ out:
return index;
}
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ skb = sch->ops->peek(sch);
+ return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+ unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+ u32 lmax, u32 inv_w, int delta_w)
+{
+ int i;
+
+ /* update qfq-specific data */
+ cl->lmax = lmax;
+ cl->inv_w = inv_w;
+ i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+ cl->grp = &q->groups[i];
+
+ q->wsum += delta_w;
+}
+
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct nlattr **tca, unsigned long *arg)
{
@@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
lmax = 1UL << QFQ_MTU_SHIFT;
if (cl != NULL) {
+ bool need_reactivation = false;
+
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
- if (inv_w != cl->inv_w) {
- sch_tree_lock(sch);
- q->wsum += delta_w;
- cl->inv_w = inv_w;
- sch_tree_unlock(sch);
+ if (lmax == cl->lmax && inv_w == cl->inv_w)
+ return 0; /* nothing to update */
+
+ i = qfq_calc_index(inv_w, lmax);
+ sch_tree_lock(sch);
+ if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+ /*
+ * shift cl->F back, to not charge the
+ * class for the not-yet-served head
+ * packet
+ */
+ cl->F = cl->S;
+ /* remove class from its slot in the old group */
+ qfq_deactivate_class(q, cl);
+ need_reactivation = true;
}
+
+ qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+ if (need_reactivation) /* activate in new group */
+ qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+ sch_tree_unlock(sch);
+
return 0;
}
@@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->refcnt = 1;
cl->common.classid = classid;
- cl->lmax = lmax;
- cl->inv_w = inv_w;
- i = qfq_calc_index(cl->inv_w, cl->lmax);
- cl->grp = &q->groups[i];
+ qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
}
- q->wsum += weight;
sch_tree_lock(sch);
qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
}
}
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
- struct sk_buff *skb;
-
- skb = sch->ops->peek(sch);
- return skb ? qdisc_pkt_len(skb) : 0;
-}
-
/*
* Updates the class, returns true if also the group needs to be updated.
*/
@@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
- struct qfq_group *grp;
struct qfq_class *cl;
int err;
- u64 roundedS;
- int s;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
/* If we reach this point, queue q was idle */
- grp = cl->grp;
+ qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+ return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+ unsigned int pkt_len)
+{
+ struct qfq_group *grp = cl->grp;
+ u64 roundedS;
+ int s;
+
qfq_update_start(q, cl);
/* compute new finish time and rounded start. */
- cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+ cl->F = cl->S + (u64)pkt_len * cl->inv_w;
roundedS = qfq_round_down(cl->S, grp->slot_shift);
/*
@@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skip_update:
qfq_slot_insert(grp, cl, roundedS);
-
- return err;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 31b40cc4a9c3..dcd64d5b07aa 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
*/
synchronize_rcu();
INIT_LIST_HEAD(&wdev->list);
+ /*
+ * Ensure that all events have been processed and
+ * freed.
+ */
+ cfg80211_process_wdev_events(wdev);
break;
case NETDEV_PRE_UP:
if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5206c6844fd7..bc7430b54771 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype ntype,
u32 *flags, struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wdev_events(struct wireless_dev *wdev);
int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2303ee73b50a..2ded3c7fad06 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_IBSS;
if (rd_flags & NL80211_RRF_DFS)
channel_flags |= IEEE80211_CHAN_RADAR;
+ if (rd_flags & NL80211_RRF_NO_OFDM)
+ channel_flags |= IEEE80211_CHAN_NO_OFDM;
return channel_flags;
}
@@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy,
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
- chan->max_power = min(chan->max_power, chan->max_reg_power);
+ if (chan->orig_mpwr) {
+ /*
+ * Devices that have their own custom regulatory domain
+ * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+ * passed country IE power settings.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+ wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
+ chan->max_power = chan->max_reg_power;
+ else
+ chan->max_power = min(chan->orig_mpwr,
+ chan->max_reg_power);
+ } else
+ chan->max_power = chan->max_reg_power;
}
static void handle_band(struct wiphy *wiphy,
@@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
chan->flags = chan->orig_flags;
chan->max_antenna_gain = chan->orig_mag;
chan->max_power = chan->orig_mpwr;
+ chan->beacon_found = false;
}
}
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 26f8cd30f712..994e2f0cc7a8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
wdev->connect_keys = NULL;
}
-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+void cfg80211_process_wdev_events(struct wireless_dev *wdev)
{
struct cfg80211_event *ev;
unsigned long flags;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5b228f97d4b3..87cd0e4d4282 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
if (x->lft.hard_add_expires_seconds) {
long tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
- if (tmo <= 0)
- goto expired;
+ if (tmo <= 0) {
+ if (x->xflags & XFRM_SOFT_EXPIRE) {
+ /* enter hard expire without soft expire first?!
+ * setting a new date could trigger this.
+ * workaround: fix x->curlft.add_time as below:
+ */
+ x->curlft.add_time = now - x->saved_tmo - 1;
+ tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
+ } else
+ goto expired;
+ }
if (tmo < next)
next = tmo;
}
@@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
if (x->lft.soft_add_expires_seconds) {
long tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
- if (tmo <= 0)
+ if (tmo <= 0) {
warn = 1;
- else if (tmo < next)
+ x->xflags &= ~XFRM_SOFT_EXPIRE;
+ } else if (tmo < next) {
next = tmo;
+ x->xflags |= XFRM_SOFT_EXPIRE;
+ x->saved_tmo = tmo;
+ }
}
if (x->lft.soft_use_expires_seconds) {
long tmo = x->lft.soft_use_expires_seconds +
diff --git a/scripts/decodecode b/scripts/decodecode
index 18ba881c3415..4f8248d5a11f 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -89,7 +89,7 @@ echo $code >> $T.s
disas $T
cat $T.dis >> $T.aa
-faultline=`cat $T.dis | head -1 | cut -d":" -f2`
+faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g"
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 83554ee8a587..d51b7c76c37d 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -290,10 +290,51 @@ static int yama_ptrace_access_check(struct task_struct *child,
return rc;
}
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+static int yama_ptrace_traceme(struct task_struct *parent)
+{
+ int rc;
+
+ /* If standard caps disallows it, so does Yama. We should
+ * only tighten restrictions further.
+ */
+ rc = cap_ptrace_traceme(parent);
+ if (rc)
+ return rc;
+
+ /* Only disallow PTRACE_TRACEME on more aggressive settings. */
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_CAPABILITY:
+ if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
+ rc = -EPERM;
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ rc = -EPERM;
+ break;
+ }
+
+ if (rc) {
+ char name[sizeof(current->comm)];
+ printk_ratelimited(KERN_NOTICE
+ "ptraceme of pid %d was attempted by: %s (pid %d)\n",
+ current->pid,
+ get_task_comm(name, parent),
+ parent->pid);
+ }
+
+ return rc;
+}
+
static struct security_operations yama_ops = {
.name = "yama",
.ptrace_access_check = yama_ptrace_access_check,
+ .ptrace_traceme = yama_ptrace_traceme,
.task_prctl = yama_task_prctl,
.task_free = yama_task_free,
};
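yama_ptrace_traceme() follows the stacking rule spelled out in its comment: take the verdict of the base capability check and only ever tighten it, depending on the configured ptrace scope. A stripped-down sketch of that only-tighten pattern, with invented scope names and a plain flag standing in for the capability check:

#include <stdio.h>

enum scope { SCOPE_DISABLED, SCOPE_RELATIONAL, SCOPE_CAPABILITY, SCOPE_NO_ATTACH };

/* Return 0 if a "traceme" request is allowed under the given scope,
 * -1 (EPERM-like) otherwise.  base_rc is the verdict of the stacked
 * base check; the scope check may only tighten it, never relax it.
 */
static int traceme_allowed(int base_rc, enum scope scope, int tracer_privileged)
{
        int rc = base_rc;

        if (rc)
                return rc;

        switch (scope) {
        case SCOPE_CAPABILITY:
                if (!tracer_privileged)
                        rc = -1;
                break;
        case SCOPE_NO_ATTACH:
                rc = -1;
                break;
        default:
                break;
        }
        return rc;
}

int main(void)
{
        printf("%d\n", traceme_allowed(0, SCOPE_NO_ATTACH, 1));   /* -1: never */
        printf("%d\n", traceme_allowed(0, SCOPE_CAPABILITY, 0));  /* -1: no capability */
        printf("%d\n", traceme_allowed(0, SCOPE_RELATIONAL, 0));  /*  0: allowed */
        return 0;
}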
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 4e7ec2b49873..d0f00356fc11 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
- return NULL;
+ goto _failed;
if (!res_size)
goto _failed;
size = sgbuf->pages * PAGE_SIZE;
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f502a2bdc3c..0a436626182b 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -326,7 +326,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
unsigned long ofs = idx << PAGE_SHIFT;
dma_addr_t addr;
- addr = snd_pcm_sgbuf_get_addr(substream, ofs);
+ if (ofs >= runtime->dma_bytes)
+ addr = emu->silent_page.addr;
+ else
+ addr = snd_pcm_sgbuf_get_addr(substream, ofs);
if (! is_valid_page(emu, addr)) {
printk(KERN_ERR "emu: failure page = %d\n", idx);
mutex_unlock(&hdr->block_mutex);
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 647218d69f68..4f7d2dfcef7b 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -332,13 +332,12 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
if (cfg->dig_outs)
snd_printd(" dig-out=0x%x/0x%x\n",
cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
- snd_printd(" inputs:");
+ snd_printd(" inputs:\n");
for (i = 0; i < cfg->num_inputs; i++) {
- snd_printd(" %s=0x%x",
+ snd_printd(" %s=0x%x\n",
hda_get_autocfg_input_label(codec, cfg, i),
cfg->inputs[i].pin);
}
- snd_printd("\n");
if (cfg->dig_in_pin)
snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 14361184ae1e..5e22a8f43d2e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2967,12 +2967,10 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
};
static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
- SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
@@ -2988,14 +2986,10 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
- SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
- SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
- SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
- SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
{}
};
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 69b928449789..8f23374fa642 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -877,8 +877,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
struct hdmi_eld *eld;
struct hdmi_spec_per_cvt *per_cvt = NULL;
- hinfo->nid = 0; /* clear the leftover value */
-
/* Validate hinfo */
pin_idx = hinfo_to_pin_index(spec, hinfo);
if (snd_BUG_ON(pin_idx < 0))
@@ -1163,6 +1161,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
}
+static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
+ return 0;
+}
+
static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
@@ -1202,6 +1208,7 @@ static const struct hda_pcm_ops generic_ops = {
.open = hdmi_pcm_open,
.close = hdmi_pcm_close,
.prepare = generic_hdmi_playback_pcm_prepare,
+ .cleanup = generic_hdmi_playback_pcm_cleanup,
};
static int generic_hdmi_build_pcms(struct hda_codec *codec)
@@ -1220,7 +1227,6 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
pstr->substreams = 1;
pstr->ops = generic_ops;
- pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */
/* other pstr fields are set in open */
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 344b221d2102..4f81dd44c837 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6099,6 +6099,8 @@ static const struct alc_fixup alc269_fixups[] = {
[ALC269_FIXUP_PCM_44K] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc269_fixup_pcm_44k,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_QUANTA_MUTE
},
[ALC269_FIXUP_STEREO_DMIC] = {
.type = ALC_FIXUP_FUNC,
@@ -6206,9 +6208,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
#if 0
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 3c795921c5f6..23b40186f9b8 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2406,6 +2406,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
/* Setup AB8500 according to board-settings */
pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent);
+
+ /* Inform SoC Core that we have our own I/O arrangements. */
+ codec->control_data = (void *)true;
+
status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
if (status < 0) {
pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 8c39dddd7d00..11b1b714b8b5 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -186,6 +186,7 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec)
printk(KERN_INFO "AD1980 SoC Audio Codec\n");
+ codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0) {
printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 6276e352125f..8f726c063f42 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -581,6 +581,8 @@ static int mc13783_probe(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
+ codec->control_data = priv->mc13xxx;
+
mc13xxx_lock(priv->mc13xxx);
/* these are the reset values */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 8af6a5245b18..df2f99d1d428 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -239,6 +239,7 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = {
{"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
{"LO", NULL, "DAC"}, /* dac --> line_out */
+ {"LINE_IN", NULL, "VAG_POWER"},
{"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
{"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */
@@ -1357,8 +1358,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
if (ret)
goto err;
- snd_soc_dapm_new_widgets(&codec->dapm);
-
return 0;
err:
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 982e437799a8..33c0f3d39c87 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -340,6 +340,7 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec)
printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION);
+ codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
goto codec_err;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index eaf65863ec21..aa9ce9dd7d8a 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2501,6 +2501,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
/* VMID 2*250k */
snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
WM8962_VMID_SEL_MASK, 0x100);
+
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ msleep(100);
break;
case SND_SOC_BIAS_OFF:
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index bb62f4b3d563..04ef03175c51 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- bclk_rate = params_rate(params) * 2;
+ bclk_rate = params_rate(params) * 4;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bclk_rate *= 16;
@@ -3253,10 +3253,13 @@ static void wm8994_mic_work(struct work_struct *work)
int ret;
int report;
+ pm_runtime_get_sync(dev);
+
ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, &reg);
if (ret < 0) {
dev_err(dev, "Failed to read microphone status: %d\n",
ret);
+ pm_runtime_put(dev);
return;
}
@@ -3299,6 +3302,8 @@ static void wm8994_mic_work(struct work_struct *work)
snd_soc_jack_report(priv->micdet[1].jack, report,
SND_JACK_HEADSET | SND_JACK_BTN_0);
+
+ pm_runtime_put(dev);
}
static irqreturn_t wm8994_mic_irq(int irq, void *data)
@@ -3421,12 +3426,15 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
int reg;
bool present;
+ pm_runtime_get_sync(codec->dev);
+
mutex_lock(&wm8994->accdet_lock);
reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
if (reg < 0) {
dev_err(codec->dev, "Failed to read jack status: %d\n", reg);
mutex_unlock(&wm8994->accdet_lock);
+ pm_runtime_put(codec->dev);
return IRQ_NONE;
}
@@ -3491,6 +3499,7 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
SND_JACK_MECHANICAL | SND_JACK_HEADSET |
wm8994->btn_mask);
+ pm_runtime_put(codec->dev);
return IRQ_HANDLED;
}
@@ -3602,6 +3611,8 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
return IRQ_HANDLED;
+ pm_runtime_get_sync(codec->dev);
+
/* We may occasionally read a detection without an impedance
* range being provided - if that happens loop again.
*/
@@ -3612,6 +3623,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
dev_err(codec->dev,
"Failed to read mic detect status: %d\n",
reg);
+ pm_runtime_put(codec->dev);
return IRQ_NONE;
}
@@ -3639,6 +3651,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
dev_warn(codec->dev, "Accessory detection with no callback\n");
out:
+ pm_runtime_put(codec->dev);
return IRQ_HANDLED;
}
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 099e6ec32125..f16fb361a4eb 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -619,6 +619,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
{
int ret = 0;
+ codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0) {
printk(KERN_ERR "wm9712: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 3eb19fb71d17..d0b8a3287a85 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1196,6 +1196,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
if (wm9713 == NULL)
return -ENOMEM;
snd_soc_codec_set_drvdata(codec, wm9713);
+ codec->control_data = wm9713; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index aba71bfa33b1..b3030718c228 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -394,9 +394,14 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
+ struct mxs_saif *master_saif;
u32 scr, stat;
int ret;
+ master_saif = mxs_saif_get_master(saif);
+ if (!master_saif)
+ return -EINVAL;
+
/* mclk should already be set */
if (!saif->mclk && saif->mclk_in_use) {
dev_err(cpu_dai->dev, "set mclk first\n");
@@ -420,6 +425,25 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ /* prepare clk in hw_param, enable in trigger */
+ clk_prepare(saif->clk);
+ if (saif != master_saif) {
+ /*
+ * Set an initial clock rate for the saif internal logic to work
+ * properly. This is important when working in EXTMASTER mode,
+ * which uses the other saif's BITCLK & LRCLK; this saif still
+ * needs a basic clock that is fast enough for its internal
+ * logic.
+ */
+ clk_enable(saif->clk);
+ ret = clk_set_rate(saif->clk, 24000000);
+ clk_disable(saif->clk);
+ if (ret)
+ return ret;
+
+ clk_prepare(master_saif->clk);
+ }
+
scr = __raw_readl(saif->base + SAIF_CTRL);
scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 1046083e90a0..acdd3ef14e08 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -820,3 +820,4 @@ module_platform_driver(asoc_mcbsp_driver);
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
MODULE_DESCRIPTION("OMAP I2S SoC Interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-mcbsp");
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 5a649da9122a..f0feb06615f8 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -441,3 +441,4 @@ module_platform_driver(omap_pcm_driver);
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
MODULE_DESCRIPTION("OMAP PCM DMA module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-pcm-audio");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f219b2f7ee68..f81c5976b961 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1096,7 +1096,7 @@ static int soc_probe_codec(struct snd_soc_card *card,
}
/* If the driver didn't set I/O up try regmap */
- if (!codec->control_data)
+ if (!codec->write && dev_get_regmap(codec->dev, NULL))
snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
if (driver->controls)
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index d684df294c0c..e463529b38bb 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -177,7 +177,7 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
}
alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (alc5632->gpio_hp_det == -ENODEV)
+ if (alc5632->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 0c5bb33d258e..d4f14e492341 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -284,27 +284,27 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
} else if (np) {
pdata->gpio_spkr_en = of_get_named_gpio(np,
"nvidia,spkr-en-gpios", 0);
- if (pdata->gpio_spkr_en == -ENODEV)
+ if (pdata->gpio_spkr_en == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_hp_mute = of_get_named_gpio(np,
"nvidia,hp-mute-gpios", 0);
- if (pdata->gpio_hp_mute == -ENODEV)
+ if (pdata->gpio_hp_mute == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_hp_det = of_get_named_gpio(np,
"nvidia,hp-det-gpios", 0);
- if (pdata->gpio_hp_det == -ENODEV)
+ if (pdata->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_int_mic_en = of_get_named_gpio(np,
"nvidia,int-mic-en-gpios", 0);
- if (pdata->gpio_int_mic_en == -ENODEV)
+ if (pdata->gpio_int_mic_en == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_ext_mic_en = of_get_named_gpio(np,
"nvidia,ext-mic-en-gpios", 0);
- if (pdata->gpio_ext_mic_en == -ENODEV)
+ if (pdata->gpio_ext_mic_en == -EPROBE_DEFER)
return -EPROBE_DEFER;
}
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 62ac0285bfaf..057e28ef770e 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -21,7 +21,7 @@
#include <linux/mfd/dbx500-prcmu.h>
#include <mach/hardware.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index ee14d2dac2f5..5c472f335a64 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <mach/hardware.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
#include <sound/soc.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h
index 7f71b4a0d4bc..2d9136da9865 100644
--- a/sound/soc/ux500/ux500_msp_i2s.h
+++ b/sound/soc/ux500/ux500_msp_i2s.h
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
#define MSP_INPUT_FREQ_APB 48000000
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 77f124fe57ad..35655c3a7b7a 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -319,6 +319,8 @@ LIB_H += $(ARCH_INCLUDE)
LIB_H += util/cgroup.h
LIB_H += $(TRACE_EVENT_DIR)event-parse.h
LIB_H += util/target.h
+LIB_H += util/rblist.h
+LIB_H += util/intlist.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
@@ -383,6 +385,8 @@ LIB_OBJS += $(OUTPUT)util/xyarray.o
LIB_OBJS += $(OUTPUT)util/cpumap.o
LIB_OBJS += $(OUTPUT)util/cgroup.o
LIB_OBJS += $(OUTPUT)util/target.o
+LIB_OBJS += $(OUTPUT)util/rblist.o
+LIB_OBJS += $(OUTPUT)util/intlist.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
@@ -983,7 +987,8 @@ clean:
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
$(MAKE) -C Documentation/ clean
$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
- $(RM) $(OUTPUT)util/*-{bison,flex}*
+ $(RM) $(OUTPUT)util/*-bison*
+ $(RM) $(OUTPUT)util/*-flex*
$(python-clean)
.PHONY: all install clean strip $(LIBTRACEEVENT)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f5a6452931e6..4db6e1ba54e3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -313,7 +313,7 @@ try_again:
}
}
- perf_session__update_sample_type(session);
+ perf_session__set_id_hdr_size(session);
}
static int process_buildids(struct perf_record *rec)
@@ -844,8 +844,6 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
struct perf_record *rec = &record;
char errbuf[BUFSIZ];
- perf_header__set_cmdline(argc, argv);
-
evsel_list = perf_evlist__new(NULL, NULL);
if (evsel_list == NULL)
return -ENOMEM;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 69b1c1185159..7c88a243b5db 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -249,8 +249,9 @@ static int process_read_event(struct perf_tool *tool,
static int perf_report__setup_sample_type(struct perf_report *rep)
{
struct perf_session *self = rep->session;
+ u64 sample_type = perf_evlist__sample_type(self->evlist);
- if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
@@ -274,7 +275,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
if (sort__branch_mode == 1) {
if (!self->fd_pipe &&
- !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+ !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
ui__error("Selected -b but no branch data. "
"Did you call perf record without -b?\n");
return -1;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index d909eb74a0eb..1d592f5cbea9 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -478,7 +478,6 @@ static int test__basic_mmap(void)
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
- int sample_size = __perf_evsel__sample_size(attr.sample_type);
for (i = 0; i < nsyscalls; ++i) {
char name[64];
@@ -563,8 +562,7 @@ static int test__basic_mmap(void)
goto out_munmap;
}
- err = perf_event__parse_sample(event, attr.sample_type, sample_size,
- false, &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample, false);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_munmap;
@@ -661,12 +659,12 @@ static int test__PERF_RECORD(void)
const char *cmd = "sleep";
const char *argv[] = { cmd, "1", NULL, };
char *bname;
- u64 sample_type, prev_time = 0;
+ u64 prev_time = 0;
bool found_cmd_mmap = false,
found_libc_mmap = false,
found_vdso_mmap = false,
found_ld_mmap = false;
- int err = -1, errs = 0, i, wakeups = 0, sample_size;
+ int err = -1, errs = 0, i, wakeups = 0;
u32 cpu;
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
@@ -757,13 +755,6 @@ static int test__PERF_RECORD(void)
}
/*
- * We'll need these two to parse the PERF_SAMPLE_* fields in each
- * event.
- */
- sample_type = perf_evlist__sample_type(evlist);
- sample_size = __perf_evsel__sample_size(sample_type);
-
- /*
* Now that all is properly set up, enable the events, they will
* count just on workload.pid, which will start...
*/
@@ -788,9 +779,7 @@ static int test__PERF_RECORD(void)
if (type < PERF_RECORD_MAX)
nr_events[type]++;
- err = perf_event__parse_sample(event, sample_type,
- sample_size, true,
- &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample, false);
if (err < 0) {
if (verbose)
perf_event__fprintf(event, stderr);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 35e86c6df713..68cd61ef6ac5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -38,6 +38,7 @@
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
+#include "util/intlist.h"
#include "util/debug.h"
@@ -706,8 +707,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
int err;
if (!machine && perf_guest) {
- pr_err("Can't find guest [%d]'s kernel information\n",
- event->ip.pid);
+ static struct intlist *seen;
+
+ if (!seen)
+ seen = intlist__new();
+
+ if (!intlist__has_entry(seen, event->ip.pid)) {
+ pr_err("Can't find guest [%d]'s kernel information\n",
+ event->ip.pid);
+ intlist__add(seen, event->ip.pid);
+ }
return;
}
@@ -811,7 +820,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
int ret;
while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
- ret = perf_session__parse_sample(session, event, &sample);
+ ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
@@ -943,8 +952,10 @@ try_again:
* based cpu-clock-tick sw counter, which
* is always available even if no PMU support:
*/
- if (attr->type == PERF_TYPE_HARDWARE &&
- attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+ if ((err == ENOENT || err == ENXIO) &&
+ (attr->type == PERF_TYPE_HARDWARE) &&
+ (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
+
if (verbose)
ui__warning("Cycles event not supported,\n"
"trying to fall back to cpu-clock-ticks\n");
@@ -1032,7 +1043,7 @@ static int __cmd_top(struct perf_top *top)
&top->session->host_machine);
perf_top__start_counters(top);
top->session->evlist = top->evlist;
- perf_session__update_sample_type(top->session);
+ perf_session__set_id_hdr_size(top->session);
/* Wait for a minimal set of events before starting the snapshot */
poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1b197280c621..d84870b06426 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -197,9 +197,6 @@ int perf_event__preprocess_sample(const union perf_event *self,
const char *perf_event__name(unsigned int id);
-int perf_event__parse_sample(const union perf_event *event, u64 type,
- int sample_size, bool sample_id_all,
- struct perf_sample *sample, bool swapped);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
const struct perf_sample *sample,
bool swapped);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3edfd3483816..9b38681add9e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -881,3 +881,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
return 0;
}
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample, bool swapped)
+{
+ struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);
+ return perf_evsel__parse_sample(e, event, sample, swapped);
+}
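The new perf_evlist__parse_sample() simply delegates to the first evsel on the list, so callers share one entry point instead of each carrying sample_type/sample_size around. A hedged usage sketch, assuming an evlist that has already been opened and mmapped (example_drain is illustrative, not part of the patch):

	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include "util/event.h"
	#include "util/evlist.h"

	static void example_drain(struct perf_evlist *evlist, int idx)
	{
		union perf_event *event;
		struct perf_sample sample;

		while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
			if (perf_evlist__parse_sample(evlist, event, &sample, false))
				continue;	/* unparsable event, skip it */
			printf("pid %u tid %u ip %#" PRIx64 "\n",
			       sample.pid, sample.tid, sample.ip);
		}
	}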
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 40d4d3cdced0..528c1acd9298 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -122,6 +122,9 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample, bool swapped);
+
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e81771364867..2eaae140def2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -20,7 +20,7 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
-int __perf_evsel__sample_size(u64 sample_type)
+static int __perf_evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
@@ -53,6 +53,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->attr = *attr;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
+ evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
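Caching sample_size in perf_evsel__init() means the per-event parse no longer recomputes it: each PERF_SAMPLE_* bit covered by PERF_SAMPLE_MASK contributes exactly one 64-bit word to the fixed portion of a sample record, so the size is just a population count scaled by sizeof(u64). An illustrative stand-alone version of that computation (example_sample_size is not part of the patch):

	#include <linux/types.h>
	#include "util/evsel.h"	/* PERF_SAMPLE_MASK */

	static int example_sample_size(u64 sample_type)
	{
		u64 mask = sample_type & PERF_SAMPLE_MASK;
		int size = 0, bit;

		for (bit = 0; bit < 64; bit++)
			if (mask & (1ULL << bit))
				size += sizeof(u64);	/* each field is one u64 */

		return size;
	}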
@@ -728,10 +729,10 @@ static bool sample_overlap(const union perf_event *event,
return false;
}
-int perf_event__parse_sample(const union perf_event *event, u64 type,
- int sample_size, bool sample_id_all,
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data, bool swapped)
{
+ u64 type = evsel->attr.sample_type;
const u64 *array;
/*
@@ -746,14 +747,14 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
data->period = 1;
if (event->header.type != PERF_RECORD_SAMPLE) {
- if (!sample_id_all)
+ if (!evsel->attr.sample_id_all)
return 0;
return perf_event__parse_id_sample(event, type, data, swapped);
}
array = event->sample.array;
- if (sample_size + sizeof(event->header) > event->header.size)
+ if (evsel->sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
if (type & PERF_SAMPLE_IP) {
@@ -895,7 +896,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[1] = sample->tid;
if (swapped) {
/*
- * Inverse of what is done in perf_event__parse_sample
+ * Inverse of what is done in perf_evsel__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
@@ -930,7 +931,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[0] = sample->cpu;
if (swapped) {
/*
- * Inverse of what is done in perf_event__parse_sample
+ * Inverse of what is done in perf_evsel__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val64 = bswap_64(u.val64);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 67cc5033d192..b559929983bb 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -65,6 +65,7 @@ struct perf_evsel {
void *func;
void *data;
} handler;
+ unsigned int sample_size;
bool supported;
};
@@ -177,13 +178,8 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
-int __perf_evsel__sample_size(u64 sample_type);
-
-static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
-{
- return __perf_evsel__sample_size(evsel->attr.sample_type);
-}
-
void hists__init(struct hists *hists);
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample, bool swapped);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3a6d20443330..74ea3c2f8138 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -174,6 +174,15 @@ perf_header__set_cmdline(int argc, const char **argv)
{
int i;
+ /*
+ * If header_argv has already been set, do not override it.
+ * This allows a command to set the cmdline, parse args and
+ * then call another builtin function that implements a
+ * command -- e.g, cmd_kvm calling cmd_record.
+ */
+ if (header_argv)
+ return 0;
+
header_argc = (u32)argc;
/* do not include NULL termination */
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
new file mode 100644
index 000000000000..fd530dced9cb
--- /dev/null
+++ b/tools/perf/util/intlist.c
@@ -0,0 +1,101 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+
+#include "intlist.h"
+
+static struct rb_node *intlist__node_new(struct rblist *rblist __used,
+ const void *entry)
+{
+ int i = (int)((long)entry);
+ struct rb_node *rc = NULL;
+ struct int_node *node = malloc(sizeof(*node));
+
+ if (node != NULL) {
+ node->i = i;
+ rc = &node->rb_node;
+ }
+
+ return rc;
+}
+
+static void int_node__delete(struct int_node *ilist)
+{
+ free(ilist);
+}
+
+static void intlist__node_delete(struct rblist *rblist __used,
+ struct rb_node *rb_node)
+{
+ struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+ int_node__delete(node);
+}
+
+static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+ int i = (int)((long)entry);
+ struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+ return node->i - i;
+}
+
+int intlist__add(struct intlist *ilist, int i)
+{
+ return rblist__add_node(&ilist->rblist, (void *)((long)i));
+}
+
+void intlist__remove(struct intlist *ilist __used, struct int_node *node)
+{
+ int_node__delete(node);
+}
+
+struct int_node *intlist__find(struct intlist *ilist, int i)
+{
+ struct int_node *node = NULL;
+ struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+
+ if (rb_node)
+ node = container_of(rb_node, struct int_node, rb_node);
+
+ return node;
+}
+
+struct intlist *intlist__new(void)
+{
+ struct intlist *ilist = malloc(sizeof(*ilist));
+
+ if (ilist != NULL) {
+ rblist__init(&ilist->rblist);
+ ilist->rblist.node_cmp = intlist__node_cmp;
+ ilist->rblist.node_new = intlist__node_new;
+ ilist->rblist.node_delete = intlist__node_delete;
+ }
+
+ return ilist;
+}
+
+void intlist__delete(struct intlist *ilist)
+{
+ if (ilist != NULL)
+ rblist__delete(&ilist->rblist);
+}
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
+{
+ struct int_node *node = NULL;
+ struct rb_node *rb_node;
+
+ rb_node = rblist__entry(&ilist->rblist, idx);
+ if (rb_node)
+ node = container_of(rb_node, struct int_node, rb_node);
+
+ return node;
+}
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
new file mode 100644
index 000000000000..6d63ab90db50
--- /dev/null
+++ b/tools/perf/util/intlist.h
@@ -0,0 +1,75 @@
+#ifndef __PERF_INTLIST_H
+#define __PERF_INTLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+#include "rblist.h"
+
+struct int_node {
+ struct rb_node rb_node;
+ int i;
+};
+
+struct intlist {
+ struct rblist rblist;
+};
+
+struct intlist *intlist__new(void);
+void intlist__delete(struct intlist *ilist);
+
+void intlist__remove(struct intlist *ilist, struct int_node *in);
+int intlist__add(struct intlist *ilist, int i);
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
+struct int_node *intlist__find(struct intlist *ilist, int i);
+
+static inline bool intlist__has_entry(struct intlist *ilist, int i)
+{
+ return intlist__find(ilist, i) != NULL;
+}
+
+static inline bool intlist__empty(const struct intlist *ilist)
+{
+ return rblist__empty(&ilist->rblist);
+}
+
+static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
+{
+ return rblist__nr_entries(&ilist->rblist);
+}
+
+/* For intlist iteration */
+static inline struct int_node *intlist__first(struct intlist *ilist)
+{
+ struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+static inline struct int_node *intlist__next(struct int_node *in)
+{
+ struct rb_node *rn;
+ if (!in)
+ return NULL;
+ rn = rb_next(&in->rb_node);
+ return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+
+/**
+ * intlist__for_each - iterate over an intlist
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @ilist: the &struct intlist to iterate over.
+ */
+#define intlist__for_each(pos, ilist) \
+ for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
+
+/**
+ * intlist__for_each_safe - iterate over an intlist, safe against removal of
+ * int_node
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @n: another &struct int_node to use as temporary storage.
+ * @ilist: the &struct intlist to iterate over.
+ */
+#define intlist__for_each_safe(pos, n, ilist) \
+ for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
+ pos = n, n = intlist__next(n))
+#endif /* __PERF_INTLIST_H */
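The intlist API added above mirrors strlist but keys on plain ints; builtin-top.c already uses it earlier in this patch to print the "can't find guest kernel" warning only once per guest pid. A hedged stand-alone usage sketch (example_intlist is illustrative, not part of the patch):

	#include <stdio.h>
	#include "util/intlist.h"

	static void example_intlist(void)
	{
		struct intlist *seen = intlist__new();
		struct int_node *pos;

		if (seen == NULL)
			return;

		if (!intlist__has_entry(seen, 42))
			intlist__add(seen, 42);	/* only record/warn the first time */

		intlist__for_each(pos, seen)
			printf("seen pid %d\n", pos->i);

		intlist__delete(seen);
	}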
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 1b997d2b89ce..127d648cc548 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -13,6 +13,9 @@ do { \
} \
} while (0)
+#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -21,8 +24,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
- evsel->attr.sample_type);
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
return 0;
}
@@ -37,8 +39,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
- == evsel->attr.sample_type);
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period",
1 == evsel->attr.sample_period);
}
@@ -428,8 +429,7 @@ static int test__checkevent_list(struct perf_evlist *evlist)
evsel = list_entry(evsel->node.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
- evsel->attr.sample_type);
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 99d02aa57dbf..594f8fad5ecd 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -1,6 +1,7 @@
#include "util.h"
#include "parse-options.h"
#include "cache.h"
+#include "header.h"
#define OPT_SHORT 1
#define OPT_UNSET 2
@@ -413,6 +414,8 @@ int parse_options(int argc, const char **argv, const struct option *options,
{
struct parse_opt_ctx_t ctx;
+ perf_header__set_cmdline(argc, argv);
+
parse_options_start(&ctx, argc, argv, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
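Moving perf_header__set_cmdline() into parse_options() (and dropping the explicit call from builtin-record.c earlier in this patch) means every builtin records its command line for the perf.data header automatically; the early return added in header.c keeps the outermost command line when builtins nest, e.g. cmd_kvm invoking cmd_record. The guard is just a set-once idiom; a hedged stand-alone sketch:

	/* Illustrative only: keep the first value recorded, make later calls no-ops. */
	static int saved_argc;
	static const char **saved_argv;

	static int example_set_cmdline(int argc, const char **argv)
	{
		if (saved_argv)
			return 0;	/* an outer command already recorded it */

		saved_argc = argc;
		saved_argv = argv;
		return 0;
	}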
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index e03b58a48424..0688bfb6d280 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -797,17 +797,13 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
event = perf_evlist__mmap_read(evlist, cpu);
if (event != NULL) {
- struct perf_evsel *first;
PyObject *pyevent = pyrf_event__new(event);
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
if (pyevent == NULL)
return PyErr_NoMemory();
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
- err = perf_event__parse_sample(event, first->attr.sample_type,
- perf_evsel__sample_size(first),
- sample_id_all, &pevent->sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false);
if (err)
return PyErr_Format(PyExc_OSError,
"perf: can't parse sample, err=%d", err);
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
new file mode 100644
index 000000000000..0171fb611004
--- /dev/null
+++ b/tools/perf/util/rblist.c
@@ -0,0 +1,107 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "rblist.h"
+
+int rblist__add_node(struct rblist *rblist, const void *new_entry)
+{
+ struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node *parent = NULL, *new_node;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+
+ rc = rblist->node_cmp(parent, new_entry);
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ new_node = rblist->node_new(rblist, new_entry);
+ if (new_node == NULL)
+ return -ENOMEM;
+
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &rblist->entries);
+ ++rblist->nr_entries;
+
+ return 0;
+}
+
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
+{
+ rb_erase(rb_node, &rblist->entries);
+ rblist->node_delete(rblist, rb_node);
+}
+
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+{
+ struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+
+ rc = rblist->node_cmp(parent, entry);
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return parent;
+ }
+
+ return NULL;
+}
+
+void rblist__init(struct rblist *rblist)
+{
+ if (rblist != NULL) {
+ rblist->entries = RB_ROOT;
+ rblist->nr_entries = 0;
+ }
+
+ return;
+}
+
+void rblist__delete(struct rblist *rblist)
+{
+ if (rblist != NULL) {
+ struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+ while (next) {
+ pos = next;
+ next = rb_next(pos);
+ rb_erase(pos, &rblist->entries);
+ rblist->node_delete(rblist, pos);
+ }
+ free(rblist);
+ }
+}
+
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
+{
+ struct rb_node *node;
+
+ for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ if (!idx--)
+ return node;
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
new file mode 100644
index 000000000000..6d0cae5ae83d
--- /dev/null
+++ b/tools/perf/util/rblist.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_RBLIST_H
+#define __PERF_RBLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+/*
+ * create node structs of the form:
+ * struct my_node {
+ * struct rb_node rb_node;
+ * ... my data ...
+ * };
+ *
+ * create list structs of the form:
+ * struct mylist {
+ * struct rblist rblist;
+ * ... my data ...
+ * };
+ */
+
+struct rblist {
+ struct rb_root entries;
+ unsigned int nr_entries;
+
+ int (*node_cmp)(struct rb_node *rbn, const void *entry);
+ struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
+ void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node);
+};
+
+void rblist__init(struct rblist *rblist);
+void rblist__delete(struct rblist *rblist);
+int rblist__add_node(struct rblist *rblist, const void *new_entry);
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
+
+static inline bool rblist__empty(const struct rblist *rblist)
+{
+ return rblist->nr_entries == 0;
+}
+
+static inline unsigned int rblist__nr_entries(const struct rblist *rblist)
+{
+ return rblist->nr_entries;
+}
+
+#endif /* __PERF_RBLIST_H */
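rblist factors the red-black-tree bookkeeping out of strlist (and now intlist): a new list type embeds struct rb_node in its node, embeds struct rblist in its list, and supplies the three callbacks. A hedged sketch of that pattern with hypothetical names ("long_node", example_longlist):

	#include <stdlib.h>
	#include <linux/compiler.h>
	#include <linux/kernel.h>	/* container_of() */
	#include "util/rblist.h"

	struct long_node {
		struct rb_node rb_node;
		long val;
	};

	static struct rb_node *long_node_new(struct rblist *rblist __used,
					     const void *entry)
	{
		struct long_node *n = malloc(sizeof(*n));

		if (n == NULL)
			return NULL;
		n->val = (long)entry;
		return &n->rb_node;
	}

	static int long_node_cmp(struct rb_node *rb_node, const void *entry)
	{
		struct long_node *n = container_of(rb_node, struct long_node, rb_node);
		long val = (long)entry;

		return n->val < val ? -1 : n->val > val ? 1 : 0;
	}

	static void long_node_delete(struct rblist *rblist __used,
				     struct rb_node *rb_node)
	{
		free(container_of(rb_node, struct long_node, rb_node));
	}

	static void example_longlist(void)
	{
		struct rblist rblist;

		rblist__init(&rblist);
		rblist.node_cmp = long_node_cmp;
		rblist.node_new = long_node_new;
		rblist.node_delete = long_node_delete;

		rblist__add_node(&rblist, (void *)42L);
		rblist__remove_node(&rblist, rblist__find(&rblist, (void *)42L));
		/* Note: rblist__delete() also free()s the rblist itself, so it is
		 * intended for heap-allocated lists (as strlist/intlist allocate). */
	}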
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8e4f0755d2aa..2437fb0b463a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -80,14 +80,12 @@ out_close:
return -1;
}
-void perf_session__update_sample_type(struct perf_session *self)
+void perf_session__set_id_hdr_size(struct perf_session *session)
{
- self->sample_type = perf_evlist__sample_type(self->evlist);
- self->sample_size = __perf_evsel__sample_size(self->sample_type);
- self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
- self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
- self->host_machine.id_hdr_size = self->id_hdr_size;
- machines__set_id_hdr_size(&self->machines, self->id_hdr_size);
+ u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
+
+ session->host_machine.id_hdr_size = id_hdr_size;
+ machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *self)
@@ -147,7 +145,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
if (mode == O_RDONLY) {
if (perf_session__open(self, force) < 0)
goto out_delete;
- perf_session__update_sample_type(self);
+ perf_session__set_id_hdr_size(self);
} else if (mode == O_WRONLY) {
/*
* In O_RDONLY mode this will be performed when reading the
@@ -158,7 +156,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
}
if (tool && tool->ordering_requires_timestamps &&
- tool->ordered_samples && !self->sample_id_all) {
+ tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_samples = false;
}
@@ -673,7 +671,8 @@ static void flush_sample_queue(struct perf_session *s,
if (iter->timestamp > limit)
break;
- ret = perf_session__parse_sample(s, iter->event, &sample);
+ ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
+ s->header.needs_swap);
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
else
@@ -865,16 +864,18 @@ static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
+ u64 sample_type = perf_evlist__sample_type(session->evlist);
+
if (event->header.type != PERF_RECORD_SAMPLE &&
- !session->sample_id_all) {
+ !perf_evlist__sample_id_all(session->evlist)) {
fputs("-1 -1 ", stdout);
return;
}
- if ((session->sample_type & PERF_SAMPLE_CPU))
+ if ((sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);
- if (session->sample_type & PERF_SAMPLE_TIME)
+ if (sample_type & PERF_SAMPLE_TIME)
printf("%" PRIu64 " ", sample->time);
}
@@ -899,6 +900,8 @@ static void dump_event(struct perf_session *session, union perf_event *event,
static void dump_sample(struct perf_session *session, union perf_event *event,
struct perf_sample *sample)
{
+ u64 sample_type;
+
if (!dump_trace)
return;
@@ -906,10 +909,12 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
- if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
+ sample_type = perf_evlist__sample_type(session->evlist);
+
+ if (sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(sample);
- if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
+ if (sample_type & PERF_SAMPLE_BRANCH_STACK)
branch_stack__printf(sample);
}
@@ -1006,7 +1011,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
union perf_event *event, struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE ||
- !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
+ !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
return 0;
if (!ip_callchain__valid(sample->callchain, event)) {
@@ -1030,7 +1035,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(event, &session->evlist);
if (err == 0)
- perf_session__update_sample_type(session);
+ perf_session__set_id_hdr_size(session);
return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
return tool->event_type(tool, event);
@@ -1065,7 +1070,7 @@ static int perf_session__process_event(struct perf_session *session,
int ret;
if (session->header.needs_swap)
- event_swap(event, session->sample_id_all);
+ event_swap(event, perf_evlist__sample_id_all(session->evlist));
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
@@ -1078,7 +1083,8 @@ static int perf_session__process_event(struct perf_session *session,
/*
* For all kernel events we get the sample data
*/
- ret = perf_session__parse_sample(session, event, &sample);
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample,
+ session->header.needs_swap);
if (ret)
return ret;
@@ -1389,9 +1395,9 @@ int perf_session__process_events(struct perf_session *self,
return err;
}
-bool perf_session__has_traces(struct perf_session *self, const char *msg)
+bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
- if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+ if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
return false;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 7c435bde6eb0..1f7ec87db7d7 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -41,13 +41,9 @@ struct perf_session {
* perf.data file.
*/
struct hists hists;
- u64 sample_type;
- int sample_size;
int fd;
bool fd_pipe;
bool repipe;
- bool sample_id_all;
- u16 id_hdr_size;
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
@@ -86,7 +82,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr);
int perf_session__create_kernel_maps(struct perf_session *self);
-void perf_session__update_sample_type(struct perf_session *self);
+void perf_session__set_id_hdr_size(struct perf_session *session);
void perf_session__remove_thread(struct perf_session *self, struct thread *th);
static inline
@@ -130,24 +126,6 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
-static inline int perf_session__parse_sample(struct perf_session *session,
- const union perf_event *event,
- struct perf_sample *sample)
-{
- return perf_event__parse_sample(event, session->sample_type,
- session->sample_size,
- session->sample_id_all, sample,
- session->header.needs_swap);
-}
-
-static inline int perf_session__synthesize_sample(struct perf_session *session,
- union perf_event *event,
- const struct perf_sample *sample)
-{
- return perf_event__synthesize_sample(event, session->sample_type,
- sample, session->header.needs_swap);
-}
-
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
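With the cached sample_type/sample_size/sample_id_all fields gone from struct perf_session, the perf_session__parse_sample() wrapper disappears as well; its former call sites (flush_sample_queue, perf_session__process_event, and so on above) call the evlist helper directly and pass the swap flag from the file header. Roughly, the removed wrapper is replaced by the equivalent of the following (hedged sketch, not part of the patch):

	#include "util/event.h"
	#include "util/evlist.h"
	#include "util/session.h"

	static inline int example_session_parse_sample(struct perf_session *session,
						       union perf_event *event,
						       struct perf_sample *sample)
	{
		return perf_evlist__parse_sample(session->evlist, event, sample,
						 session->header.needs_swap);
	}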
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 6783a2043555..95856ff3dda4 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -10,23 +10,28 @@
#include <stdlib.h>
#include <string.h>
-static struct str_node *str_node__new(const char *s, bool dupstr)
+static
+struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
{
- struct str_node *self = malloc(sizeof(*self));
+ const char *s = entry;
+ struct rb_node *rc = NULL;
+ struct strlist *strlist = container_of(rblist, struct strlist, rblist);
+ struct str_node *snode = malloc(sizeof(*snode));
- if (self != NULL) {
- if (dupstr) {
+ if (snode != NULL) {
+ if (strlist->dupstr) {
s = strdup(s);
if (s == NULL)
goto out_delete;
}
- self->s = s;
+ snode->s = s;
+ rc = &snode->rb_node;
}
- return self;
+ return rc;
out_delete:
- free(self);
+ free(snode);
return NULL;
}
@@ -37,36 +42,26 @@ static void str_node__delete(struct str_node *self, bool dupstr)
free(self);
}
-int strlist__add(struct strlist *self, const char *new_entry)
+static
+void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
{
- struct rb_node **p = &self->entries.rb_node;
- struct rb_node *parent = NULL;
- struct str_node *sn;
-
- while (*p != NULL) {
- int rc;
-
- parent = *p;
- sn = rb_entry(parent, struct str_node, rb_node);
- rc = strcmp(sn->s, new_entry);
-
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return -EEXIST;
- }
+ struct strlist *slist = container_of(rblist, struct strlist, rblist);
+ struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
- sn = str_node__new(new_entry, self->dupstr);
- if (sn == NULL)
- return -ENOMEM;
+ str_node__delete(snode, slist->dupstr);
+}
- rb_link_node(&sn->rb_node, parent, p);
- rb_insert_color(&sn->rb_node, &self->entries);
- ++self->nr_entries;
+static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+ const char *str = entry;
+ struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+ return strcmp(snode->s, str);
+}
- return 0;
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ return rblist__add_node(&self->rblist, new_entry);
}
int strlist__load(struct strlist *self, const char *filename)
@@ -96,34 +91,20 @@ out:
return err;
}
-void strlist__remove(struct strlist *self, struct str_node *sn)
+void strlist__remove(struct strlist *slist, struct str_node *snode)
{
- rb_erase(&sn->rb_node, &self->entries);
- str_node__delete(sn, self->dupstr);
+ str_node__delete(snode, slist->dupstr);
}
-struct str_node *strlist__find(struct strlist *self, const char *entry)
+struct str_node *strlist__find(struct strlist *slist, const char *entry)
{
- struct rb_node **p = &self->entries.rb_node;
- struct rb_node *parent = NULL;
-
- while (*p != NULL) {
- struct str_node *sn;
- int rc;
-
- parent = *p;
- sn = rb_entry(parent, struct str_node, rb_node);
- rc = strcmp(sn->s, entry);
-
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return sn;
- }
+ struct str_node *snode = NULL;
+ struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
- return NULL;
+ if (rb_node)
+ snode = container_of(rb_node, struct str_node, rb_node);
+
+ return snode;
}
static int strlist__parse_list_entry(struct strlist *self, const char *s)
@@ -156,9 +137,12 @@ struct strlist *strlist__new(bool dupstr, const char *slist)
struct strlist *self = malloc(sizeof(*self));
if (self != NULL) {
- self->entries = RB_ROOT;
+ rblist__init(&self->rblist);
+ self->rblist.node_cmp = strlist__node_cmp;
+ self->rblist.node_new = strlist__node_new;
+ self->rblist.node_delete = strlist__node_delete;
+
self->dupstr = dupstr;
- self->nr_entries = 0;
if (slist && strlist__parse_list(self, slist) != 0)
goto out_error;
}
@@ -171,30 +155,18 @@ out_error:
void strlist__delete(struct strlist *self)
{
- if (self != NULL) {
- struct str_node *pos;
- struct rb_node *next = rb_first(&self->entries);
-
- while (next) {
- pos = rb_entry(next, struct str_node, rb_node);
- next = rb_next(&pos->rb_node);
- strlist__remove(self, pos);
- }
- self->entries = RB_ROOT;
- free(self);
- }
+ if (self != NULL)
+ rblist__delete(&self->rblist);
}
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
{
- struct rb_node *nd;
+ struct str_node *snode = NULL;
+ struct rb_node *rb_node;
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
- struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+ rb_node = rblist__entry(&slist->rblist, idx);
+ if (rb_node)
+ snode = container_of(rb_node, struct str_node, rb_node);
- if (!idx--)
- return pos;
- }
-
- return NULL;
+ return snode;
}
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 3ba839007d2c..dd9f922ec67c 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -4,14 +4,15 @@
#include <linux/rbtree.h>
#include <stdbool.h>
+#include "rblist.h"
+
struct str_node {
struct rb_node rb_node;
const char *s;
};
struct strlist {
- struct rb_root entries;
- unsigned int nr_entries;
+ struct rblist rblist;
bool dupstr;
};
@@ -32,18 +33,18 @@ static inline bool strlist__has_entry(struct strlist *self, const char *entry)
static inline bool strlist__empty(const struct strlist *self)
{
- return self->nr_entries == 0;
+ return rblist__empty(&self->rblist);
}
static inline unsigned int strlist__nr_entries(const struct strlist *self)
{
- return self->nr_entries;
+ return rblist__nr_entries(&self->rblist);
}
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *self)
{
- struct rb_node *rn = rb_first(&self->entries);
+ struct rb_node *rn = rb_first(&self->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fdad4eeeb429..8b63b678e127 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -64,7 +64,7 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__NOT_FOUND,
};
-#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab)
+#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
static enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__BUILD_ID_CACHE,
@@ -72,7 +72,7 @@ static enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__NOT_FOUND,
};
-#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data)
+#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data)
int dso__name_len(const struct dso *dso)
{
@@ -2875,6 +2875,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
int i, items = 0;
char path[PATH_MAX];
pid_t pid;
+ char *endp;
if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_modules ||
@@ -2891,7 +2892,14 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
/* Filter out . and .. */
continue;
}
- pid = atoi(namelist[i]->d_name);
+ pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
+ if ((*endp != '\0') ||
+ (endp == namelist[i]->d_name) ||
+ (errno == ERANGE)) {
+ pr_debug("invalid directory (%s). Skipping.\n",
+ namelist[i]->d_name);
+ continue;
+ }
sprintf(path, "%s/%s/proc/kallsyms",
symbol_conf.guestmount,
namelist[i]->d_name);
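The symbol.c hunk above replaces atoi(), which silently returns 0 for non-numeric directory names under the guest mount point, with strtol() plus validation so such entries are skipped. A hedged stand-alone sketch of the same validation (example_parse_pid is illustrative, not part of the patch):

	#include <errno.h>
	#include <stdlib.h>
	#include <sys/types.h>

	static int example_parse_pid(const char *name, pid_t *pid)
	{
		char *endp;
		long val;

		errno = 0;
		val = strtol(name, &endp, 10);
		if (endp == name || *endp != '\0' || errno == ERANGE)
			return -1;	/* empty, trailing junk, or out of range */

		*pid = (pid_t)val;
		return 0;
	}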
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 3f59c496e64c..051eaa68095e 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -110,7 +110,7 @@ int perf_target__strerror(struct perf_target *target, int errnum,
int idx;
const char *msg;
- BUG_ON(buflen > 0);
+ BUG_ON(buflen == 0);
if (errnum >= 0) {
const char *err = strerror_r(errnum, buf, buflen);