Diffstat (limited to 'include')
-rw-r--r--include/acpi/acbuffer.h14
-rw-r--r--include/acpi/acnames.h5
-rw-r--r--include/acpi/acpi_bus.h37
-rw-r--r--include/acpi/acpixf.h3
-rw-r--r--include/acpi/actbl1.h19
-rw-r--r--include/acpi/actbl3.h9
-rw-r--r--include/acpi/actypes.h8
-rw-r--r--include/acpi/processor.h7
-rw-r--r--include/asm-generic/atomic.h194
-rw-r--r--include/asm-generic/atomic64.h20
-rw-r--r--include/asm-generic/barrier.h8
-rw-r--r--include/asm-generic/clkdev.h2
-rw-r--r--include/asm-generic/cputime_jiffies.h2
-rw-r--r--include/asm-generic/cputime_nsecs.h2
-rw-r--r--include/asm-generic/dma-contiguous.h9
-rw-r--r--include/asm-generic/dma-mapping-common.h17
-rw-r--r--include/asm-generic/futex.h114
-rw-r--r--include/asm-generic/gpio.h2
-rw-r--r--include/asm-generic/hash.h9
-rw-r--r--include/asm-generic/io.h753
-rw-r--r--include/asm-generic/irq_work.h10
-rw-r--r--include/asm-generic/mm_hooks.h17
-rw-r--r--include/asm-generic/msi.h32
-rw-r--r--include/asm-generic/pgtable.h56
-rw-r--r--include/asm-generic/preempt.h3
-rw-r--r--include/asm-generic/seccomp.h30
-rw-r--r--include/asm-generic/sections.h4
-rw-r--r--include/asm-generic/syscall.h2
-rw-r--r--include/asm-generic/tlb.h57
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/crypto/drbg.h19
-rw-r--r--include/crypto/hash.h497
-rw-r--r--include/crypto/if_alg.h1
-rw-r--r--include/crypto/internal/hash.h9
-rw-r--r--include/crypto/mcryptd.h112
-rw-r--r--include/crypto/public_key.h6
-rw-r--r--include/crypto/rng.h80
-rw-r--r--include/drm/ati_pcigart.h30
-rw-r--r--include/drm/drmP.h750
-rw-r--r--include/drm/drm_agpsupport.h26
-rw-r--r--include/drm/drm_atomic.h69
-rw-r--r--include/drm/drm_atomic_helper.h126
-rw-r--r--include/drm/drm_buffer.h148
-rw-r--r--include/drm/drm_crtc.h353
-rw-r--r--include/drm/drm_crtc_helper.h13
-rw-r--r--include/drm/drm_displayid.h76
-rw-r--r--include/drm/drm_dp_helper.h42
-rw-r--r--include/drm/drm_dp_mst_helper.h10
-rw-r--r--include/drm/drm_edid.h109
-rw-r--r--include/drm/drm_fb_helper.h7
-rw-r--r--include/drm/drm_flip_work.h33
-rw-r--r--include/drm/drm_gem.h183
-rw-r--r--include/drm/drm_gem_cma_helper.h34
-rw-r--r--include/drm/drm_legacy.h203
-rw-r--r--include/drm/drm_memory.h59
-rw-r--r--include/drm/drm_mipi_dsi.h96
-rw-r--r--include/drm/drm_modeset_lock.h23
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/drm_plane_helper.h44
-rw-r--r--include/drm/drm_usb.h15
-rw-r--r--include/drm/i915_pciids.h17
-rw-r--r--include/drm/ttm/ttm_bo_api.h49
-rw-r--r--include/drm/ttm/ttm_bo_driver.h32
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h31
-rw-r--r--include/dt-bindings/arm/ux500_pm_domains.h15
-rw-r--r--include/dt-bindings/clock/exynos3250.h27
-rw-r--r--include/dt-bindings/clock/exynos4.h12
-rw-r--r--include/dt-bindings/clock/exynos4415.h360
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h92
-rw-r--r--include/dt-bindings/clock/hix5hd2-clock.h27
-rw-r--r--include/dt-bindings/clock/imx5-clock.h5
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h43
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h30
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h25
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h74
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h57
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h54
-rw-r--r--include/dt-bindings/clock/maxim,max77686.h23
-rw-r--r--include/dt-bindings/clock/maxim,max77802.h22
-rw-r--r--include/dt-bindings/clock/pxa-clock.h77
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-apq8084.h2
-rw-r--r--include/dt-bindings/clock/r8a7740-clock.h78
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h13
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h10
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h89
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h1
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h95
-rw-r--r--include/dt-bindings/clock/rockchip,rk808.h11
-rw-r--r--include/dt-bindings/clock/stih407-clks.h86
-rw-r--r--include/dt-bindings/clock/stih410-clks.h25
-rw-r--r--include/dt-bindings/clock/tegra114-car.h2
-rw-r--r--include/dt-bindings/clock/tegra124-car.h8
-rw-r--r--include/dt-bindings/clock/tegra20-car.h2
-rw-r--r--include/dt-bindings/clock/vf610-clock.h40
-rw-r--r--include/dt-bindings/dma/at91.h25
-rw-r--r--include/dt-bindings/interrupt-controller/mips-gic.h9
-rw-r--r--include/dt-bindings/memory/tegra114-mc.h25
-rw-r--r--include/dt-bindings/memory/tegra124-mc.h31
-rw-r--r--include/dt-bindings/memory/tegra30-mc.h24
-rw-r--r--include/dt-bindings/phy/phy.h19
-rw-r--r--include/dt-bindings/pinctrl/at91.h5
-rw-r--r--include/dt-bindings/pinctrl/dra.h4
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h142
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h44
-rw-r--r--include/dt-bindings/pinctrl/rockchip.h2
-rw-r--r--include/dt-bindings/regulator/maxim,max77802.h18
-rw-r--r--include/dt-bindings/reset-controller/stih407-resets.h61
-rw-r--r--include/dt-bindings/sound/cs35l32.h26
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h13
-rw-r--r--include/dt-bindings/thermal/thermal.h2
-rw-r--r--include/keys/asymmetric-type.h41
-rw-r--r--include/keys/user-type.h1
-rw-r--r--include/kvm/arm_arch_timer.h10
-rw-r--r--include/kvm/arm_vgic.h136
-rw-r--r--include/linux/acpi.h171
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/ahci_platform.h13
-rw-r--r--include/linux/amba/bus.h16
-rw-r--r--include/linux/ata_platform.h5
-rw-r--r--include/linux/ath9k_platform.h3
-rw-r--r--include/linux/atmel-mci.h4
-rw-r--r--include/linux/atmel_tc.h13
-rw-r--r--include/linux/atomic.h36
-rw-r--r--include/linux/audit.h40
-rw-r--r--include/linux/backing-dev.h4
-rw-r--r--include/linux/balloon_compaction.h169
-rw-r--r--include/linux/bcma/bcma.h8
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h8
-rw-r--r--include/linux/bcma/bcma_driver_mips.h4
-rw-r--r--include/linux/bcma/bcma_regs.h5
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bio.h70
-rw-r--r--include/linux/bitmap.h39
-rw-r--r--include/linux/bitops.h27
-rw-r--r--include/linux/blk-mq.h48
-rw-r--r--include/linux/blk_types.h18
-rw-r--r--include/linux/blkdev.h84
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/bpf.h145
-rw-r--r--include/linux/brcmphy.h137
-rw-r--r--include/linux/buffer_head.h47
-rw-r--r--include/linux/cacheinfo.h100
-rw-r--r--include/linux/can/dev.h9
-rw-r--r--include/linux/ceph/auth.h26
-rw-r--r--include/linux/ceph/buffer.h3
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/ceph_fs.h10
-rw-r--r--include/linux/ceph/libceph.h3
-rw-r--r--include/linux/ceph/messenger.h9
-rw-r--r--include/linux/ceph/msgr.h11
-rw-r--r--include/linux/ceph/osd_client.h13
-rw-r--r--include/linux/ceph/pagelist.h7
-rw-r--r--include/linux/ceph/rados.h225
-rw-r--r--include/linux/cgroup.h60
-rw-r--r--include/linux/clk-private.h2
-rw-r--r--include/linux/clk-provider.h42
-rw-r--r--include/linux/clk.h31
-rw-r--r--include/linux/clk/at91_pmc.h1
-rw-r--r--include/linux/clk/ti.h16
-rw-r--r--include/linux/clock_cooling.h65
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cma.h8
-rw-r--r--include/linux/com20020.h29
-rw-r--r--include/linux/compaction.h30
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/compiler-gcc4.h1
-rw-r--r--include/linux/compiler-gcc5.h65
-rw-r--r--include/linux/compiler.h74
-rw-r--r--include/linux/coresight.h263
-rw-r--r--include/linux/cpu.h6
-rw-r--r--include/linux/cpu_cooling.h6
-rw-r--r--include/linux/cpufreq-dt.h22
-rw-r--r--include/linux/cpufreq.h57
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/cpumask.h28
-rw-r--r--include/linux/cpuset.h40
-rw-r--r--include/linux/crash_dump.h15
-rw-r--r--include/linux/crc-t10dif.h5
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/crypto.h1112
-rw-r--r--include/linux/cycx_x25.h125
-rw-r--r--include/linux/dcache.h18
-rw-r--r--include/linux/debugfs.h24
-rw-r--r--include/linux/devcoredump.h35
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/device.h55
-rw-r--r--include/linux/dma-mapping.h39
-rw-r--r--include/linux/dma/dw.h64
-rw-r--r--include/linux/dmaengine.h42
-rw-r--r--include/linux/dmar.h58
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/dw_dmac.h111
-rw-r--r--include/linux/dynamic_debug.h12
-rw-r--r--include/linux/dynamic_queue_limits.h12
-rw-r--r--include/linux/edac.h4
-rw-r--r--include/linux/eeprom_93cx6.h4
-rw-r--r--include/linux/efi.h23
-rw-r--r--include/linux/elf.h5
-rw-r--r--include/linux/etherdevice.h13
-rw-r--r--include/linux/ethtool.h46
-rw-r--r--include/linux/extcon/extcon-gpio.h4
-rw-r--r--include/linux/extcon/sm5502.h287
-rw-r--r--include/linux/f2fs_fs.h33
-rw-r--r--include/linux/fault-inject.h17
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/filter.h197
-rw-r--r--include/linux/flex_proportions.h5
-rw-r--r--include/linux/font.h4
-rw-r--r--include/linux/freezer.h50
-rw-r--r--include/linux/fs.h164
-rw-r--r--include/linux/fs_enet_pd.h1
-rw-r--r--include/linux/fsl_ifc.h23
-rw-r--r--include/linux/fsldma.h13
-rw-r--r--include/linux/fsnotify_backend.h31
-rw-r--r--include/linux/ftrace.h53
-rw-r--r--include/linux/ftrace_event.h11
-rw-r--r--include/linux/genalloc.h7
-rw-r--r--include/linux/genl_magic_func.h4
-rw-r--r--include/linux/gfp.h13
-rw-r--r--include/linux/gpio.h7
-rw-r--r--include/linux/gpio/consumer.h47
-rw-r--r--include/linux/gpio/driver.h20
-rw-r--r--include/linux/gpio_keys.h3
-rw-r--r--include/linux/hash.h35
-rw-r--r--include/linux/hdmi.h21
-rw-r--r--include/linux/hid.h46
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h49
-rw-r--r--include/linux/hugetlb_cgroup.h1
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/i2c.h31
-rw-r--r--include/linux/i2c/pmbus.h4
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/i82593.h229
-rw-r--r--include/linux/ieee80211.h144
-rw-r--r--include/linux/ieee802154.h (renamed from include/net/ieee802154.h)61
-rw-r--r--include/linux/if_bridge.h31
-rw-r--r--include/linux/if_macvlan.h1
-rw-r--r--include/linux/if_vlan.h107
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/common/st_sensors.h10
-rw-r--r--include/linux/iio/events.h2
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init_task.h24
-rw-r--r--include/linux/integrity.h6
-rw-r--r--include/linux/interrupt.h11
-rw-r--r--include/linux/io.h4
-rw-r--r--include/linux/iommu.h59
-rw-r--r--include/linux/ioport.h5
-rw-r--r--include/linux/ipack.h24
-rw-r--r--include/linux/ipc_namespace.h23
-rw-r--r--include/linux/ipmi.h6
-rw-r--r--include/linux/ipmi_smi.h10
-rw-r--r--include/linux/ipv6.h11
-rw-r--r--include/linux/irq.h73
-rw-r--r--include/linux/irq_work.h3
-rw-r--r--include/linux/irqchip/arm-gic-v3.h128
-rw-r--r--include/linux/irqchip/arm-gic.h20
-rw-r--r--include/linux/irqchip/irq-omap-intc.h32
-rw-r--r--include/linux/irqchip/mips-gic.h249
-rw-r--r--include/linux/irqdesc.h29
-rw-r--r--include/linux/irqdomain.h101
-rw-r--r--include/linux/irqhandler.h14
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/jump_label.h17
-rw-r--r--include/linux/kern_levels.h13
-rw-r--r--include/linux/kernel.h71
-rw-r--r--include/linux/kernel_stat.h10
-rw-r--r--include/linux/kernelcapi.h2
-rw-r--r--include/linux/kernfs.h8
-rw-r--r--include/linux/kexec.h1
-rw-r--r--include/linux/key-type.h34
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/khugepaged.h17
-rw-r--r--include/linux/kmemleak.h2
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kvm_host.h140
-rw-r--r--include/linux/kvm_types.h41
-rw-r--r--include/linux/leds.h59
-rw-r--r--include/linux/libata.h22
-rw-r--r--include/linux/list.h35
-rw-r--r--include/linux/lockd/debug.h6
-rw-r--r--include/linux/lockd/lockd.h1
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/mailbox_client.h49
-rw-r--r--include/linux/mailbox_controller.h133
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mbus.h1
-rw-r--r--include/linux/mei_cl_bus.h1
-rw-r--r--include/linux/memcontrol.h129
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h7
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h1
-rw-r--r--include/linux/mfd/arizona/core.h1
-rw-r--r--include/linux/mfd/arizona/registers.h79
-rw-r--r--include/linux/mfd/atmel-hlcdc.h85
-rw-r--r--include/linux/mfd/axp20x.h59
-rw-r--r--include/linux/mfd/core.h10
-rw-r--r--include/linux/mfd/cros_ec.h24
-rw-r--r--include/linux/mfd/cros_ec_commands.h3
-rw-r--r--include/linux/mfd/da9052/da9052.h2
-rw-r--r--include/linux/mfd/davinci_voicecodec.h9
-rw-r--r--include/linux/mfd/dln2.h103
-rw-r--r--include/linux/mfd/hi6421-pmic.h41
-rw-r--r--include/linux/mfd/max14577-private.h95
-rw-r--r--include/linux/mfd/max14577.h30
-rw-r--r--include/linux/mfd/max77686.h7
-rw-r--r--include/linux/mfd/max77693-private.h69
-rw-r--r--include/linux/mfd/max77693.h40
-rw-r--r--include/linux/mfd/rk808.h196
-rw-r--r--include/linux/mfd/rn5t618.h228
-rw-r--r--include/linux/mfd/rtsx_pci.h37
-rw-r--r--include/linux/mfd/samsung/core.h23
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h12
-rw-r--r--include/linux/mfd/samsung/s2mps11.h9
-rw-r--r--include/linux/mfd/samsung/s2mps13.h186
-rw-r--r--include/linux/mfd/samsung/s2mps14.h10
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h39
-rw-r--r--include/linux/mfd/tc3589x.h8
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h1
-rw-r--r--include/linux/mfd/ti_ssp.h93
-rw-r--r--include/linux/mfd/tmio.h25
-rw-r--r--include/linux/mfd/tps65217.h2
-rw-r--r--include/linux/migrate.h24
-rw-r--r--include/linux/mlx4/cmd.h31
-rw-r--r--include/linux/mlx4/device.h140
-rw-r--r--include/linux/mlx4/qp.h4
-rw-r--r--include/linux/mlx5/device.h222
-rw-r--r--include/linux/mlx5/driver.h140
-rw-r--r--include/linux/mlx5/mlx5_ifc.h349
-rw-r--r--include/linux/mlx5/qp.h103
-rw-r--r--include/linux/mm.h108
-rw-r--r--include/linux/mm_types.h21
-rw-r--r--include/linux/mmc/card.h33
-rw-r--r--include/linux/mmc/core.h3
-rw-r--r--include/linux/mmc/dw_mmc.h11
-rw-r--r--include/linux/mmc/host.h13
-rw-r--r--include/linux/mmc/mmc.h10
-rw-r--r--include/linux/mmc/sdhci.h21
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmdebug.h20
-rw-r--r--include/linux/mmu_notifier.h114
-rw-r--r--include/linux/mmzone.h74
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h16
-rw-r--r--include/linux/moduleparam.h52
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/msi.h193
-rw-r--r--include/linux/mtd/cfi.h22
-rw-r--r--include/linux/mtd/nand.h30
-rw-r--r--include/linux/mtd/spi-nor.h32
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/namei.h25
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h396
-rw-r--r--include/linux/netfilter/ipset/ip_set.h60
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h1
-rw-r--r--include/linux/netfilter_bridge.h50
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs4.h31
-rw-r--r--include/linux/nfs_fs.h49
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h61
-rw-r--r--include/linux/nl802154.h4
-rw-r--r--include/linux/nmi.h13
-rw-r--r--include/linux/ns_common.h12
-rw-r--r--include/linux/nvme.h18
-rw-r--r--include/linux/of.h252
-rw-r--r--include/linux/of_address.h31
-rw-r--r--include/linux/of_iommu.h23
-rw-r--r--include/linux/of_pci.h25
-rw-r--r--include/linux/of_pdt.h3
-rw-r--r--include/linux/of_platform.h6
-rw-r--r--include/linux/of_reserved_mem.h9
-rw-r--r--include/linux/omap-dma.h37
-rw-r--r--include/linux/omap-gpmc.h199
-rw-r--r--include/linux/omap-mailbox.h16
-rw-r--r--include/linux/oom.h14
-rw-r--r--include/linux/page-debug-flags.h32
-rw-r--r--include/linux/page-isolation.h8
-rw-r--r--include/linux/page_cgroup.h105
-rw-r--r--include/linux/page_counter.h51
-rw-r--r--include/linux/page_ext.h84
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--include/linux/pagemap.h45
-rw-r--r--include/linux/pci-acpi.h7
-rw-r--r--include/linux/pci.h66
-rw-r--r--include/linux/pci_hotplug.h3
-rw-r--r--include/linux/pci_ids.h23
-rw-r--r--include/linux/percpu-defs.h5
-rw-r--r--include/linux/percpu-refcount.h169
-rw-r--r--include/linux/percpu.h17
-rw-r--r--include/linux/percpu_counter.h10
-rw-r--r--include/linux/perf_event.h51
-rw-r--r--include/linux/phonedev.h25
-rw-r--r--include/linux/phy.h53
-rw-r--r--include/linux/phy/phy.h52
-rw-r--r--include/linux/phy_fixed.h33
-rw-r--r--include/linux/pid_namespace.h3
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/pinctrl/pinmux.h7
-rw-r--r--include/linux/pl320-ipc.h (renamed from include/linux/mailbox.h)0
-rw-r--r--include/linux/platform_data/asoc-s3c.h1
-rw-r--r--include/linux/platform_data/bcmgenet.h18
-rw-r--r--include/linux/platform_data/dma-dw.h59
-rw-r--r--include/linux/platform_data/dma-imx.h1
-rw-r--r--include/linux/platform_data/dwc3-exynos.h24
-rw-r--r--include/linux/platform_data/elm.h16
-rw-r--r--include/linux/platform_data/gpio-dwapb.h32
-rw-r--r--include/linux/platform_data/hsmmc-omap.h90
-rw-r--r--include/linux/platform_data/i2c-designware.h21
-rw-r--r--include/linux/platform_data/isl9305.h30
-rw-r--r--include/linux/platform_data/lp855x.h2
-rw-r--r--include/linux/platform_data/mmc-atmel-mci.h22
-rw-r--r--include/linux/platform_data/mmc-omap.h27
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h1
-rw-r--r--include/linux/platform_data/pxa_sdhci.h5
-rw-r--r--include/linux/platform_data/rcar-du.h74
-rw-r--r--include/linux/platform_data/samsung-usbphy.h27
-rw-r--r--include/linux/platform_data/serial-omap.h3
-rw-r--r--include/linux/platform_data/st21nfca.h1
-rw-r--r--include/linux/platform_data/st21nfcb.h1
-rw-r--r--include/linux/platform_data/tegra_emc.h34
-rw-r--r--include/linux/platform_device.h12
-rw-r--r--include/linux/plist.h10
-rw-r--r--include/linux/pm.h24
-rw-r--r--include/linux/pm_clock.h8
-rw-r--r--include/linux/pm_domain.h157
-rw-r--r--include/linux/pm_opp.h12
-rw-r--r--include/linux/pm_qos.h43
-rw-r--r--include/linux/pm_runtime.h27
-rw-r--r--include/linux/pnfs_osd_xdr.h2
-rw-r--r--include/linux/power/charger-manager.h3
-rw-r--r--include/linux/power_supply.h14
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/prio_heap.h58
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/proc_ns.h43
-rw-r--r--include/linux/property.h143
-rw-r--r--include/linux/proportions.h5
-rw-r--r--include/linux/pstore_ram.h4
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/pxa168_eth.h3
-rw-r--r--include/linux/pxa2xx_ssp.h20
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/quotaops.h8
-rw-r--r--include/linux/random.h4
-rw-r--r--include/linux/ratelimit.h12
-rw-r--r--include/linux/rbtree_augmented.h10
-rw-r--r--include/linux/rculist.h17
-rw-r--r--include/linux/rcupdate.h136
-rw-r--r--include/linux/rcutiny.h4
-rw-r--r--include/linux/rcutree.h6
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/regmap.h7
-rw-r--r--include/linux/regulator/consumer.h28
-rw-r--r--include/linux/regulator/da9211.h9
-rw-r--r--include/linux/regulator/driver.h20
-rw-r--r--include/linux/regulator/max1586.h2
-rw-r--r--include/linux/regulator/of_regulator.h9
-rw-r--r--include/linux/res_counter.h223
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/reset.h7
-rw-r--r--include/linux/rhashtable.h17
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rtc.h21
-rw-r--r--include/linux/rtnetlink.h24
-rw-r--r--include/linux/rwsem.h2
-rw-r--r--include/linux/sched.h154
-rw-r--r--include/linux/screen_info.h8
-rw-r--r--include/linux/seccomp.h25
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/seq_buf.h136
-rw-r--r--include/linux/seq_file.h15
-rw-r--r--include/linux/seqlock.h19
-rw-r--r--include/linux/serial_8250.h3
-rw-r--r--include/linux/serial_bcm63xx.h2
-rw-r--r--include/linux/serial_core.h85
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/signal.h29
-rw-r--r--include/linux/skbuff.h544
-rw-r--r--include/linux/slab.h70
-rw-r--r--include/linux/slab_def.h20
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/soc/ti/knav_dma.h175
-rw-r--r--include/linux/soc/ti/knav_qmss.h90
-rw-r--r--include/linux/socket.h26
-rw-r--r--include/linux/spi/mcp23s08.h18
-rw-r--r--include/linux/spi/pxa2xx_spi.h9
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/spmi.h3
-rw-r--r--include/linux/stacktrace.h5
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/string_helpers.h65
-rw-r--r--include/linux/sunrpc/auth.h2
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/debug.h64
-rw-r--r--include/linux/sunrpc/metrics.h3
-rw-r--r--include/linux/sunrpc/sched.h8
-rw-r--r--include/linux/sunrpc/svc.h35
-rw-r--r--include/linux/sunrpc/svc_xprt.h7
-rw-r--r--include/linux/sunrpc/xprt.h4
-rw-r--r--include/linux/sunrpc/xprtsock.h59
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/linux/swap.h30
-rw-r--r--include/linux/swap_cgroup.h42
-rw-r--r--include/linux/syscalls.h12
-rw-r--r--include/linux/sysfs.h9
-rw-r--r--include/linux/syslog.h9
-rw-r--r--include/linux/t10-pi.h22
-rw-r--r--include/linux/tcp.h18
-rw-r--r--include/linux/thermal.h79
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/tick.h4
-rw-r--r--include/linux/time.h17
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h51
-rw-r--r--include/linux/topology.h17
-rw-r--r--include/linux/torture.h5
-rw-r--r--include/linux/trace_seq.h77
-rw-r--r--include/linux/tracepoint.h11
-rw-r--r--include/linux/tty.h43
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/udp.h16
-rw-r--r--include/linux/uio.h15
-rw-r--r--include/linux/uio_driver.h14
-rw-r--r--include/linux/uprobes.h14
-rw-r--r--include/linux/usb.h14
-rw-r--r--include/linux/usb/chipidea.h6
-rw-r--r--include/linux/usb/composite.h7
-rw-r--r--include/linux/usb/ehci-dbgp.h83
-rw-r--r--include/linux/usb/ehci_def.h65
-rw-r--r--include/linux/usb/gadget.h31
-rw-r--r--include/linux/usb/hcd.h14
-rw-r--r--include/linux/usb/of.h5
-rw-r--r--include/linux/usb/otg.h7
-rw-r--r--include/linux/usb/phy.h6
-rw-r--r--include/linux/usb/quirks.h22
-rw-r--r--include/linux/usb/renesas_usbhs.h4
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/user_namespace.h15
-rw-r--r--include/linux/utsname.h3
-rw-r--r--include/linux/vexpress.h19
-rw-r--r--include/linux/virtio.h26
-rw-r--r--include/linux/virtio_byteorder.h59
-rw-r--r--include/linux/virtio_config.h149
-rw-r--r--include/linux/vm_event_item.h8
-rw-r--r--include/linux/vmw_vmci_api.h5
-rw-r--r--include/linux/vringh.h37
-rw-r--r--include/linux/wait.h101
-rw-r--r--include/linux/watchdog.h9
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/media/davinci/dm644x_ccdc.h2
-rw-r--r--include/media/davinci/vpbe.h2
-rw-r--r--include/media/davinci/vpbe_display.h21
-rw-r--r--include/media/davinci/vpbe_venc.h5
-rw-r--r--include/media/exynos-fimc.h2
-rw-r--r--include/media/lirc_dev.h8
-rw-r--r--include/media/omap3isp.h3
-rw-r--r--include/media/radio-si4713.h30
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/si4713.h4
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/soc_mediabus.h6
-rw-r--r--include/media/v4l2-common.h17
-rw-r--r--include/media/v4l2-ctrls.h25
-rw-r--r--include/media/v4l2-image-sizes.h9
-rw-r--r--include/media/v4l2-mediabus.h6
-rw-r--r--include/media/v4l2-subdev.h2
-rw-r--r--include/media/videobuf2-core.h57
-rw-r--r--include/media/videobuf2-dma-sg.h3
-rw-r--r--include/misc/cxl.h48
-rw-r--r--include/net/6lowpan.h12
-rw-r--r--include/net/9p/transport.h1
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/af_ieee802154.h4
-rw-r--r--include/net/af_vsock.h6
-rw-r--r--include/net/ah.h3
-rw-r--r--include/net/bluetooth/bluetooth.h5
-rw-r--r--include/net/bluetooth/hci.h69
-rw-r--r--include/net/bluetooth/hci_core.h101
-rw-r--r--include/net/bluetooth/l2cap.h85
-rw-r--r--include/net/bluetooth/mgmt.h24
-rw-r--r--include/net/bond_3ad.h283
-rw-r--r--include/net/bond_alb.h181
-rw-r--r--include/net/bond_options.h130
-rw-r--r--include/net/bonding.h654
-rw-r--r--include/net/cfg80211.h298
-rw-r--r--include/net/cfg802154.h161
-rw-r--r--include/net/checksum.h20
-rw-r--r--include/net/codel.h2
-rw-r--r--include/net/compat.h5
-rw-r--r--include/net/dsa.h129
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/flow_keys.h16
-rw-r--r--include/net/fou.h19
-rw-r--r--include/net/gen_stats.h15
-rw-r--r--include/net/genetlink.h9
-rw-r--r--include/net/geneve.h97
-rw-r--r--include/net/gue.h116
-rw-r--r--include/net/ieee802154_netdev.h18
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/inet6_hashtables.h15
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_connection_sock.h9
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip.h29
-rw-r--r--include/net/ip6_checksum.h8
-rw-r--r--include/net/ip6_fib.h20
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_fib.h9
-rw-r--r--include/net/ip_tunnels.h54
-rw-r--r--include/net/ip_vs.h223
-rw-r--r--include/net/ipv6.h6
-rw-r--r--include/net/ipx.h5
-rw-r--r--include/net/irda/irda.h15
-rw-r--r--include/net/irda/irlap.h2
-rw-r--r--include/net/irda/parameters.h6
-rw-r--r--include/net/lib80211.h5
-rw-r--r--include/net/llc_c_st.h4
-rw-r--r--include/net/llc_s_st.h2
-rw-r--r--include/net/mac80211.h351
-rw-r--r--include/net/mac802154.h158
-rw-r--r--include/net/mld.h5
-rw-r--r--include/net/mpls.h39
-rw-r--r--include/net/neighbour.h17
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netdma.h32
-rw-r--r--include/net/netfilter/br_netfilter.h6
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h14
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h125
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h10
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h167
-rw-r--r--include/net/netfilter/nf_conntrack.h17
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h6
-rw-r--r--include/net/netfilter/nf_nat.h10
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h75
-rw-r--r--include/net/netfilter/nf_nat_redirect.h12
-rw-r--r--include/net/netfilter/nf_tables.h7
-rw-r--r--include/net/netfilter/nf_tables_bridge.h7
-rw-r--r--include/net/netfilter/nft_masq.h19
-rw-r--r--include/net/netfilter/nft_redir.h21
-rw-r--r--include/net/netfilter/nft_reject.h9
-rw-r--r--include/net/netlink.h12
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/xfrm.h18
-rw-r--r--include/net/nfc/digital.h13
-rw-r--r--include/net/nfc/hci.h4
-rw-r--r--include/net/nfc/nci.h51
-rw-r--r--include/net/nfc/nci_core.h18
-rw-r--r--include/net/nfc/nfc.h2
-rw-r--r--include/net/nl802154.h226
-rw-r--r--include/net/ping.h2
-rw-r--r--include/net/pkt_cls.h18
-rw-r--r--include/net/pkt_sched.h8
-rw-r--r--include/net/regulatory.h12
-rw-r--r--include/net/sch_generic.h120
-rw-r--r--include/net/sctp/command.h2
-rw-r--r--include/net/sctp/sctp.h5
-rw-r--r--include/net/sctp/sm.h8
-rw-r--r--include/net/sctp/structs.h7
-rw-r--r--include/net/snmp.h14
-rw-r--r--include/net/sock.h108
-rw-r--r--include/net/switchdev.h37
-rw-r--r--include/net/tc_act/tc_vlan.h27
-rw-r--r--include/net/tcp.h147
-rw-r--r--include/net/udp.h21
-rw-r--r--include/net/udp_tunnel.h94
-rw-r--r--include/net/udplite.h10
-rw-r--r--include/net/vxlan.h38
-rw-r--r--include/net/wpan-phy.h105
-rw-r--r--include/net/xfrm.h1
-rw-r--r--include/ras/ras_event.h48
-rw-r--r--include/rdma/ib_umem.h34
-rw-r--r--include/rdma/ib_umem_odp.h160
-rw-r--r--include/rdma/ib_verbs.h86
-rw-r--r--include/rxrpc/types.h41
-rw-r--r--include/scsi/libfc.h2
-rw-r--r--include/scsi/libiscsi.h2
-rw-r--r--include/scsi/libsas.h29
-rw-r--r--include/scsi/osd_initiator.h2
-rw-r--r--include/scsi/osd_ore.h2
-rw-r--r--include/scsi/osd_protocol.h4
-rw-r--r--include/scsi/osd_sec.h2
-rw-r--r--include/scsi/osd_sense.h2
-rw-r--r--include/scsi/osd_types.h2
-rw-r--r--include/scsi/scsi.h13
-rw-r--r--include/scsi/scsi_cmnd.h42
-rw-r--r--include/scsi/scsi_dbg.h28
-rw-r--r--include/scsi/scsi_device.h26
-rw-r--r--include/scsi/scsi_driver.h1
-rw-r--r--include/scsi/scsi_eh.h24
-rw-r--r--include/scsi/scsi_host.h60
-rw-r--r--include/scsi/scsi_ioctl.h4
-rw-r--r--include/scsi/scsi_tcq.h119
-rw-r--r--include/scsi/scsi_transport_spi.h1
-rw-r--r--include/scsi/sg.h5
-rw-r--r--include/soc/at91/at91rm9200_sdramc.h63
-rw-r--r--include/soc/at91/at91sam9_ddrsdr.h124
-rw-r--r--include/soc/at91/at91sam9_sdramc.h85
-rw-r--r--include/soc/tegra/mc.h107
-rw-r--r--include/sound/atmel-abdac.h2
-rw-r--r--include/sound/atmel-ac97c.h2
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/jack.h26
-rw-r--r--include/sound/omap-hdmi-audio.h43
-rw-r--r--include/sound/pcm.h306
-rw-r--r--include/sound/rcar_snd.h8
-rw-r--r--include/sound/rt5645.h7
-rw-r--r--include/sound/rt5677.h23
-rw-r--r--include/sound/seq_kernel.h4
-rw-r--r--include/sound/soc-dai.h7
-rw-r--r--include/sound/soc-dapm.h14
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc.h213
-rw-r--r--include/sound/uda134x.h12
-rw-r--r--include/sound/vx_core.h7
-rw-r--r--include/target/target_core_backend.h43
-rw-r--r--include/target/target_core_backend_configfs.h120
-rw-r--r--include/target/target_core_base.h23
-rw-r--r--include/trace/events/asoc.h31
-rw-r--r--include/trace/events/btrfs.h85
-rw-r--r--include/trace/events/ext4.h62
-rw-r--r--include/trace/events/f2fs.h16
-rw-r--r--include/trace/events/filelock.h14
-rw-r--r--include/trace/events/host1x.h27
-rw-r--r--include/trace/events/kvm.h36
-rw-r--r--include/trace/events/module.h2
-rw-r--r--include/trace/events/rcu.h25
-rw-r--r--include/trace/events/sched.h9
-rw-r--r--include/trace/events/scsi.h2
-rw-r--r--include/trace/events/sunrpc.h268
-rw-r--r--include/trace/events/target.h10
-rw-r--r--include/trace/events/thermal.h83
-rw-r--r--include/trace/ftrace.h8
-rw-r--r--include/uapi/Kbuild1
-rw-r--r--include/uapi/asm-generic/siginfo.h9
-rw-r--r--include/uapi/asm-generic/socket.h5
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/exynos_drm.h40
-rw-r--r--include/uapi/drm/i915_drm.h7
-rw-r--r--include/uapi/drm/radeon_drm.h23
-rw-r--r--include/uapi/drm/vmwgfx_drm.h2
-rw-r--r--include/uapi/linux/Kbuild101
-rw-r--r--include/uapi/linux/android/Kbuild2
-rw-r--r--include/uapi/linux/android/binder.h352
-rw-r--r--include/uapi/linux/audit.h33
-rw-r--r--include/uapi/linux/bpf.h168
-rw-r--r--include/uapi/linux/bpf_common.h55
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/can/error.h1
-rw-r--r--include/uapi/linux/dlmconstants.h2
-rw-r--r--include/uapi/linux/dm-ioctl.h9
-rw-r--r--include/uapi/linux/elf-em.h2
-rw-r--r--include/uapi/linux/elf.h3
-rw-r--r--include/uapi/linux/ethtool.h57
-rw-r--r--include/uapi/linux/filter.h56
-rw-r--r--include/uapi/linux/fou.h39
-rw-r--r--include/uapi/linux/fs.h1
-rw-r--r--include/uapi/linux/genwqe/genwqe_card.h2
-rw-r--r--include/uapi/linux/hyperv.h3
-rw-r--r--include/uapi/linux/if_alg.h2
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h42
-rw-r--r--include/uapi/linux/if_tun.h18
-rw-r--r--include/uapi/linux/if_tunnel.h18
-rw-r--r--include/uapi/linux/in6.h3
-rw-r--r--include/uapi/linux/inet_diag.h13
-rw-r--r--include/uapi/linux/input.h7
-rw-r--r--include/uapi/linux/ip_vs.h3
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kcmp.h (renamed from include/linux/kcmp.h)6
-rw-r--r--include/uapi/linux/kernel-page-flags.h1
-rw-r--r--include/uapi/linux/kfd_ioctl.h154
-rw-r--r--include/uapi/linux/kvm.h39
-rw-r--r--include/uapi/linux/libc-compat.h3
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/media-bus-format.h125
-rw-r--r--include/uapi/linux/msg.h28
-rw-r--r--include/uapi/linux/neighbour.h6
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h20
-rw-r--r--include/uapi/linux/netfilter/nf_nat.h5
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h77
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_acct.h8
-rw-r--r--include/uapi/linux/netfilter/xt_set.h19
-rw-r--r--include/uapi/linux/netfilter_arp/arpt_mangle.h2
-rw-r--r--include/uapi/linux/nfc.h23
-rw-r--r--include/uapi/linux/nfsd/debug.h2
-rw-r--r--include/uapi/linux/nfsd/export.h5
-rw-r--r--include/uapi/linux/nl80211.h263
-rw-r--r--include/uapi/linux/nvme.h46
-rw-r--r--include/uapi/linux/openvswitch.h84
-rw-r--r--include/uapi/linux/pci_regs.h3
-rw-r--r--include/uapi/linux/perf_event.h29
-rw-r--r--include/uapi/linux/prctl.h33
-rw-r--r--include/uapi/linux/raid/md_u.h1
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/sched.h6
-rw-r--r--include/uapi/linux/sem.h18
-rw-r--r--include/uapi/linux/serial_core.h6
-rw-r--r--include/uapi/linux/serial_reg.h1
-rw-r--r--include/uapi/linux/smiapp.h29
-rw-r--r--include/uapi/linux/snmp.h5
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/target_core_user.h138
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h35
-rw-r--r--include/uapi/linux/thermal.h35
-rw-r--r--include/uapi/linux/tipc_netlink.h244
-rw-r--r--include/uapi/linux/tty_flags.h19
-rw-r--r--include/uapi/linux/uhid.h120
-rw-r--r--include/uapi/linux/usb/functionfs.h19
-rw-r--r--include/uapi/linux/v4l2-common.h2
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/linux/v4l2-mediabus.h219
-rw-r--r--include/uapi/linux/v4l2-subdev.h6
-rw-r--r--include/uapi/linux/vfio.h3
-rw-r--r--include/uapi/linux/videodev2.h114
-rw-r--r--include/uapi/linux/virtio_balloon.h1
-rw-r--r--include/uapi/linux/virtio_blk.h15
-rw-r--r--include/uapi/linux/virtio_config.h9
-rw-r--r--include/uapi/linux/virtio_console.h7
-rw-r--r--include/uapi/linux/virtio_net.h15
-rw-r--r--include/uapi/linux/virtio_pci.h15
-rw-r--r--include/uapi/linux/virtio_ring.h45
-rw-r--r--include/uapi/linux/virtio_scsi.h (renamed from include/linux/virtio_scsi.h)106
-rw-r--r--include/uapi/linux/virtio_types.h46
-rw-r--r--include/uapi/linux/vt.h3
-rw-r--r--include/uapi/linux/wil6210_uapi.h87
-rw-r--r--include/uapi/linux/xfrm.h7
-rw-r--r--include/uapi/misc/Kbuild2
-rw-r--r--include/uapi/misc/cxl.h88
-rw-r--r--include/uapi/rdma/ib_user_verbs.h29
-rw-r--r--include/uapi/sound/asound.h8
-rw-r--r--include/uapi/sound/compress_offload.h17
-rw-r--r--include/uapi/sound/firewire.h3
-rw-r--r--include/uapi/sound/hdspm.h12
-rw-r--r--include/video/imx-ipu-v3.h326
-rw-r--r--include/video/of_display_timing.h16
-rw-r--r--include/video/omapdss.h45
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/interface/elfnote.h48
-rw-r--r--include/xen/interface/features.h3
-rw-r--r--include/xen/interface/grant_table.h19
-rw-r--r--include/xen/interface/io/vscsiif.h229
-rw-r--r--include/xen/interface/xen.h272
-rw-r--r--include/xen/xenbus.h21
865 files changed, 28196 insertions, 9096 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 88cb477524a6..d5ec6c87810f 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -111,7 +111,9 @@ struct acpi_gtm_info {
struct acpi_pld_info {
u8 revision;
u8 ignore_color;
- u32 color;
+ u8 red;
+ u8 green;
+ u8 blue;
u16 width;
u16 height;
u8 user_visible;
@@ -155,8 +157,14 @@ struct acpi_pld_info {
#define ACPI_PLD_GET_IGNORE_COLOR(dword) ACPI_GET_BITS (dword, 7, ACPI_1BIT_MASK)
#define ACPI_PLD_SET_IGNORE_COLOR(dword,value) ACPI_SET_BITS (dword, 7, ACPI_1BIT_MASK, value) /* Offset 7, Len 1 */
-#define ACPI_PLD_GET_COLOR(dword) ACPI_GET_BITS (dword, 8, ACPI_24BIT_MASK)
-#define ACPI_PLD_SET_COLOR(dword,value) ACPI_SET_BITS (dword, 8, ACPI_24BIT_MASK, value) /* Offset 8, Len 24 */
+#define ACPI_PLD_GET_RED(dword) ACPI_GET_BITS (dword, 8, ACPI_8BIT_MASK)
+#define ACPI_PLD_SET_RED(dword,value) ACPI_SET_BITS (dword, 8, ACPI_8BIT_MASK, value) /* Offset 8, Len 8 */
+
+#define ACPI_PLD_GET_GREEN(dword) ACPI_GET_BITS (dword, 16, ACPI_8BIT_MASK)
+#define ACPI_PLD_SET_GREEN(dword,value) ACPI_SET_BITS (dword, 16, ACPI_8BIT_MASK, value) /* Offset 16, Len 8 */
+
+#define ACPI_PLD_GET_BLUE(dword) ACPI_GET_BITS (dword, 24, ACPI_8BIT_MASK)
+#define ACPI_PLD_SET_BLUE(dword,value) ACPI_SET_BITS (dword, 24, ACPI_8BIT_MASK, value) /* Offset 24, Len 8 */
/* Second 32-bit dword, bits 33:63 */
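The change above replaces the single 24-bit _PLD color field with three 8-bit channels at bit offsets 8, 16 and 24. As a rough illustration of what the new accessor macros compute, here is a minimal userspace sketch of the equivalent bit extraction; the helper names are hypothetical and ACPI_GET_BITS() is replaced by plain shifts and masks:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for ACPI_GET_BITS() with an 8-bit mask. */
static uint8_t pld_get_red(uint32_t dword)   { return (dword >> 8)  & 0xff; }
static uint8_t pld_get_green(uint32_t dword) { return (dword >> 16) & 0xff; }
static uint8_t pld_get_blue(uint32_t dword)  { return (dword >> 24) & 0xff; }

int main(void)
{
	uint32_t dword = 0x40802000;	/* example first PLD dword */

	/* Before this change a single 24-bit "color" field lived at bit 8;
	 * it is now split into three 8-bit channels. */
	printf("R=%u G=%u B=%u\n",
	       pld_get_red(dword), pld_get_green(dword), pld_get_blue(dword));
	return 0;
}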
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index c728113374f5..7461327e14e4 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -52,6 +52,7 @@
#define METHOD_NAME__CBA "_CBA"
#define METHOD_NAME__CID "_CID"
#define METHOD_NAME__CRS "_CRS"
+#define METHOD_NAME__DDN "_DDN"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
#define METHOD_NAME__PLD "_PLD"
@@ -59,6 +60,10 @@
#define METHOD_NAME__PRS "_PRS"
#define METHOD_NAME__PRT "_PRT"
#define METHOD_NAME__PRW "_PRW"
+#define METHOD_NAME__PS0 "_PS0"
+#define METHOD_NAME__PS1 "_PS1"
+#define METHOD_NAME__PS2 "_PS2"
+#define METHOD_NAME__PS3 "_PS3"
#define METHOD_NAME__REG "_REG"
#define METHOD_NAME__SB_ "_SB_"
#define METHOD_NAME__SEG "_SEG"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 57ee0528aacb..61e32ec1fc4d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -27,6 +27,7 @@
#define __ACPI_BUS_H__
#include <linux/device.h>
+#include <linux/property.h>
/* TBD: Make dynamic */
#define ACPI_MAX_HANDLES 10
@@ -312,6 +313,7 @@ struct acpi_device_wakeup_flags {
u8 valid:1; /* Can successfully enable wakeup? */
u8 run_wake:1; /* Run-Wake GPE devices */
u8 notifier_present:1; /* Wake-up notify handler has been installed */
+ u8 enabled:1; /* Enabled for wakeup */
};
struct acpi_device_wakeup_context {
@@ -337,10 +339,20 @@ struct acpi_device_physical_node {
bool put_online:1;
};
+/* ACPI Device Specific Data (_DSD) */
+struct acpi_device_data {
+ const union acpi_object *pointer;
+ const union acpi_object *properties;
+ const union acpi_object *of_compatible;
+};
+
+struct acpi_gpio_mapping;
+
/* Device */
struct acpi_device {
int device_type;
acpi_handle handle; /* no handle for fixed hardware */
+ struct fwnode_handle fwnode;
struct acpi_device *parent;
struct list_head children;
struct list_head node;
@@ -353,17 +365,35 @@ struct acpi_device {
struct acpi_device_wakeup wakeup;
struct acpi_device_perf performance;
struct acpi_device_dir dir;
+ struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
struct acpi_driver *driver;
+ const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
unsigned int physical_node_count;
+ unsigned int dep_unmet;
struct list_head physical_node_list;
struct mutex physical_node_lock;
void (*remove)(struct acpi_device *);
};
+static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+{
+ return fwnode && fwnode->type == FWNODE_ACPI;
+}
+
+static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+{
+ return fwnode ? container_of(fwnode, struct acpi_device, fwnode) : NULL;
+}
+
+static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
+{
+ return &adev->fwnode;
+}
+
static inline void *acpi_driver_data(struct acpi_device *d)
{
return d->driver_data;
@@ -433,6 +463,7 @@ int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_device_fix_up_power(struct acpi_device *device);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
+int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
#ifdef CONFIG_PM
@@ -515,6 +546,7 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
void (*work_func)(struct work_struct *work));
acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
int acpi_pm_device_sleep_state(struct device *, int *, int);
+int acpi_pm_device_run_wake(struct device *, bool);
#else
static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
struct device *dev,
@@ -534,11 +566,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
m : ACPI_STATE_D0;
}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-int acpi_pm_device_run_wake(struct device *, bool);
-#else
static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
{
return -ENODEV;
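The new is_acpi_node()/acpi_node() helpers work by embedding a struct fwnode_handle inside struct acpi_device and recovering the enclosing structure with container_of(). A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real kernel definitions, looks roughly like this:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified, hypothetical stand-ins for the kernel structures. */
struct fwnode_handle { int type; };
struct acpi_device {
	int device_type;
	struct fwnode_handle fwnode;	/* embedded, as in the diff above */
};

static struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
{
	return fwnode ? container_of(fwnode, struct acpi_device, fwnode) : NULL;
}

int main(void)
{
	struct acpi_device adev = { .device_type = 42 };
	struct fwnode_handle *fw = &adev.fwnode;

	/* Recover the enclosing acpi_device from its embedded fwnode. */
	printf("device_type = %d\n", acpi_node(fw)->device_type);
	return 0;
}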
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b7c89d47efbe..5ba78464c1b1 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20140724
+#define ACPI_CA_VERSION 0x20141107
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
*event_status))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7626bfeac2cb..29e79370641d 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -952,7 +952,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
- ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
+ ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
+ ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
};
/*
@@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity {
u32 flags;
u8 local_sapic_eid;
u8 proximity_domain_hi[3];
- u32 reserved; /* Reserved, must be zero */
+ u32 clock_domain;
};
/* Flags */
@@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity {
#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
+/* 3: GICC Affinity (ACPI 5.1) */
+
+struct acpi_srat_gicc_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u32 acpi_processor_uid;
+ u32 flags;
+ u32 clock_domain;
+};
+
+/* Flags for struct acpi_srat_gicc_affinity */
+
+#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 787bcc814463..5480cb2236bf 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry {
u32 common_flags;
};
+/* Flag Definitions: timer_flags and virtual_timer_flags above */
+
+#define ACPI_GTDT_GT_IRQ_MODE (1)
+#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
+
/* Flag Definitions: common_flags above */
-#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
-#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
+#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
+#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
/* 1: SBSA Generic Watchdog Structure */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index ac03ec81d342..bbef17368e49 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -721,7 +721,7 @@ typedef u32 acpi_event_type;
* | | | +--- Enabled for wake?
* | | +----- Set?
* | +------- Has a handler?
- * +----------- <Reserved>
+ * +------------- <Reserved>
*/
typedef u32 acpi_event_status;
@@ -729,13 +729,17 @@ typedef u32 acpi_event_status;
#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01
#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02
#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
-#define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x08
/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_CONDITIONAL_ENABLE 2
+#define ACPI_GPE_SAVE_MASK 4
+
+#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
+#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
/*
* GPE info flags - Per GPE
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 9b9b6f29bbf3..3ca9b751f122 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -67,9 +67,6 @@ struct acpi_processor_cx {
};
struct acpi_processor_power {
- struct acpi_processor_cx *state;
- unsigned long bm_check_timestamp;
- u32 default_state;
int count;
struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
int timer_broadcast_on_state;
@@ -313,11 +310,13 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-void acpi_processor_set_pdc(acpi_handle handle);
int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
int acpi_map_cpuid(int apic_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
+/* in processor_pdc.c */
+void acpi_processor_set_pdc(acpi_handle handle);
+
/* in processor_throttling.c */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
int acpi_processor_get_throttling_info(struct acpi_processor *pr);
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 9c79e7603459..1973ad2b13f4 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -18,14 +18,100 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
+ * smp_mb__{before,after}_atomic().
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
#ifdef CONFIG_SMP
-/* Force people to define core atomics */
-# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
- !defined(atomic_clear_mask) || !defined(atomic_set_mask)
-# error "SMP requires a little arch-specific magic"
-# endif
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return c c_op i; \
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter = v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ raw_local_irq_save(flags); \
+ ret = (v->counter = v->counter c_op i); \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v) atomic_or((i), (v))
+#endif
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
@@ -33,8 +119,6 @@
#define ATOMIC_INIT(i) { (i) }
-#ifdef __KERNEL__
-
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
@@ -42,7 +126,7 @@
* Atomically reads the value of @v.
*/
#ifndef atomic_read
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#endif
/**
@@ -56,52 +140,6 @@
#include <linux/irqflags.h>
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- */
-#ifndef atomic_add_return
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int temp;
-
- raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
- temp = v->counter;
- temp += i;
- v->counter = temp;
- raw_local_irq_restore(flags);
-
- return temp;
-}
-#endif
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- */
-#ifndef atomic_sub_return
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int temp;
-
- raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- raw_local_irq_restore(flags);
-
- return temp;
-}
-#endif
-
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-
-/**
- * atomic_clear_mask - Atomically clear bits in atomic variable
- * @mask: Mask of the bits to be cleared
- * @v: pointer of type atomic_t
- *
- * Atomically clears the bits set in @mask from @v
- */
-#ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
- unsigned long flags;
-
- mask = ~mask;
- raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
- v->counter &= mask;
- raw_local_irq_restore(flags);
+ int c, old;
+ c = atomic_read(v);
+ while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+ c = old;
+ return c;
}
-#endif
-
-/**
- * atomic_set_mask - Atomically set bits in atomic variable
- * @mask: Mask of the bits to be set
- * @v: pointer of type atomic_t
- *
- * Atomically sets the bits set in @mask in @v
- */
-#ifndef atomic_set_mask
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
- v->counter |= mask;
- raw_local_irq_restore(flags);
-}
-#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */
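For reference, the SMP variant of ATOMIC_OP_RETURN(add, +) introduced above expands to a compare-and-swap retry loop. A userspace sketch of the same loop, substituting GCC's __sync_val_compare_and_swap() for the kernel's cmpxchg() and using a simplified atomic_t, might look like this:

#include <stdio.h>

typedef struct { int counter; } atomic_t;	/* simplified for illustration */

/* Roughly what ATOMIC_OP_RETURN(add, +) generates on SMP: retry the
 * compare-and-swap until no other CPU changed the counter under us. */
static int atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	while ((old = __sync_val_compare_and_swap(&v->counter, c, c + i)) != c)
		c = old;

	return c + i;
}

int main(void)
{
	atomic_t v = { .counter = 5 };

	printf("%d\n", atomic_add_return(3, &v));	/* prints 8 */
	return 0;
}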
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f9ee3d..30ad9c86cebb 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -20,10 +20,22 @@ typedef struct {
extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i);
-extern void atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP(op) \
+extern void atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op) \
+extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
extern long long atomic64_dec_if_positive(atomic64_t *v);
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
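The ATOMIC64_OP()/ATOMIC64_OP_RETURN() macros above only stamp out the extern declarations that were previously written by hand; ATOMIC64_OPS(add), for instance, expands to the same two declarations the patch removes:

/* Expansion of ATOMIC64_OPS(add) = ATOMIC64_OP(add) ATOMIC64_OP_RETURN(add) */
extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);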
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 1402fa855388..f5c40b0fadc2 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -42,6 +42,14 @@
#define wmb() mb()
#endif
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+
+#ifndef dma_wmb
+#define dma_wmb() wmb()
+#endif
+
#ifndef read_barrier_depends
#define read_barrier_depends() do { } while (0)
#endif
diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h
index 90a32a61dd21..4ff334749ed5 100644
--- a/include/asm-generic/clkdev.h
+++ b/include/asm-generic/clkdev.h
@@ -15,10 +15,12 @@
#include <linux/slab.h>
+#ifndef CONFIG_COMMON_CLK
struct clk;
static inline int __clk_get(struct clk *clk) { return 1; }
static inline void __clk_put(struct clk *clk) { }
+#endif
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index d5cb78f53986..fe386fc6e85e 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -3,6 +3,8 @@
typedef unsigned long __nocast cputime_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 4e817606c549..0419485891f2 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -21,6 +21,8 @@
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 000000000000..292c571750f0
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
+#define _ASM_GENERIC_DMA_CONTIGUOUS_H
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index de8bf89940f8..3378dcf4c31e 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
/**
* dma_mmap_attrs - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -205,14 +214,6 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 01f227e14254..b59b5a52637e 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -5,6 +5,119 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifndef CONFIG_SMP
+/*
+ * The following implementation is only for uniprocessor machines.
+ * For UP, it relies on the fact that pagefault_disable() also disables
+ * preemption to ensure mutual exclusion.
+ *
+ */
+
+/**
+ * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * argument and comparison of the previous
+ * futex value with another constant.
+ *
+ * @encoded_op: encoded operation to execute
+ * @uaddr: pointer to user space address
+ *
+ * Return:
+ * 0 - On success
+ * <0 - On error
+ */
+static inline int
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval, ret;
+ u32 tmp;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ pagefault_disable();
+
+ ret = -EFAULT;
+ if (unlikely(get_user(oldval, uaddr) != 0))
+ goto out_pagefault_enable;
+
+ ret = 0;
+ tmp = oldval;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ tmp = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ tmp += oparg;
+ break;
+ case FUTEX_OP_OR:
+ tmp |= oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ tmp &= ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ tmp ^= oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ ret = -EFAULT;
+
+out_pagefault_enable:
+ pagefault_enable();
+
+ if (ret == 0) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+/**
+ * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * uaddr with newval if the current value is
+ * oldval.
+ * @uval: pointer to store content of @uaddr
+ * @uaddr: pointer to user space address
+ * @oldval: old value
+ * @newval: new value to store to @uaddr
+ *
+ * Return:
+ * 0 - On success
+ * <0 - On error
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ u32 val;
+
+ if (unlikely(get_user(val, uaddr) != 0))
+ return -EFAULT;
+
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+ return -EFAULT;
+
+ *uval = val;
+
+ return 0;
+}
+
+#else
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
@@ -54,4 +167,5 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -ENOSYS;
}
+#endif /* CONFIG_SMP */
#endif
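
As an aside, the UP path above decodes all four operands from one 32-bit encoded_op with fixed shifts. Below is a minimal user-space sketch of that round trip, using the exact extraction expressions from futex_atomic_op_inuser(); encode_op() and the operand values are illustrative, not the uapi FUTEX_OP() encoding macro.

/* cc -o futex_encop_demo futex_encop_demo.c */
#include <assert.h>
#include <stdio.h>

/*
 * Pack the fields the way the generic decoder expects them: op in bits
 * 31:28, cmp in bits 27:24, oparg in bits 23:12 and cmparg in bits 11:0,
 * the latter two as sign-extended 12-bit values.
 */
static int encode_op(int op, int cmp, int oparg, int cmparg)
{
	return ((op & 0x7) << 28) | ((cmp & 0xf) << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}

int main(void)
{
	int encoded_op = encode_op(1 /* e.g. FUTEX_OP_ADD */, 0, 5, -1);

	/* The exact field extraction used by futex_atomic_op_inuser(). */
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;

	assert(op == 1 && cmp == 0 && oparg == 5 && cmparg == -1);
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}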
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c1d4105e1c1d..383ade1a211b 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -27,7 +27,7 @@
*/
#ifndef ARCH_NR_GPIOS
-#define ARCH_NR_GPIOS 256
+#define ARCH_NR_GPIOS 512
#endif
/*
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
deleted file mode 100644
index b6312843dbd9..000000000000
--- a/include/asm-generic/hash.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-struct fast_hash_ops;
-static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
-}
-
-#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 975e1cc75edb..9db042304df3 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,6 +12,7 @@
#define __ASM_GENERIC_IO_H
#include <asm/page.h> /* I/O is all done through memory accesses */
+#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
#ifdef CONFIG_GENERIC_IOMAP
@@ -24,260 +25,691 @@
#define mmiowb() do {} while (0)
#endif
-/*****************************************************************************/
/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the simple architectures, we just read/write the
- * memory location directly.
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
*/
+
#ifndef __raw_readb
+#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
- return *(const volatile u8 __force *) addr;
+ return *(const volatile u8 __force *)addr;
}
#endif
#ifndef __raw_readw
+#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
- return *(const volatile u16 __force *) addr;
+ return *(const volatile u16 __force *)addr;
}
#endif
#ifndef __raw_readl
+#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
- return *(const volatile u32 __force *) addr;
+ return *(const volatile u32 __force *)addr;
}
#endif
-#define readb __raw_readb
-
-#define readw readw
-static inline u16 readw(const volatile void __iomem *addr)
-{
- return __le16_to_cpu(__raw_readw(addr));
-}
-
-#define readl readl
-static inline u32 readl(const volatile void __iomem *addr)
+#ifdef CONFIG_64BIT
+#ifndef __raw_readq
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ return *(const volatile u64 __force *)addr;
}
+#endif
+#endif /* CONFIG_64BIT */
#ifndef __raw_writeb
-static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
- *(volatile u8 __force *) addr = b;
+ *(volatile u8 __force *)addr = value;
}
#endif
#ifndef __raw_writew
-static inline void __raw_writew(u16 b, volatile void __iomem *addr)
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
- *(volatile u16 __force *) addr = b;
+ *(volatile u16 __force *)addr = value;
}
#endif
#ifndef __raw_writel
-static inline void __raw_writel(u32 b, volatile void __iomem *addr)
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
- *(volatile u32 __force *) addr = b;
+ *(volatile u32 __force *)addr = value;
}
#endif
-#define writeb __raw_writeb
-#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
-#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
-
#ifdef CONFIG_64BIT
-#ifndef __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
+#ifndef __raw_writeq
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
- return *(const volatile u64 __force *) addr;
+ *(volatile u64 __force *)addr = value;
}
#endif
+#endif /* CONFIG_64BIT */
-#define readq readq
-static inline u64 readq(const volatile void __iomem *addr)
-{
- return __le64_to_cpu(__raw_readq(addr));
-}
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
-#ifndef __raw_writeq
-static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
+#ifndef readb
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
{
- *(volatile u64 __force *) addr = b;
+ return __raw_readb(addr);
}
#endif
-#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
-#endif /* CONFIG_64BIT */
-
-#ifndef PCI_IOBASE
-#define PCI_IOBASE ((void __iomem *) 0)
+#ifndef readw
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
#endif
-/*****************************************************************************/
-/*
- * traditional input/output functions
- */
-
-static inline u8 inb(unsigned long addr)
+#ifndef readl
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
{
- return readb(addr + PCI_IOBASE);
+ return __le32_to_cpu(__raw_readl(addr));
}
+#endif
-static inline u16 inw(unsigned long addr)
+#ifdef CONFIG_64BIT
+#ifndef readq
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
{
- return readw(addr + PCI_IOBASE);
+ return __le64_to_cpu(__raw_readq(addr));
}
+#endif
+#endif /* CONFIG_64BIT */
-static inline u32 inl(unsigned long addr)
+#ifndef writeb
+#define writeb writeb
+static inline void writeb(u8 value, volatile void __iomem *addr)
{
- return readl(addr + PCI_IOBASE);
+ __raw_writeb(value, addr);
}
+#endif
-static inline void outb(u8 b, unsigned long addr)
+#ifndef writew
+#define writew writew
+static inline void writew(u16 value, volatile void __iomem *addr)
{
- writeb(b, addr + PCI_IOBASE);
+ __raw_writew(cpu_to_le16(value), addr);
}
+#endif
-static inline void outw(u16 b, unsigned long addr)
+#ifndef writel
+#define writel writel
+static inline void writel(u32 value, volatile void __iomem *addr)
{
- writew(b, addr + PCI_IOBASE);
+ __raw_writel(__cpu_to_le32(value), addr);
}
+#endif
-static inline void outl(u32 b, unsigned long addr)
+#ifdef CONFIG_64BIT
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
{
- writel(b, addr + PCI_IOBASE);
+ __raw_writeq(__cpu_to_le64(value), addr);
}
+#endif
+#endif /* CONFIG_64BIT */
+
+/*
+ * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
+ * are not guaranteed to provide ordering against spinlocks or memory
+ * accesses.
+ */
+#ifndef readb_relaxed
+#define readb_relaxed readb
+#endif
-#define inb_p(addr) inb(addr)
-#define inw_p(addr) inw(addr)
-#define inl_p(addr) inl(addr)
-#define outb_p(x, addr) outb((x), (addr))
-#define outw_p(x, addr) outw((x), (addr))
-#define outl_p(x, addr) outl((x), (addr))
+#ifndef readw_relaxed
+#define readw_relaxed readw
+#endif
-#ifndef insb
-static inline void insb(unsigned long addr, void *buffer, int count)
+#ifndef readl_relaxed
+#define readl_relaxed readl
+#endif
+
+#ifndef readq_relaxed
+#define readq_relaxed readq
+#endif
+
+#ifndef writeb_relaxed
+#define writeb_relaxed writeb
+#endif
+
+#ifndef writew_relaxed
+#define writew_relaxed writew
+#endif
+
+#ifndef writel_relaxed
+#define writel_relaxed writel
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed writeq
+#endif
+
+/*
+ * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
+ * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
+ */
+#ifndef readsb
+#define readsb readsb
+static inline void readsb(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
{
if (count) {
u8 *buf = buffer;
+
do {
- u8 x = __raw_readb(addr + PCI_IOBASE);
+ u8 x = __raw_readb(addr);
*buf++ = x;
} while (--count);
}
}
#endif
-#ifndef insw
-static inline void insw(unsigned long addr, void *buffer, int count)
+#ifndef readsw
+#define readsw readsw
+static inline void readsw(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
{
if (count) {
u16 *buf = buffer;
+
do {
- u16 x = __raw_readw(addr + PCI_IOBASE);
+ u16 x = __raw_readw(addr);
*buf++ = x;
} while (--count);
}
}
#endif
-#ifndef insl
-static inline void insl(unsigned long addr, void *buffer, int count)
+#ifndef readsl
+#define readsl readsl
+static inline void readsl(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
{
if (count) {
u32 *buf = buffer;
+
do {
- u32 x = __raw_readl(addr + PCI_IOBASE);
+ u32 x = __raw_readl(addr);
*buf++ = x;
} while (--count);
}
}
#endif
-#ifndef outsb
-static inline void outsb(unsigned long addr, const void *buffer, int count)
+#ifdef CONFIG_64BIT
+#ifndef readsq
+#define readsq readsq
+static inline void readsq(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u64 *buf = buffer;
+
+ do {
+ u64 x = __raw_readq(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef writesb
+#define writesb writesb
+static inline void writesb(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
{
if (count) {
const u8 *buf = buffer;
+
do {
- __raw_writeb(*buf++, addr + PCI_IOBASE);
+ __raw_writeb(*buf++, addr);
} while (--count);
}
}
#endif
-#ifndef outsw
-static inline void outsw(unsigned long addr, const void *buffer, int count)
+#ifndef writesw
+#define writesw writesw
+static inline void writesw(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
{
if (count) {
const u16 *buf = buffer;
+
do {
- __raw_writew(*buf++, addr + PCI_IOBASE);
+ __raw_writew(*buf++, addr);
} while (--count);
}
}
#endif
-#ifndef outsl
-static inline void outsl(unsigned long addr, const void *buffer, int count)
+#ifndef writesl
+#define writesl writesl
+static inline void writesl(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
{
if (count) {
const u32 *buf = buffer;
+
do {
- __raw_writel(*buf++, addr + PCI_IOBASE);
+ __raw_writel(*buf++, addr);
} while (--count);
}
}
#endif
-#ifndef CONFIG_GENERIC_IOMAP
-#define ioread8(addr) readb(addr)
-#define ioread16(addr) readw(addr)
-#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr))
-#define ioread32(addr) readl(addr)
-#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr))
-
-#define iowrite8(v, addr) writeb((v), (addr))
-#define iowrite16(v, addr) writew((v), (addr))
-#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr)
-#define iowrite32(v, addr) writel((v), (addr))
-#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr)
-
-#define ioread8_rep(p, dst, count) \
- insb((unsigned long) (p), (dst), (count))
-#define ioread16_rep(p, dst, count) \
- insw((unsigned long) (p), (dst), (count))
-#define ioread32_rep(p, dst, count) \
- insl((unsigned long) (p), (dst), (count))
-
-#define iowrite8_rep(p, src, count) \
- outsb((unsigned long) (p), (src), (count))
-#define iowrite16_rep(p, src, count) \
- outsw((unsigned long) (p), (src), (count))
-#define iowrite32_rep(p, src, count) \
- outsl((unsigned long) (p), (src), (count))
-#endif /* CONFIG_GENERIC_IOMAP */
+#ifdef CONFIG_64BIT
+#ifndef writesq
+#define writesq writesq
+static inline void writesq(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u64 *buf = buffer;
+
+ do {
+ __raw_writeq(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *)0)
+#endif
#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif
+/*
+ * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
+ * implemented on hardware that needs an additional delay for I/O accesses to
+ * take effect.
+ */
+
+#ifndef inb
+#define inb inb
+static inline u8 inb(unsigned long addr)
+{
+ return readb(PCI_IOBASE + addr);
+}
+#endif
+
+#ifndef inw
+#define inw inw
+static inline u16 inw(unsigned long addr)
+{
+ return readw(PCI_IOBASE + addr);
+}
+#endif
+
+#ifndef inl
+#define inl inl
+static inline u32 inl(unsigned long addr)
+{
+ return readl(PCI_IOBASE + addr);
+}
+#endif
+
+#ifndef outb
+#define outb outb
+static inline void outb(u8 value, unsigned long addr)
+{
+ writeb(value, PCI_IOBASE + addr);
+}
+#endif
+
+#ifndef outw
+#define outw outw
+static inline void outw(u16 value, unsigned long addr)
+{
+ writew(value, PCI_IOBASE + addr);
+}
+#endif
+
+#ifndef outl
+#define outl outl
+static inline void outl(u32 value, unsigned long addr)
+{
+ writel(value, PCI_IOBASE + addr);
+}
+#endif
+
+#ifndef inb_p
+#define inb_p inb_p
+static inline u8 inb_p(unsigned long addr)
+{
+ return inb(addr);
+}
+#endif
+
+#ifndef inw_p
+#define inw_p inw_p
+static inline u16 inw_p(unsigned long addr)
+{
+ return inw(addr);
+}
+#endif
+
+#ifndef inl_p
+#define inl_p inl_p
+static inline u32 inl_p(unsigned long addr)
+{
+ return inl(addr);
+}
+#endif
+
+#ifndef outb_p
+#define outb_p outb_p
+static inline void outb_p(u8 value, unsigned long addr)
+{
+ outb(value, addr);
+}
+#endif
+
+#ifndef outw_p
+#define outw_p outw_p
+static inline void outw_p(u16 value, unsigned long addr)
+{
+ outw(value, addr);
+}
+#endif
+
+#ifndef outl_p
+#define outl_p outl_p
+static inline void outl_p(u32 value, unsigned long addr)
+{
+ outl(value, addr);
+}
+#endif
+
+/*
+ * {in,out}s{b,w,l}{,_p}() are variants of the above that access a single
+ * I/O port multiple times.
+ */
+
+#ifndef insb
+#define insb insb
+static inline void insb(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insw
+#define insw insw
+static inline void insw(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insl
+#define insl insl
+static inline void insl(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsb
+#define outsb outsb
+static inline void outsb(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsw
+#define outsw outsw
+static inline void outsw(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsl
+#define outsl outsl
+static inline void outsl(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insb_p
+#define insb_p insb_p
+static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insb(addr, buffer, count);
+}
+#endif
+
+#ifndef insw_p
+#define insw_p insw_p
+static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insw(addr, buffer, count);
+}
+#endif
+
+#ifndef insl_p
+#define insl_p insl_p
+static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insl(addr, buffer, count);
+}
+#endif
+
+#ifndef outsb_p
+#define outsb_p outsb_p
+static inline void outsb_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsb(addr, buffer, count);
+}
+#endif
+
+#ifndef outsw_p
+#define outsw_p outsw_p
+static inline void outsw_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsw(addr, buffer, count);
+}
+#endif
+
+#ifndef outsl_p
+#define outsl_p outsl_p
+static inline void outsl_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsl(addr, buffer, count);
+}
+#endif
+
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioread8
+#define ioread8 ioread8
+static inline u8 ioread8(const volatile void __iomem *addr)
+{
+ return readb(addr);
+}
+#endif
+
+#ifndef ioread16
+#define ioread16 ioread16
+static inline u16 ioread16(const volatile void __iomem *addr)
+{
+ return readw(addr);
+}
+#endif
+
+#ifndef ioread32
+#define ioread32 ioread32
+static inline u32 ioread32(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+#endif
+
+#ifndef iowrite8
+#define iowrite8 iowrite8
+static inline void iowrite8(u8 value, volatile void __iomem *addr)
+{
+ writeb(value, addr);
+}
+#endif
+
+#ifndef iowrite16
+#define iowrite16 iowrite16
+static inline void iowrite16(u16 value, volatile void __iomem *addr)
+{
+ writew(value, addr);
+}
+#endif
+
+#ifndef iowrite32
+#define iowrite32 iowrite32
+static inline void iowrite32(u32 value, volatile void __iomem *addr)
+{
+ writel(value, addr);
+}
+#endif
+
+#ifndef ioread16be
+#define ioread16be ioread16be
+static inline u16 ioread16be(const volatile void __iomem *addr)
+{
+ return __be16_to_cpu(__raw_readw(addr));
+}
+#endif
+
+#ifndef ioread32be
+#define ioread32be ioread32be
+static inline u32 ioread32be(const volatile void __iomem *addr)
+{
+ return __be32_to_cpu(__raw_readl(addr));
+}
+#endif
+
+#ifndef iowrite16be
+#define iowrite16be iowrite16be
+static inline void iowrite16be(u16 value, volatile void __iomem *addr)
+{
+ __raw_writew(__cpu_to_be16(value), addr);
+}
+#endif
+
+#ifndef iowrite32be
+#define iowrite32be iowrite32be
+static inline void iowrite32be(u32 value, volatile void __iomem *addr)
+{
+ __raw_writel(__cpu_to_be32(value), addr);
+}
+#endif
+
+#ifndef ioread8_rep
+#define ioread8_rep ioread8_rep
+static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ readsb(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread16_rep
+#define ioread16_rep ioread16_rep
+static inline void ioread16_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsw(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread32_rep
+#define ioread32_rep ioread32_rep
+static inline void ioread32_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsl(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite8_rep
+#define iowrite8_rep iowrite8_rep
+static inline void iowrite8_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesb(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite16_rep
+#define iowrite16_rep iowrite16_rep
+static inline void iowrite16_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesw(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite32_rep
+#define iowrite32_rep iowrite32_rep
+static inline void iowrite32_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesl(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_GENERIC_IOMAP */
+
#ifdef __KERNEL__
#include <linux/vmalloc.h>
-#define __io_virt(x) ((void __force *) (x))
+#define __io_virt(x) ((void __force *)(x))
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
#ifndef pci_iounmap
+#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
@@ -289,11 +721,15 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
* These are pretty trivial
*/
#ifndef virt_to_phys
+#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
}
+#endif
+#ifndef phys_to_virt
+#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
@@ -306,37 +742,65 @@ static inline void *phys_to_virt(unsigned long address)
* This implementation is for the no-MMU case only... if you have an MMU
* you'll need to provide your own definitions.
*/
+
#ifndef CONFIG_MMU
-static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+#ifndef ioremap
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
- return (void __iomem*) (unsigned long)offset;
+ return (void __iomem *)(unsigned long)offset;
}
+#endif
-#define __ioremap(offset, size, flags) ioremap(offset, size)
+#ifndef __ioremap
+#define __ioremap __ioremap
+static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
+ unsigned long flags)
+{
+ return ioremap(offset, size);
+}
+#endif
#ifndef ioremap_nocache
-#define ioremap_nocache ioremap
+#define ioremap_nocache ioremap_nocache
+static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
+{
+ return ioremap(offset, size);
+}
#endif
#ifndef ioremap_wc
-#define ioremap_wc ioremap_nocache
+#define ioremap_wc ioremap_wc
+static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
#endif
+#ifndef iounmap
+#define iounmap iounmap
static inline void iounmap(void __iomem *addr)
{
}
+#endif
#endif /* CONFIG_MMU */
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioport_map
+#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return (void __iomem *) port;
+ return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
+#endif
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
+#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
@@ -344,35 +808,68 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef xlate_dev_kmem_ptr
-#define xlate_dev_kmem_ptr(p) p
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+static inline void *xlate_dev_kmem_ptr(void *addr)
+{
+ return addr;
+}
#endif
+
#ifndef xlate_dev_mem_ptr
-#define xlate_dev_mem_ptr(p) __va(p)
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
+{
+ return __va(addr);
+}
+#endif
+
+#ifndef unxlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+{
+}
#endif
#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
-static inline unsigned long virt_to_bus(volatile void *address)
+static inline unsigned long virt_to_bus(void *address)
{
- return ((unsigned long) address);
+ return (unsigned long)address;
}
static inline void *bus_to_virt(unsigned long address)
{
- return (void *) address;
+ return (void *)address;
}
#endif
#endif
#ifndef memset_io
-#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
+#define memset_io memset_io
+static inline void memset_io(volatile void __iomem *addr, int value,
+ size_t size)
+{
+ memset(__io_virt(addr), value, size);
+}
#endif
#ifndef memcpy_fromio
-#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
+#define memcpy_fromio memcpy_fromio
+static inline void memcpy_fromio(void *buffer,
+ const volatile void __iomem *addr,
+ size_t size)
+{
+ memcpy(buffer, __io_virt(addr), size);
+}
#endif
+
#ifndef memcpy_toio
-#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
+#define memcpy_toio memcpy_toio
+static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ memcpy(__io_virt(addr), buffer, size);
+}
#endif
#endif /* __KERNEL__ */
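
To illustrate how the accessor families above combine in practice, here is a hedged driver-style sketch: writel() performs a little-endian register write, while readsl() repeatedly reads one FIFO address in native endianness. The device, base pointer and register offsets are invented for the example.

#include <linux/io.h>

/* Hypothetical register layout, for illustration only. */
#define DEMO_CTRL	0x00	/* little-endian control register */
#define DEMO_FIFO	0x10	/* 32-bit data FIFO behind a single address */

static void demo_drain_fifo(void __iomem *base, u32 *buf, unsigned int words)
{
	writel(0x1, base + DEMO_CTRL);		/* hypothetical "start" bit */
	readsl(base + DEMO_FIFO, buf, words);	/* repeated native-endian reads */
}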
diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h
new file mode 100644
index 000000000000..a44f452c6590
--- /dev/null
+++ b/include/asm-generic/irq_work.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return false;
+}
+
+#endif /* __ASM_IRQ_WORK_H */
+
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 67dea8123683..866aa461efa5 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,7 +1,7 @@
/*
- * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
- * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
- * need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
+ * and arch_unmap to be included in asm-FOO/mmu_context.h for any
+ * arch FOO which doesn't need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
@@ -15,4 +15,15 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
+static inline void arch_unmap(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ struct vm_area_struct *vma)
+{
+}
+
#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
new file mode 100644
index 000000000000..61c58d8878ce
--- /dev/null
+++ b/include/asm-generic/msi.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_GENERIC_MSI_H
+#define __ASM_GENERIC_MSI_H
+
+#include <linux/types.h>
+
+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
+#endif
+
+struct msi_desc;
+
+/**
+ * struct msi_alloc_info - Default structure for MSI interrupt allocation.
+ * @desc: Pointer to msi descriptor
+ * @hwirq: Associated hw interrupt number in the domain
+ * @scratchpad: Storage for implementation specific scratch data
+ *
+ * Architectures can provide their own implementation by not including
+ * asm-generic/msi.h into their arch specific header file.
+ */
+typedef struct msi_alloc_info {
+ struct msi_desc *desc;
+ irq_hw_number_t hwirq;
+ union {
+ unsigned long ul;
+ void *ptr;
+ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
+} msi_alloc_info_t;
+
+#define GENERIC_MSI_DOMAIN_OPS 1
+
+#endif
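
A minimal sketch of how allocation code might fill the structure above; demo_fill_alloc_info() is an invented helper and the scratchpad usage is purely illustrative, since its contents are implementation specific.

#include <linux/msi.h>	/* assumed to pull in msi_alloc_info_t via asm/msi.h */

static void demo_fill_alloc_info(msi_alloc_info_t *info, struct msi_desc *desc,
				 irq_hw_number_t hwirq, void *chip_data)
{
	info->desc = desc;
	info->hwirq = hwirq;
	info->scratchpad[0].ptr = chip_data;	/* implementation specific */
	info->scratchpad[1].ul = 0;
}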
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 53b2acc38213..177d5973b132 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -103,6 +103,17 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_get_and_clear(mm, address, pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
@@ -249,6 +260,24 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
@@ -660,11 +689,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
}
#ifdef CONFIG_NUMA_BALANCING
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
- * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the
- * same bit too). It's set only when _PAGE_PRESET is not set and it's
- * never set if _PAGE_PRESENT is set.
+ * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
+ * is protected for PROT_NONE and a NUMA hinting fault entry. If the
+ * architecture defines _PAGE_PROTNONE then it should take that into account
+ * but those that do not can rely on the fact that the NUMA hinting scanner
+ * skips inaccessible VMAs.
*
* pte/pmd_present() returns true if pte/pmd_numa returns true. Page
* fault triggers on those regions if pte/pmd_numa returns true
@@ -673,16 +703,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#ifndef pte_numa
static inline int pte_numa(pte_t pte)
{
- return (pte_flags(pte) &
- (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+ return ptenuma_flags(pte) == _PAGE_NUMA;
}
#endif
#ifndef pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
- return (pmd_flags(pmd) &
- (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+ return pmdnuma_flags(pmd) == _PAGE_NUMA;
}
#endif
@@ -722,6 +750,8 @@ static inline pte_t pte_mknuma(pte_t pte)
{
pteval_t val = pte_val(pte);
+ VM_BUG_ON(!(val & _PAGE_PRESENT));
+
val &= ~_PAGE_PRESENT;
val |= _PAGE_NUMA;
@@ -765,16 +795,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
}
#endif
#else
-extern int pte_numa(pte_t pte);
-extern int pmd_numa(pmd_t pmd);
-extern pte_t pte_mknonnuma(pte_t pte);
-extern pmd_t pmd_mknonnuma(pmd_t pmd);
-extern pte_t pte_mknuma(pte_t pte);
-extern pmd_t pmd_mknuma(pmd_t pmd);
-extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
-#else
static inline int pmd_numa(pmd_t pmd)
{
return 0;
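
The pgprot_modify() fallback introduced above keeps the old mapping's memory type (noncached, writecombine or device) while taking the new protection bits. Below is a stand-alone model of that selection logic with invented bit values; the device case behaves the same way and is omitted for brevity.

#include <assert.h>

#define P_NOCACHE	0x1
#define P_WRITECOMB	0x2
#define P_RW		0x4

static unsigned int model_noncached(unsigned int p)
{
	return (p & ~P_WRITECOMB) | P_NOCACHE;
}

static unsigned int model_writecombine(unsigned int p)
{
	return (p & ~P_NOCACHE) | P_WRITECOMB;
}

static unsigned int model_pgprot_modify(unsigned int oldprot, unsigned int newprot)
{
	/* Keep the old mapping's memory type, take the new permissions. */
	if (oldprot == model_noncached(oldprot))
		newprot = model_noncached(newprot);
	if (oldprot == model_writecombine(oldprot))
		newprot = model_writecombine(newprot);
	return newprot;
}

int main(void)
{
	/* An uncached read-write mapping stays uncached after a permission change. */
	assert(model_pgprot_modify(P_NOCACHE | P_RW, P_RW) & P_NOCACHE);
	return 0;
}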
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 1cd3f5d767a8..eb6f9e6c3075 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -23,9 +23,6 @@ static __always_inline void preempt_count_set(int pc)
/*
* must be macros to avoid header recursion hell
*/
-#define task_preempt_count(p) \
- (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)
-
#define init_task_preempt_count(p) do { \
task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644
index 000000000000..9fa1f653ed3b
--- /dev/null
+++ b/include/asm-generic/seccomp.h
@@ -0,0 +1,30 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#ifndef __NR_seccomp_sigreturn
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+#endif
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index f1a24b5c3b90..b58fd667f87b 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -3,6 +3,8 @@
/* References to section boundaries */
+#include <linux/compiler.h>
+
/*
* Usage guidelines:
* _text, _data: architecture specific, don't use them in arch-independent code
@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
/* Start and end of .ctors section - used for constructor calls. */
extern char __ctors_start[], __ctors_end[];
+extern __visible const void __nosave_begin, __nosave_end;
+
/* function descriptor handling (if any). Override
* in asm/sections.h */
#ifndef dereference_function_descriptor
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index d401e5463fb0..0c938a4354f6 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -147,7 +147,7 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
*
* Returns the AUDIT_ARCH_* based on the system call convention in use.
*
- * It's only valid to call this when @task is stopped on entry to a system
+ * It's only valid to call this when current is stopped on entry to a system
* call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
*
* Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 5672d7ea1fa0..08848050922e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,10 +96,9 @@ struct mmu_gather {
#endif
unsigned long start;
unsigned long end;
- unsigned int need_flush : 1, /* Did free PTEs */
/* we are in the middle of an operation to clear
* a full mm and can make some optimizations */
- fullmm : 1,
+ unsigned int fullmm : 1,
/* we have performed an operation which
* requires a complete flush of the tlb */
need_flush_all : 1;
@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
tlb_flush_mmu(tlb);
}
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+ unsigned long address)
+{
+ tlb->start = min(tlb->start, address);
+ tlb->end = max(tlb->end, address + PAGE_SIZE);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+}
+
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+#ifndef tlb_start_vma
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#endif
+
+#define __tlb_end_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm && tlb->end) { \
+ tlb_flush(tlb); \
+ __tlb_reset_range(tlb); \
+ } \
+ } while (0)
+
+#ifndef tlb_end_vma
+#define tlb_end_vma __tlb_end_vma
+#endif
+
+#ifndef __tlb_remove_tlb_entry
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#endif
+
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate. This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- tlb->need_flush = 1; \
+ __tlb_adjust_range(tlb, address); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
- tlb->need_flush = 1; \
+ __tlb_adjust_range(tlb, address); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
#define pte_free_tlb(tlb, ptep, address) \
do { \
- tlb->need_flush = 1; \
+ __tlb_adjust_range(tlb, address); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address) \
do { \
- tlb->need_flush = 1; \
+ __tlb_adjust_range(tlb, address); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- tlb->need_flush = 1; \
+ __tlb_adjust_range(tlb, address); \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
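
The need_flush bit is replaced by explicit range tracking: __tlb_reset_range() starts from an empty [TASK_SIZE, 0) range, __tlb_adjust_range() grows it one page at a time, and __tlb_end_vma() only flushes when tlb->end is non-zero. Below is a small stand-alone model of that bookkeeping, with invented names and constants.

#include <assert.h>

#define DEMO_TASK_SIZE	0xc0000000UL	/* stand-in for TASK_SIZE */
#define DEMO_PAGE_SIZE	4096UL

struct demo_gather { unsigned long start, end; };

/* Mirrors __tlb_reset_range(): an empty range, ready to be grown. */
static void demo_reset(struct demo_gather *tlb)
{
	tlb->start = DEMO_TASK_SIZE;
	tlb->end = 0;
}

/* Mirrors __tlb_adjust_range(): extend the range to cover one more page. */
static void demo_adjust(struct demo_gather *tlb, unsigned long address)
{
	if (address < tlb->start)
		tlb->start = address;
	if (address + DEMO_PAGE_SIZE > tlb->end)
		tlb->end = address + DEMO_PAGE_SIZE;
}

int main(void)
{
	struct demo_gather tlb;

	demo_reset(&tlb);
	demo_adjust(&tlb, 0x2000);
	demo_adjust(&tlb, 0x7000);
	/* A non-zero end is what __tlb_end_vma() uses to decide to flush. */
	assert(tlb.start == 0x2000 && tlb.end == 0x8000);
	return 0;
}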
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5ba0360663a7..bee5d683074d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -40,6 +40,8 @@
* }
*
* [__init_begin, __init_end] is the init section that may be freed after init
+ * // __init_begin and __init_end should be page aligned, so that we can
+ * // free the whole .init memory
* [_stext, _etext] is the text section
* [_sdata, _edata] is the data section
*
@@ -162,6 +164,7 @@
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
@@ -495,6 +498,7 @@
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
+ IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 882675e7c055..5186f750c713 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -82,15 +82,6 @@ typedef uint32_t drbg_flag_t;
struct drbg_core {
drbg_flag_t flags; /* flags for the cipher */
__u8 statelen; /* maximum state length */
- /*
- * maximum length of personalization string or additional input
- * string -- exponent for base 2
- */
- __u8 max_addtllen;
- /* maximum bits per RNG request -- exponent for base 2*/
- __u8 max_bits;
- /* maximum number of requests -- exponent for base 2 */
- __u8 max_req;
__u8 blocklen_bytes; /* block size of output in bytes */
char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
/* kernel crypto API backend cipher name */
@@ -156,12 +147,13 @@ static inline __u8 drbg_keylen(struct drbg_state *drbg)
static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
{
- /* max_bits is in bits, but buflen is in bytes */
- return (1 << (drbg->core->max_bits - 3));
+ /* SP800-90A requires the limit 2**19 bits, but we return bytes */
+ return (1 << 16);
}
static inline size_t drbg_max_addtl(struct drbg_state *drbg)
{
+ /* SP800-90A requires 2**35 bytes additional info str / pers str */
#if (__BITS_PER_LONG == 32)
/*
* SP800-90A allows smaller maximum numbers to be returned -- we
@@ -170,16 +162,17 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg)
*/
return (SIZE_MAX - 1);
#else
- return (1UL<<(drbg->core->max_addtllen));
+ return (1UL<<35);
#endif
}
static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
+ /* SP800-90A requires 2**48 maximum requests before reseeding */
#if (__BITS_PER_LONG == 32)
return SIZE_MAX;
#else
- return (1UL<<(drbg->core->max_req));
+ return (1UL<<48);
#endif
}
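
The per-core exponents are replaced by the SP800-90A constants directly; for the request size, the standard's 2**19-bit cap is what the 1 << 16 byte value expresses. A one-line sanity check of that conversion:

#include <assert.h>

int main(void)
{
	/* 2**19 bits per request, expressed in bytes, is 1 << 16. */
	assert((1UL << 19) / 8 == (1UL << 16));
	return 0;
}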
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index a39195539601..98abda9ed3aa 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -17,6 +17,32 @@
struct crypto_ahash;
+/**
+ * DOC: Message Digest Algorithm Definitions
+ *
+ * These data structures define modular message digest algorithm
+ * implementations, managed via crypto_register_ahash(),
+ * crypto_register_shash(), crypto_unregister_ahash() and
+ * crypto_unregister_shash().
+ */
+
+/**
+ * struct hash_alg_common - define properties of message digest
+ * @digestsize: Size of the result of the transformation. A buffer of this size
+ * must be available to the @final and @finup calls, so they can
+ * store the resulting hash into it. For various predefined sizes,
+ * search include/crypto/ using
+ * git grep _DIGEST_SIZE include/crypto.
+ * @statesize: Size of the block for partial state of the transformation. A
+ * buffer of this size must be passed to the @export function as it
+ * will save the partial state of the transformation into it. On the
+ * other side, the @import function will load the state from a
+ * buffer of this size as well.
+ * @base: Start of data structure of cipher algorithm. The common data
+ * structure of crypto_alg contains information common to all ciphers.
+ * The hash_alg_common data structure now adds the hash-specific
+ * information.
+ */
struct hash_alg_common {
unsigned int digestsize;
unsigned int statesize;
@@ -37,6 +63,63 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+/**
+ * struct ahash_alg - asynchronous message digest definition
+ * @init: Initialize the transformation context. Intended only to initialize the
+ * state of the HASH transformation at the beginning. This shall fill in
+ * the internal structures used during the entire duration of the whole
+ * transformation. No data processing happens at this point.
+ * @update: Push a chunk of data into the driver for transformation. This
+ * function actually pushes blocks of data from upper layers into the
+ * driver, which then passes those to the hardware as seen fit. This
+ * function must not finalize the HASH transformation by calculating the
+ * final message digest as this only adds more data into the
+ * transformation. This function shall not modify the transformation
+ * context, as this function may be called in parallel with the same
+ * transformation object. Data processing can happen synchronously
+ * [SHASH] or asynchronously [AHASH] at this point.
+ * @final: Retrieve result from the driver. This function finalizes the
+ * transformation and retrieves the resulting hash from the driver and
+ * pushes it back to upper layers. No data processing happens at this
+ * point.
+ * @finup: Combination of @update and @final. This function is effectively a
+ * combination of @update and @final calls issued in sequence. As some
+ * hardware cannot do @update and @final separately, this callback was
+ * added to allow such hardware to be used at least by IPsec. Data
+ * processing can happen synchronously [SHASH] or asynchronously [AHASH]
+ * at this point.
+ * @digest: Combination of @init and @update and @final. This function
+ * effectively behaves as the entire chain of operations, @init,
+ * @update and @final issued in sequence. Just like @finup, this was
+ * added for hardware which cannot do even the @finup, but can only do
+ * the whole transformation in one run. Data processing can happen
+ * synchronously [SHASH] or asynchronously [AHASH] at this point.
+ * @setkey: Set optional key used by the hashing algorithm. Intended to push
+ * optional key used by the hashing algorithm from upper layers into
+ * the driver. This function can store the key in the transformation
+ * context or can outright program it into the hardware. In the former
+ * case, one must be careful to program the key into the hardware at
+ * appropriate time and one must be careful that .setkey() can be
+ * called multiple times during the existence of the transformation
+ * object. Not all hashing algorithms do implement this function as it
+ * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
+ * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
+ * this function. This function must be called before any other of the
+ * @init, @update, @final, @finup, @digest is called. No data
+ * processing happens at this point.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save partial result of the transformation after
+ * processing certain amount of data and reload this partial result
+ * multiple times later on for multiple re-use. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ * @halg: see struct hash_alg_common
+ */
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
@@ -58,6 +141,28 @@ struct shash_desc {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define SHASH_DESC_ON_STACK(shash, ctx) \
+ char __##shash##_desc[sizeof(struct shash_desc) + \
+ crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+
+/**
+ * struct shash_alg - synchronous message digest definition
+ * @init: see struct ahash_alg
+ * @update: see struct ahash_alg
+ * @final: see struct ahash_alg
+ * @finup: see struct ahash_alg
+ * @digest: see struct ahash_alg
+ * @export: see struct ahash_alg
+ * @import: see struct ahash_alg
+ * @setkey: see struct ahash_alg
+ * @digestsize: see struct ahash_alg
+ * @statesize: see struct ahash_alg
+ * @descsize: Size of the operational state for the message digest. This state
+ * size is the memory size that needs to be allocated for
+ * shash_desc.__ctx
+ * @base: internally used
+ */
struct shash_alg {
int (*init)(struct shash_desc *desc);
int (*update)(struct shash_desc *desc, const u8 *data,
@@ -102,11 +207,35 @@ struct crypto_shash {
struct crypto_tfm base;
};
+/**
+ * DOC: Asynchronous Message Digest API
+ *
+ * The asynchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
+ *
+ * The asynchronous cipher operation discussion provided for the
+ * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ */
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
}
+/**
+ * crypto_alloc_ahash() - allocate ahash cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ahash cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ahash. The returned struct
+ * crypto_ahash is the cipher handle that is required for any subsequent
+ * API invocation for that ahash.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
@@ -115,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_ahash() - zeroize and free the ahash handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
@@ -138,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common(
return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}
+/**
+ * crypto_ahash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ *
+ * Return: message digest size of cipher
+ */
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
return crypto_hash_alg_common(tfm)->digestsize;
@@ -163,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
+/**
+ * crypto_ahash_reqtfm() - obtain cipher handle from request
+ * @req: asynchronous request handle that contains the reference to the ahash
+ * cipher handle
+ *
+ * Return the ahash cipher handle that is registered with the asynchronous
+ * request handle ahash_request.
+ *
+ * Return: ahash cipher handle
+ */
static inline struct crypto_ahash *crypto_ahash_reqtfm(
struct ahash_request *req)
{
return __crypto_ahash_cast(req->base.tfm);
}
+/**
+ * crypto_ahash_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
return tfm->reqsize;
@@ -179,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
return req->__ctx;
}
+/**
+ * crypto_ahash_setkey - set key for cipher handle
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ahash cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+
+/**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_ahash_finup(struct ahash_request *req);
+
+/**
+ * crypto_ahash_final() - calculate message digest
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer registered with the ahash_request handle.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_ahash_final(struct ahash_request *req);
+
+/**
+ * crypto_ahash_digest() - calculate message digest for a buffer
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of crypto_ahash_init,
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_ahash_digest(struct ahash_request *req);
+/**
+ * crypto_ahash_export() - extract current message digest state
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the ahash_request handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_ahash_reqsize).
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
static inline int crypto_ahash_export(struct ahash_request *req, void *out)
{
return crypto_ahash_reqtfm(req)->export(req, out);
}
+/**
+ * crypto_ahash_import() - import message digest state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the ahash_request handle from the
+ * input buffer. That buffer should have been generated with the
+ * crypto_ahash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
return crypto_ahash_reqtfm(req)->import(req, in);
}
+/**
+ * crypto_ahash_init() - (re)initialize message digest handle
+ * @req: ahash_request handle that already is initialized with all necessary
+ * data using the ahash_request_* API functions
+ *
+ * The call (re-)initializes the message digest referenced by the ahash_request
+ * handle. Any potentially existing state created by previous operations is
+ * discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
static inline int crypto_ahash_init(struct ahash_request *req)
{
return crypto_ahash_reqtfm(req)->init(req);
}
+/**
+ * crypto_ahash_update() - add data to message digest for processing
+ * @req: ahash_request handle that was previously initialized with the
+ * crypto_ahash_init call.
+ *
+ * Updates the message digest state of the &ahash_request handle. The input data
+ * is pointed to by the scatter/gather list registered in the &ahash_request
+ * handle
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_ahash_update(struct ahash_request *req)
{
return crypto_ahash_reqtfm(req)->update(req);
}
+/**
+ * DOC: Asynchronous Hash Request Handle
+ *
+ * The &ahash_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple &ahash_request instances), pointer
+ * to plaintext and the message digest output buffer, asynchronous callback
+ * function, etc. It acts as a handle to the ahash_request_* API calls in a
+ * similar way as ahash handle to the crypto_ahash_* API calls.
+ */
+
+/**
+ * ahash_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ahash handle in the request
+ * data structure with a different one.
+ */
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
req->base.tfm = crypto_ahash_tfm(tfm);
}
+/**
+ * ahash_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ahash
+ * message digest API calls. During the allocation, the provided ahash handle
+ * is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
{
@@ -225,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc(
return req;
}
+/**
+ * ahash_request_free() - zeroize and free the request data structure
+ * @req: request data structure cipher handle to be freed
+ */
static inline void ahash_request_free(struct ahash_request *req)
{
kzfree(req);
@@ -236,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast(
return container_of(req, struct ahash_request, base);
}
+/**
+ * ahash_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * &crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the &ahash_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
static inline void ahash_request_set_callback(struct ahash_request *req,
u32 flags,
crypto_completion_t compl,
@@ -246,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
req->base.flags = flags;
}
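A callback matching the template above might be wired up as in the following sketch; struct example_ctx and example_hash_complete() are hypothetical names used only for illustration:

	/* Illustrative sketch: completion context and callback for an ahash request. */
	struct example_ctx {
		struct completion completion;	/* <linux/completion.h> */
		int err;
	};

	static void example_hash_complete(struct crypto_async_request *areq, int error)
	{
		struct example_ctx *ctx = areq->data;

		if (error == -EINPROGRESS)
			return;		/* backlogged request, the final call follows */

		ctx->err = error;
		complete(&ctx->completion);
	}

	/* register the callback and the private context with the request */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
				   example_hash_complete, ctx);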
+/**
+ * ahash_request_set_crypt() - set data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source scatter/gather list
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source scatter/gather list
+ *
+ * By using this call, the caller references the source scatter/gather list.
+ * The source scatter/gather list points to the data the message digest is to
+ * be calculated for.
+ */
static inline void ahash_request_set_crypt(struct ahash_request *req,
struct scatterlist *src, u8 *result,
unsigned int nbytes)
@@ -255,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
req->result = result;
}
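Continuing the hypothetical sketch from above (same req and ctx, with ctx->completion assumed to be initialized via init_completion()), a one-shot asynchronous digest over a linear buffer could be driven like this; data, len and the SHA-256 digest size are assumptions for illustration:

	/* Illustrative sketch: hash a single linear buffer asynchronously. */
	struct scatterlist sg;
	u8 digest[SHA256_DIGEST_SIZE];	/* <crypto/sha.h>, assumes a sha256 tfm */
	int err;

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&ctx->completion);
		err = ctx->err;
	}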
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto).
+ *
+ * The message digest API is able to maintain state information for the
+ * caller.
+ *
+ * The synchronous message digest API can store user-related context in its
+ * shash_desc request data structure.
+ */
+
+/**
+ * crypto_alloc_shash() - allocate message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned &struct
+ * crypto_shash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
@@ -263,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_shash() - zeroize and free the message digest handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
@@ -274,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask(
return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
}
+/**
+ * crypto_shash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
@@ -289,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}
+/**
+ * crypto_shash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: digest size of cipher
+ */
static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->digestsize;
@@ -314,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
}
+/**
+ * crypto_shash_descsize() - obtain the operational state size
+ * @tfm: cipher handle
+ *
+ * The size of the operational state the cipher needs during operation is
+ * returned for the hash referenced with the cipher handle. This size is
+ * required to calculate the memory requirements so that the caller can
+ * allocate sufficient memory for the operational state.
+ *
+ * The operational state is defined with struct shash_desc where the size of
+ * that data structure is to be calculated as
+ * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
+ *
+ * Return: size of the operational state
+ */
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
return tfm->descsize;
@@ -324,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
return desc->__ctx;
}
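A minimal sketch of the allocation pattern described above follows; the "sha256" name and the kmalloc-based error handling are illustrative assumptions:

	/* Illustrative sketch: allocate a shash transform and its operational state. */
	struct crypto_shash *tfm;
	struct shash_desc *desc;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;	/* or CRYPTO_TFM_REQ_MAY_SLEEP */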
+/**
+ * crypto_shash_setkey() - set key for message digest
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller-provided key is set for the keyed message digest cipher. The
+ * cipher handle must point to a keyed message digest cipher in order for this
+ * function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
+
+/**
+ * crypto_shash_digest() - calculate message digest for buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_shash_init,
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
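For clarity, a sketch of the equivalence this short-hand provides (desc, data, len and out as in the examples above):

	/* Illustrative sketch: the long form... */
	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, data, len);
	if (!err)
		err = crypto_shash_final(desc, out);

	/* ...and the equivalent single call */
	err = crypto_shash_digest(desc, data, len, out);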
+/**
+ * crypto_shash_export() - extract operational state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the operational state handle into the
+ * caller-allocated output buffer out, which must have sufficient size (e.g. as
+ * determined by calling crypto_shash_descsize()).
+ *
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
static inline int crypto_shash_export(struct shash_desc *desc, void *out)
{
return crypto_shash_alg(desc->tfm)->export(desc, out);
}
+/**
+ * crypto_shash_import() - import operational state
+ * @desc: reference to the operational state handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the operational state handle from
+ * the input buffer. That buffer should have been generated with the
+ * crypto_shash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
return crypto_shash_alg(desc->tfm)->import(desc, in);
}
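A hedged sketch of the export/import round trip described above; the buffer size follows the crypto_shash_descsize() guidance given for crypto_shash_export(), and all names are illustrative:

	/* Illustrative sketch: checkpoint a partial hash and resume it later. */
	void *state;
	int err;

	state = kmalloc(crypto_shash_descsize(desc->tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_shash_export(desc, state);		/* snapshot current state */
	if (!err) {
		/* ... later, on a desc set up for the same algorithm ... */
		err = crypto_shash_import(desc, state);
	}
	kfree(state);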
+/**
+ * crypto_shash_init() - (re)initialize message digest
+ * @desc: operational state handle that is already filled
+ *
+ * The call (re-)initializes the message digest referenced by the
+ * operational state handle. Any potentially existing state created by
+ * previous operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
static inline int crypto_shash_init(struct shash_desc *desc)
{
return crypto_shash_alg(desc->tfm)->init(desc);
}
+/**
+ * crypto_shash_update() - add data to message digest for processing
+ * @desc: operational state handle that is already initialized
+ * @data: input data to be added to the message digest
+ * @len: length of the input data
+ *
+ * Updates the message digest state of the operational state handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+
+/**
+ * crypto_shash_final() - calculate message digest
+ * @desc: operational state handle that is already filled with data
+ * @out: output buffer filled with the message digest
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer. The caller must ensure that the output buffer is
+ * large enough by using crypto_shash_digestsize.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_final(struct shash_desc *desc, u8 *out);
+
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index d61c11170213..cd62bf4289e9 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -42,6 +42,7 @@ struct af_alg_completion {
struct af_alg_control {
struct af_alg_iv *iv;
int op;
+ unsigned int aead_assoclen;
};
struct af_alg_type {
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 9b6f32a6cad1..3b4af1d7c7e9 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -117,6 +117,15 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_mcryptd_update(struct ahash_request *req,
+ struct shash_desc *desc);
+int shash_ahash_mcryptd_final(struct ahash_request *req,
+ struct shash_desc *desc);
+int shash_ahash_mcryptd_finup(struct ahash_request *req,
+ struct shash_desc *desc);
+int shash_ahash_mcryptd_digest(struct ahash_request *req,
+ struct shash_desc *desc);
+
int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
new file mode 100644
index 000000000000..c23ee1f7ee80
--- /dev/null
+++ b/include/crypto/mcryptd.h
@@ -0,0 +1,112 @@
+/*
+ * Software async multibuffer crypto daemon headers
+ *
+ * Author:
+ * Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ */
+
+#ifndef _CRYPTO_MCRYPT_H
+#define _CRYPTO_MCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/hash.h>
+
+struct mcryptd_ahash {
+ struct crypto_ahash base;
+};
+
+static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
+ struct crypto_ahash *tfm)
+{
+ return (struct mcryptd_ahash *)tfm;
+}
+
+struct mcryptd_cpu_queue {
+ struct crypto_queue queue;
+ struct work_struct work;
+};
+
+struct mcryptd_queue {
+ struct mcryptd_cpu_queue __percpu *cpu_queue;
+};
+
+struct mcryptd_instance_ctx {
+ struct crypto_spawn spawn;
+ struct mcryptd_queue *queue;
+};
+
+struct mcryptd_hash_ctx {
+ struct crypto_shash *child;
+ struct mcryptd_alg_state *alg_state;
+};
+
+struct mcryptd_tag {
+ /* seq number of request */
+ unsigned seq_num;
+ /* arrival time of request */
+ unsigned long arrival;
+ unsigned long expire;
+ int cpu;
+};
+
+struct mcryptd_hash_request_ctx {
+ struct list_head waiter;
+ crypto_completion_t complete;
+ struct mcryptd_tag tag;
+ struct crypto_hash_walk walk;
+ u8 *out;
+ int flag;
+ struct shash_desc desc;
+};
+
+struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask);
+struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
+struct shash_desc *mcryptd_shash_desc(struct ahash_request *req);
+void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
+void mcryptd_flusher(struct work_struct *work);
+
+enum mcryptd_req_type {
+ MCRYPTD_NONE,
+ MCRYPTD_UPDATE,
+ MCRYPTD_FINUP,
+ MCRYPTD_DIGEST,
+ MCRYPTD_FINAL
+};
+
+struct mcryptd_alg_cstate {
+ unsigned long next_flush;
+ unsigned next_seq_num;
+ bool flusher_engaged;
+ struct delayed_work flush;
+ int cpu;
+ struct mcryptd_alg_state *alg_state;
+ void *mgr;
+ spinlock_t work_lock;
+ struct list_head work_list;
+ struct list_head flush_list;
+};
+
+struct mcryptd_alg_state {
+ struct mcryptd_alg_cstate __percpu *alg_cstate;
+ unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
+};
+
+/* return delay in jiffies from current time */
+static inline unsigned long get_delay(unsigned long t)
+{
+ long delay;
+
+ delay = (long) t - (long) jiffies;
+ if (delay <= 0)
+ return 0;
+ else
+ return (unsigned long) delay;
+}
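A sketch of the intended use (an assumption based on the declarations in this header, not code taken from this patch): the helper converts an absolute jiffies deadline, such as the next_flush field above, into the relative delay expected by mcryptd_arm_flusher() declared below:

	/* Illustrative sketch: schedule the flusher relative to an absolute deadline. */
	mcryptd_arm_flusher(cstate, get_delay(cstate->next_flush));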
+
+void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
+
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 0d164c6af539..54add2069901 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/mpi.h>
+#include <keys/asymmetric-type.h>
#include <crypto/hash_info.h>
enum pkey_algo {
@@ -98,8 +99,9 @@ struct key;
extern int verify_signature(const struct key *key,
const struct public_key_signature *sig);
+struct asymmetric_key_id;
extern struct key *x509_request_asymmetric_key(struct key *keyring,
- const char *issuer,
- const char *key_id);
+ const struct asymmetric_key_id *kid,
+ bool partial);
#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index c93f9b917925..a16fb10142bf 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng;
int crypto_get_default_rng(void);
void crypto_put_default_rng(void);
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto).
+ */
+
static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
{
return (struct crypto_rng *)tfm;
}
+/**
+ * crypto_alloc_rng() - allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      random number generator
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
u32 type, u32 mask)
{
@@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
return &tfm->base;
}
+/**
+ * crypto_rng_alg() - obtain the RNG algorithm definition
+ * @tfm: cipher handle
+ *
+ * Return the rng_alg structure of the random number generator referenced by
+ * the cipher handle.
+ *
+ * Return: pointer to the rng_alg data structure
+ */
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
@@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
return &crypto_rng_tfm(tfm)->crt_rng;
}
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
crypto_free_tfm(crypto_rng_tfm(tfm));
}
+/**
+ * crypto_rng_get_bytes() - get random number
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * random number generator referenced by the cipher handle.
+ *
+ * Return: > 0 if the function was successful, the value being the number of
+ * generated bytes; < 0 if an error occurred
+ */
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
{
return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
}
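A short usage sketch assuming the system default RNG declared at the top of this header; the 16-byte buffer is purely illustrative:

	/* Illustrative sketch: fill a small buffer from the default RNG instance. */
	u8 buf[16];
	int err;

	err = crypto_get_default_rng();
	if (err)
		return err;

	err = crypto_rng_get_bytes(crypto_default_rng, buf, sizeof(buf));
	crypto_put_default_rng();
	if (err < 0)
		return err;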
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller-provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires
+ * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
+ * The seed is provided as a parameter to this function call. The provided seed
+ * should have the length of the seed size defined for the random number
+ * generator as defined by crypto_rng_seedsize.
+ *
+ * Return: 0 if the reset was successful; < 0 if an error occurred
+ */
static inline int crypto_rng_reset(struct crypto_rng *tfm,
u8 *seed, unsigned int slen)
{
return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
}
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not implement or require reseeding. For example,
+ * the SP800-90A DRBGs implement an automated reseeding after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
{
return crypto_rng_alg(tfm)->seedsize;
diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h
new file mode 100644
index 000000000000..5765648b5ef7
--- /dev/null
+++ b/include/drm/ati_pcigart.h
@@ -0,0 +1,30 @@
+#ifndef DRM_ATI_PCIGART_H
+#define DRM_ATI_PCIGART_H
+
+#include <drm/drm_legacy.h>
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+ int gart_table_location;
+ int gart_reg_if;
+ void *addr;
+ dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ struct drm_dma_handle *table_handle;
+ struct drm_local_map mapping;
+ int table_size;
+};
+
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+
+#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 196890735367..e1b2e8b98af7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1,17 +1,14 @@
-/**
- * \file drmP.h
- * Private header for Direct Rendering Manager
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
+ * Internal Header for the Direct Rendering Manager
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright (c) 2009-2010, Code Aurora Forum.
* All rights reserved.
*
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -35,59 +32,62 @@
#ifndef _DRM_P_H_
#define _DRM_P_H_
-#ifdef __KERNEL__
-#ifdef __alpha__
-/* add include of current.h so that "current" is defined
- * before static inline funcs in wait.h. Doing this so we
- * can build the DRM (part of PI DRI). 4/21/2000 S + B */
-#include <asm/current.h>
-#endif /* __alpha__ */
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/cdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
#include <linux/init.h>
-#include <linux/file.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
+#include <linux/io.h>
#include <linux/jiffies.h>
-#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
#include <linux/mm.h>
-#include <linux/cdev.h>
#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/ratelimit.h>
-#if defined(__alpha__) || defined(__powerpc__)
-#include <asm/pgtable.h> /* For pte_wrprotect */
-#endif
-#include <asm/mman.h>
-#include <asm/uaccess.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/poll.h>
+
+#include <asm/mman.h>
#include <asm/pgalloc.h>
-#include <drm/drm.h>
-#include <drm/drm_sarea.h>
-#include <drm/drm_vma_manager.h>
+#include <asm/uaccess.h>
-#include <linux/idr.h>
+#include <uapi/drm/drm.h>
+#include <uapi/drm/drm_mode.h>
-#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_global.h>
+#include <drm/drm_hashtab.h>
+#include <drm/drm_mem_util.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_os_linux.h>
+#include <drm/drm_sarea.h>
+#include <drm/drm_vma_manager.h>
struct module;
struct drm_file;
struct drm_device;
+struct drm_agp_head;
+struct drm_local_map;
+struct drm_device_dma;
+struct drm_dma_handle;
+struct drm_gem_object;
struct device_node;
struct videomode;
struct reservation_object;
-
-#include <drm/drm_os_linux.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_mm.h>
+struct dma_buf_attachment;
/*
* 4 debug categories are defined:
@@ -125,8 +125,8 @@ struct reservation_object;
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
const char *format, ...);
-extern __printf(2, 3)
-int drm_err(const char *func, const char *format, ...);
+extern __printf(1, 2)
+void drm_err(const char *format, ...);
/***********************************************************************/
/** \name DRM template customization defaults */
@@ -145,19 +145,6 @@ int drm_err(const char *func, const char *format, ...);
#define DRIVER_RENDER 0x8000
/***********************************************************************/
-/** \name Begin the DRM... */
-/*@{*/
-
-#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
- also include looping detection. */
-
-#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
-
-#define DRM_MAP_HASH_OFFSET 0x10000000
-
-/*@}*/
-
-/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/
@@ -168,7 +155,7 @@ int drm_err(const char *func, const char *format, ...);
* \param arg arguments
*/
#define DRM_ERROR(fmt, ...) \
- drm_err(__func__, fmt, ##__VA_ARGS__)
+ drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -183,7 +170,7 @@ int drm_err(const char *func, const char *format, ...);
DEFAULT_RATELIMIT_BURST); \
\
if (__ratelimit(&_rs)) \
- drm_err(__func__, fmt, ##__VA_ARGS__); \
+ drm_err(fmt, ##__VA_ARGS__); \
})
#define DRM_INFO(fmt, ...) \
@@ -198,7 +185,6 @@ int drm_err(const char *func, const char *format, ...);
* \param fmt printf() like format string.
* \param arg arguments
*/
-#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, args...) \
do { \
if (unlikely(drm_debug & DRM_UT_CORE)) \
@@ -220,12 +206,6 @@ int drm_err(const char *func, const char *format, ...);
if (unlikely(drm_debug & DRM_UT_PRIME)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
-#else
-#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
-#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
-#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
-#define DRM_DEBUG(fmt, arg...) do { } while (0)
-#endif
/*@}*/
@@ -236,23 +216,6 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
/**
- * Test that the hardware lock is held by the caller, returning otherwise.
- *
- * \param dev DRM device.
- * \param filp file pointer of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
- _file_priv->master->lock.file_priv != _file_priv) { \
- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
- _file_priv->master->lock.file_priv, _file_priv); \
- return -EINVAL; \
- } \
-} while (0)
-
-/**
* Ioctl function type.
*
* \param inode device inode.
@@ -292,80 +255,6 @@ struct drm_ioctl_desc {
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
-struct drm_magic_entry {
- struct list_head head;
- struct drm_hash_item hash_item;
- struct drm_file *priv;
-};
-
-struct drm_vma_entry {
- struct list_head head;
- struct vm_area_struct *vma;
- pid_t pid;
-};
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
- int idx; /**< Index into master buflist */
- int total; /**< Buffer size */
- int order; /**< log-base-2(total) */
- int used; /**< Amount of buffer in use (for DMA) */
- unsigned long offset; /**< Byte offset (used internally) */
- void *address; /**< Address of buffer */
- unsigned long bus_address; /**< Bus address of buffer */
- struct drm_buf *next; /**< Kernel-only: used for free list */
- __volatile__ int waiting; /**< On kernel DMA queue */
- __volatile__ int pending; /**< On hardware DMA queue */
- struct drm_file *file_priv; /**< Private of holding file descr */
- int context; /**< Kernel queue for this buffer */
- int while_locked; /**< Dispatch this buffer while locked */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /**< Which list we're on */
-
- int dev_priv_size; /**< Size of buffer private storage */
- void *dev_private; /**< Per-buffer private storage */
-};
-
-/** bufs is one longer than it has to be */
-struct drm_waitlist {
- int count; /**< Number of possible buffers */
- struct drm_buf **bufs; /**< List of pointers to buffers */
- struct drm_buf **rp; /**< Read pointer */
- struct drm_buf **wp; /**< Write pointer */
- struct drm_buf **end; /**< End pointer */
- spinlock_t read_lock;
- spinlock_t write_lock;
-};
-
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry. There is one of this for each buffer size order.
- */
-struct drm_buf_entry {
- int buf_size; /**< size */
- int buf_count; /**< number of buffers */
- struct drm_buf *buflist; /**< buffer list */
- int seg_count;
- int page_order;
- struct drm_dma_handle **seglist;
-
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
-};
-
/* Event queued up for userspace to read */
struct drm_pending_event {
struct drm_event *event;
@@ -444,214 +333,12 @@ struct drm_lock_data {
};
/**
- * DMA data.
- */
-struct drm_device_dma {
-
- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
- int buf_count; /**< total number of buffers */
- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
- int seg_count;
- int page_count; /**< number of pages */
- unsigned long *pagelist; /**< page list */
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04,
- _DRM_DMA_USE_PCI_RO = 0x08
- } flags;
-
-};
-
-/**
- * AGP memory entry. Stored as a doubly linked list.
- */
-struct drm_agp_mem {
- unsigned long handle; /**< handle */
- struct agp_memory *memory;
- unsigned long bound; /**< address */
- int pages;
- struct list_head head;
-};
-
-/**
- * AGP data.
- *
- * \sa drm_agp_init() and drm_device::agp.
- */
-struct drm_agp_head {
- struct agp_kern_info agp_info; /**< AGP device information */
- struct list_head memory;
- unsigned long mode; /**< AGP mode */
- struct agp_bridge_data *bridge;
- int enabled; /**< whether the AGP bus as been enabled */
- int acquired; /**< whether the AGP device has been acquired */
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- struct page **pagelist;
- dma_addr_t *busaddr;
-};
-
-struct drm_sigdata {
- int context;
- struct drm_hw_lock *lock;
-};
-
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
- resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- void *handle; /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- int mtrr; /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
- struct list_head head; /**< list head */
- struct drm_hash_item hash;
- struct drm_local_map *map; /**< mapping */
- uint64_t user_token;
- struct drm_master *master;
-};
-
-/* location of GART table */
-#define DRM_ATI_GART_MAIN 1
-#define DRM_ATI_GART_FB 2
-
-#define DRM_ATI_GART_PCI 1
-#define DRM_ATI_GART_PCIE 2
-#define DRM_ATI_GART_IGP 3
-
-struct drm_ati_pcigart_info {
- int gart_table_location;
- int gart_reg_if;
- void *addr;
- dma_addr_t bus_addr;
- dma_addr_t table_mask;
- struct drm_dma_handle *table_handle;
- struct drm_local_map mapping;
- int table_size;
-};
-
-/**
- * This structure defines the drm_mm memory object, which will be used by the
- * DRM for its buffer objects.
- */
-struct drm_gem_object {
- /** Reference count of this object */
- struct kref refcount;
-
- /**
- * handle_count - gem file_priv handle count of this object
- *
- * Each handle also holds a reference. Note that when the handle_count
- * drops to 0 any global names (e.g. the id in the flink namespace) will
- * be cleared.
- *
- * Protected by dev->object_name_lock.
- * */
- unsigned handle_count;
-
- /** Related drm device */
- struct drm_device *dev;
-
- /** File representing the shmem storage */
- struct file *filp;
-
- /* Mapping info for this object */
- struct drm_vma_offset_node vma_node;
-
- /**
- * Size of the object, in bytes. Immutable over the object's
- * lifetime.
- */
- size_t size;
-
- /**
- * Global name for this object, starts at 1. 0 means unnamed.
- * Access is covered by the object_name_lock in the related drm_device
- */
- int name;
-
- /**
- * Memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated
- */
- uint32_t read_domains;
- uint32_t write_domain;
-
- /**
- * While validating an exec operation, the
- * new read/write domain values are computed here.
- * They will be transferred to the above values
- * at the point that any cache flushing occurs
- */
- uint32_t pending_read_domains;
- uint32_t pending_write_domain;
-
- /**
- * dma_buf - dma buf associated with this GEM object
- *
- * Pointer to the dma-buf associated with this gem object (either
- * through importing or exporting). We break the resulting reference
- * loop when the last gem handle for this object is released.
- *
- * Protected by obj->object_name_lock
- */
- struct dma_buf *dma_buf;
-
- /**
- * import_attach - dma buf attachment backing this object
- *
- * Any foreign dma_buf imported as a gem object has this set to the
- * attachment point for the device. This is invariant over the lifetime
- * of a gem object.
- *
- * The driver's ->gem_free_object callback is responsible for cleaning
- * up the dma_buf attachment and references acquired at import time.
- *
- * Note that the drm gem/prime core does not depend upon drivers setting
- * this field any more. So for drivers where this doesn't make sense
- * (e.g. virtual devices or a displaylink behind an usb bus) they can
- * simply leave it as NULL.
- */
- struct dma_buf_attachment *import_attach;
-};
-
-#include <drm/drm_crtc.h>
-
-/**
* struct drm_master - drm master structure
*
* @refcount: Refcount for this master object.
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
- * @unique_size: Amount allocated. Protected by drm_global_mutex.
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
* @magicfree: List of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
@@ -662,7 +349,6 @@ struct drm_master {
struct drm_minor *minor;
char *unique;
int unique_len;
- int unique_size;
struct drm_open_hash magiclist;
struct list_head magicfree;
struct drm_lock_data lock;
@@ -677,17 +363,13 @@ struct drm_master {
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
-#define DRM_VBLANKTIME_INVBL (1 << 1)
+#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)
/* get_scanout_position() return flags */
#define DRM_SCANOUTPOS_VALID (1 << 0)
-#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
-struct drm_bus {
- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
-};
-
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
@@ -706,6 +388,7 @@ struct drm_driver {
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
/**
* get_vblank_counter - get raw hardware vblank counter
@@ -888,7 +571,8 @@ struct drm_driver {
struct drm_gem_object *obj);
struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
struct drm_gem_object *(*gem_prime_import_sg_table)(
- struct drm_device *dev, size_t size,
+ struct drm_device *dev,
+ struct dma_buf_attachment *attach,
struct sg_table *sgt);
void *(*gem_prime_vmap)(struct drm_gem_object *obj);
void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
@@ -924,7 +608,6 @@ struct drm_driver {
const struct drm_ioctl_desc *ioctls;
int num_ioctls;
const struct file_operations *fops;
- struct drm_bus *bus;
/* List of devices hanging off this driver with stealth attach. */
struct list_head legacy_dev_list;
@@ -1079,6 +762,16 @@ struct drm_device {
*/
bool vblank_disable_allowed;
+ /*
+ * If true, vblank interrupt will be disabled immediately when the
+ * refcount drops to zero, as opposed to via the vblank disable
+ * timer.
+ * This can be set to true if the hardware has a working vblank
+ * counter and the driver uses drm_vblank_on() and drm_vblank_off()
+ * appropriately.
+ */
+ bool vblank_disable_immediate;
+
/* array of size num_crtcs */
struct drm_vblank_crtc *vblank;
@@ -1103,17 +796,20 @@ struct drm_device {
#endif
struct platform_device *platformdev; /**< Platform device struture */
- struct usb_device *usbdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
unsigned int num_crtcs; /**< Number of CRTCs on this device */
- struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
+ struct {
+ int context;
+ struct drm_hw_lock *lock;
+ } sigdata;
+
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
- struct drm_mode_config mode_config; /**< Current mode config */
+ struct drm_mode_config mode_config; /**< Current mode config */
/** \name GEM information */
/*@{ */
@@ -1172,112 +868,32 @@ extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_lastclose(struct drm_device *dev);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* Device support (drm_fops.h) */
-extern struct mutex drm_global_mutex;
extern int drm_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
-extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
-extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
-extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
-extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
- /* Memory management support (drm_memory.h) */
-#include <drm/drm_memory.h>
-
-
- /* Misc. IOCTL support (drm_ioctl.h) */
-extern int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_setunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getmap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getclient(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getstats(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getcap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_setclientcap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_setversion(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_noop(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
- /* Authentication IOCTL support (drm_auth.h) */
-extern int drm_getmagic(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_authmagic(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
- /* Locking IOCTL support (drm_lock.h) */
-extern int drm_lock(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_unlock(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
-extern void drm_idlelock_take(struct drm_lock_data *lock_data);
-extern void drm_idlelock_release(struct drm_lock_data *lock_data);
-
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
-extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
-
- /* Buffer management support (drm_bufs.h) */
-extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
-extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
-extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_ptr);
-extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
-extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
-extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_dma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
- /* DMA support (drm_dma.h) */
-extern int drm_legacy_dma_setup(struct drm_device *dev);
-extern void drm_legacy_dma_takedown(struct drm_device *dev);
-extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
-extern void drm_core_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp);
-
/* IRQ support (drm_irq.h) */
-extern int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
@@ -1285,23 +901,27 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
- struct timeval *tvblank, unsigned flags);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
int crtc, int *max_error,
struct timeval *vblank_time,
@@ -1311,23 +931,23 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() & co.
+ */
+static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
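A hedged driver-side sketch of the wait_event() usage mentioned above; example_wait_next_vblank() and the 100 ms timeout are illustrative assumptions, roughly mirroring what drm_wait_one_vblank() does:

	/* Illustrative sketch: block until the vblank counter of a CRTC advances. */
	static int example_wait_next_vblank(struct drm_crtc *crtc)
	{
		u32 last = drm_crtc_vblank_count(crtc);
		long ret;

		if (drm_crtc_vblank_get(crtc))		/* enable the vblank interrupt */
			return -EINVAL;

		ret = wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
					 drm_crtc_vblank_count(crtc) != last,
					 msecs_to_jiffies(100));
		drm_crtc_vblank_put(crtc);

		return ret ? 0 : -ETIMEDOUT;
	}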
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
-extern int drm_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
- /* AGP/GART support (drm_agpsupport.h) */
-
-#include <drm/drm_agpsupport.h>
/* Stub support (drm_stub.h) */
-extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
@@ -1335,33 +955,14 @@ extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
-extern unsigned int drm_vblank_offdelay;
-extern unsigned int drm_timestamp_precision;
-extern unsigned int drm_timestamp_monotonic;
-
-extern struct class *drm_class;
-
-extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
-
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
-extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
- struct dentry *root);
extern int drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
extern int drm_debugfs_remove_files(const struct drm_info_list *files,
int count, struct drm_minor *minor);
-extern int drm_debugfs_cleanup(struct drm_minor *minor);
-extern int drm_debugfs_connector_add(struct drm_connector *connector);
-extern void drm_debugfs_connector_remove(struct drm_connector *connector);
#else
-static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
- struct dentry *root)
-{
- return 0;
-}
-
static inline int drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor)
@@ -1374,31 +975,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
{
return 0;
}
-
-static inline int drm_debugfs_cleanup(struct drm_minor *minor)
-{
- return 0;
-}
-
-static inline int drm_debugfs_connector_add(struct drm_connector *connector)
-{
- return 0;
-}
-static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
-{
-}
-
#endif
- /* Info file support */
-extern int drm_name_info(struct seq_file *m, void *data);
-extern int drm_vm_info(struct seq_file *m, void *data);
-extern int drm_bufs_info(struct seq_file *m, void *data);
-extern int drm_vblank_info(struct seq_file *m, void *data);
-extern int drm_clients_info(struct seq_file *m, void* data);
-extern int drm_gem_name_info(struct seq_file *m, void *data);
-
-
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
@@ -1410,149 +988,19 @@ extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages);
-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
-
-#if DRM_DEBUG_CODE
-extern int drm_vma_info(struct seq_file *m, void *data);
-#endif
-
- /* Scatter Gather Support (drm_scatter.h) */
-extern void drm_legacy_sg_cleanup(struct drm_device *dev);
-extern int drm_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
- /* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-
-extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
-extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
-extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
-extern int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u);
+extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align);
+extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
/* sysfs support (drm_sysfs.c) */
-struct drm_sysfs_class;
-extern struct class *drm_sysfs_create(struct module *owner, char *name);
-extern void drm_sysfs_destroy(void);
-extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-extern int drm_sysfs_connector_add(struct drm_connector *connector);
-extern void drm_sysfs_connector_remove(struct drm_connector *connector);
-
-/* Graphics Execution Manager library functions (drm_gem.c) */
-int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
-void drm_gem_object_release(struct drm_gem_object *obj);
-void drm_gem_object_free(struct kref *kref);
-int drm_gem_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size);
-void drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size);
-void drm_gem_vm_open(struct vm_area_struct *vma);
-void drm_gem_vm_close(struct vm_area_struct *vma);
-int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
- struct vm_area_struct *vma);
-int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
-#include <drm/drm_global.h>
-
-static inline void
-drm_gem_object_reference(struct drm_gem_object *obj)
-{
- kref_get(&obj->refcount);
-}
-static inline void
-drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- if (obj != NULL)
- kref_put(&obj->refcount, drm_gem_object_free);
-}
-
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
- if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
- struct drm_device *dev = obj->dev;
-
- mutex_lock(&dev->struct_mutex);
- if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
- drm_gem_object_free(&obj->refcount);
- mutex_unlock(&dev->struct_mutex);
- }
-}
-
-int drm_gem_handle_create_tail(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep);
-int drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep);
-int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
-
-
-void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
-int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
-int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
-
-struct page **drm_gem_get_pages(struct drm_gem_object *obj);
-void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
- bool dirty, bool accessed);
-
-struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
- struct drm_file *filp,
- u32 handle);
-int drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
-void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-
-extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
-extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
- unsigned int token)
-{
- struct drm_map_list *_entry;
- list_for_each_entry(_entry, &dev->maplist, head)
- if (_entry->user_token == token)
- return _entry->map;
- return NULL;
-}
-
-static __inline__ void drm_core_dropmap(struct drm_local_map *map)
-{
-}
-
-#include <drm/drm_mem_util.h>
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent);
@@ -1584,9 +1032,25 @@ void drm_pci_agp_destroy(struct drm_device *dev);
extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
+extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -ENOSYS;
+}
+#endif
#define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2
@@ -1596,6 +1060,7 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
+extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)
@@ -1605,5 +1070,4 @@ static __inline__ bool drm_can_sleep(void)
return true;
}
-#endif /* __KERNEL__ */
#endif
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 86a02188074b..055dc058d147 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -1,12 +1,32 @@
#ifndef _DRM_AGPSUPPORT_H_
#define _DRM_AGPSUPPORT_H_
+#include <linux/agp_backend.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/types.h>
-#include <linux/agp_backend.h>
-#include <drm/drmP.h>
+#include <uapi/drm/drm.h>
+
+struct drm_device;
+struct drm_file;
+
+#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
+ defined(MODULE)))
+
+struct drm_agp_head {
+ struct agp_kern_info agp_info;
+ struct list_head memory;
+ unsigned long mode;
+ struct agp_bridge_data *bridge;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+};
#if __OS_HAS_AGP
@@ -45,6 +65,7 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
#else /* __OS_HAS_AGP */
static inline void drm_free_agp(struct agp_memory * handle, int pages)
@@ -172,6 +193,7 @@ static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
{
return -ENODEV;
}
+
#endif /* __OS_HAS_AGP */
#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
new file mode 100644
index 000000000000..ad2229574dd9
--- /dev/null
+++ b/include/drm/drm_atomic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_H_
+#define DRM_ATOMIC_H_
+
+#include <drm/drm_crtc.h>
+
+struct drm_atomic_state * __must_check
+drm_atomic_state_alloc(struct drm_device *dev);
+void drm_atomic_state_clear(struct drm_atomic_state *state);
+void drm_atomic_state_free(struct drm_atomic_state *state);
+
+struct drm_crtc_state * __must_check
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+struct drm_plane_state * __must_check
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane);
+struct drm_connector_state * __must_check
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector);
+
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane, struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
+
+int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
+int __must_check drm_atomic_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+
+#endif /* DRM_ATOMIC_H_ */
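
As a rough illustration of how the entry points above compose (a sketch only, not code from this patch): building and committing an update for a single plane looks roughly like the following. The my_update_plane() name is hypothetical, and real callers also handle -EDEADLK by backing off and retrying.

/* Hedged sketch of the drm_atomic_* flow declared above. */
static int my_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;
	state->acquire_ctx = ctx;	/* locking context supplied by the caller */

	/* Duplicates (or returns the already duplicated) plane state. */
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
	if (ret)
		goto fail;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Checks and applies the update; on success the commit machinery
	 * takes ownership of the state. A legacy code path would handle
	 * -EDEADLK with drm_atomic_legacy_backoff() and retry. */
	ret = drm_atomic_commit(state);
	if (ret)
		goto fail;
	return 0;

fail:
	drm_atomic_state_free(state);
	return ret;
}
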
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644
index 000000000000..f956b413311e
--- /dev/null
+++ b/include/drm/drm_atomic_helper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_HELPER_H_
+#define DRM_ATOMIC_HELPER_H_
+
+#include <drm/drm_crtc.h>
+
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+
+void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+/* implementations for legacy interfaces */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int drm_atomic_helper_set_config(struct drm_mode_set *set);
+
+int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags);
+
+/* default implementations for state handling */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc: the crtc whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped. To iterate over the
+ * planes which *will* be attached (for ->atomic_check()) see
+ * drm_atomic_crtc_state_for_each_plane()
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied. Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
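
To show where these helpers plug in (again only a sketch, with hypothetical my_* names): an atomic-converted driver points its legacy plane hooks and its per-object state hooks at the helpers, and routes the new mode_config check/commit callbacks through drm_atomic_helper_check() and drm_atomic_helper_commit(). CRTCs and connectors are wired up analogously with their _crtc_ and _connector_ counterparts.

/* Hedged sketch: wiring the atomic helpers into the existing vtables. */
static const struct drm_plane_funcs my_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static const struct drm_mode_config_funcs my_mode_config_funcs = {
	/* .fb_create stays driver specific */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};
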
diff --git a/include/drm/drm_buffer.h b/include/drm/drm_buffer.h
deleted file mode 100644
index c80d3a340b94..000000000000
--- a/include/drm/drm_buffer.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2010 Pauli Nieminen.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Multipart buffer for coping data which is larger than the page size.
- *
- * Authors:
- * Pauli Nieminen <suokkos-at-gmail-dot-com>
- */
-
-#ifndef _DRM_BUFFER_H_
-#define _DRM_BUFFER_H_
-
-#include <drm/drmP.h>
-
-struct drm_buffer {
- int iterator;
- int size;
- char *data[];
-};
-
-
-/**
- * Return the index of page that buffer is currently pointing at.
- */
-static inline int drm_buffer_page(struct drm_buffer *buf)
-{
- return buf->iterator / PAGE_SIZE;
-}
-/**
- * Return the index of the current byte in the page
- */
-static inline int drm_buffer_index(struct drm_buffer *buf)
-{
- return buf->iterator & (PAGE_SIZE - 1);
-}
-/**
- * Return number of bytes that is left to process
- */
-static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
-{
- return buf->size - buf->iterator;
-}
-
-/**
- * Advance the buffer iterator number of bytes that is given.
- */
-static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
-{
- buf->iterator += bytes;
-}
-
-/**
- * Allocate the drm buffer object.
- *
- * buf: A pointer to a pointer where the object is stored.
- * size: The number of bytes to allocate.
- */
-extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
-
-/**
- * Copy the user data to the begin of the buffer and reset the processing
- * iterator.
- *
- * user_data: A pointer the data that is copied to the buffer.
- * size: The Number of bytes to copy.
- */
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
- void __user *user_data, int size);
-
-/**
- * Free the drm buffer object
- */
-extern void drm_buffer_free(struct drm_buffer *buf);
-
-/**
- * Read an object from buffer that may be split to multiple parts. If object
- * is not split function just returns the pointer to object in buffer. But in
- * case of split object data is copied to given stack object that is suplied
- * by caller.
- *
- * The processing location of the buffer is also advanced to the next byte
- * after the object.
- *
- * objsize: The size of the objet in bytes.
- * stack_obj: A pointer to a memory location where object can be copied.
- */
-extern void *drm_buffer_read_object(struct drm_buffer *buf,
- int objsize, void *stack_obj);
-
-/**
- * Returns the pointer to the dword which is offset number of elements from the
- * current processing location.
- *
- * Caller must make sure that dword is not split in the buffer. This
- * requirement is easily met if all the sizes of objects in buffer are
- * multiples of dword and PAGE_SIZE is multiple dword.
- *
- * Call to this function doesn't change the processing location.
- *
- * offset: The index of the dword relative to the internat iterator.
- */
-static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
- int offset)
-{
- int iter = buffer->iterator + offset * 4;
- return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
-}
-/**
- * Returns the pointer to the dword which is offset number of elements from
- * the current processing location.
- *
- * Call to this function doesn't change the processing location.
- *
- * offset: The index of the byte relative to the internat iterator.
- */
-static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
- int offset)
-{
- int iter = buffer->iterator + offset;
- return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
-}
-
-#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index f1105d0da059..b86329813ad3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,8 +31,8 @@
#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
-#include <drm/drm_mode.h>
-#include <drm/drm_fourcc.h>
+#include <uapi/drm/drm_mode.h>
+#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
struct drm_device;
@@ -42,6 +42,7 @@ struct drm_object_properties;
struct drm_file;
struct drm_clip_rect;
struct device_node;
+struct fence;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -136,14 +137,22 @@ struct drm_display_info {
u8 cea_rev;
};
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
- /**
- * Optinal callback for the dirty fb ioctl.
+ /*
+ * Optional callback for the dirty fb ioctl.
*
* Userspace can notify the driver via this callback
* that a area of the framebuffer has changed and should
@@ -196,7 +205,7 @@ struct drm_framebuffer {
struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
- unsigned int length;
+ size_t length;
unsigned char data[];
};
@@ -215,32 +224,74 @@ struct drm_property {
uint64_t *values;
struct drm_device *dev;
- struct list_head enum_blob_list;
+ struct list_head enum_list;
};
-void drm_modeset_lock_all(struct drm_device *dev);
-void drm_modeset_unlock_all(struct drm_device *dev);
-void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
-
struct drm_crtc;
struct drm_connector;
struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
struct drm_bridge;
+struct drm_atomic_state;
+
+/**
+ * struct drm_crtc_state - mutable CRTC state
+ * @enable: whether the CRTC should be enabled, gates all other state
+ * @mode_changed: for use by helpers and drivers when computing state updates
+ * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
+ * @last_vblank_count: for helpers and drivers to capture the vblank of the
+ * update to ensure framebuffer cleanup isn't done too early
+ * @planes_changed: for use by helpers and drivers when computing state updates
+ * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
+ * @mode: current mode timings
+ * @event: optional pointer to a DRM event to signal upon completion of the
+ * state update
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_crtc_state {
+ bool enable;
+
+ /* computed state bits used by helpers and drivers */
+ bool planes_changed : 1;
+ bool mode_changed : 1;
+
+ /* attached planes bitmask:
+ * WARNING: transitional helpers do not maintain plane_mask so
+ * drivers not converted over to atomic helpers should not rely
+ * on plane_mask being accurate!
+ */
+ u32 plane_mask;
+
+ /* last_vblank_count: for vblank waits before cleanup */
+ u32 last_vblank_count;
+
+ /* adjusted_mode: for use by helpers and drivers */
+ struct drm_display_mode adjusted_mode;
+
+ struct drm_display_mode mode;
+
+ struct drm_pending_vblank_event *event;
+
+ struct drm_atomic_state *state;
+};
/**
- * drm_crtc_funcs - control CRTCs for a given device
+ * struct drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidated (e.g. resume)
* @cursor_set: setup the cursor
+ * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
+ * @atomic_duplicate_state: duplicate the atomic state for this CRTC
+ * @atomic_destroy_state: destroy an atomic state for this CRTC
+ * @atomic_set_property: set a property on an atomic state for this CRTC
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -291,16 +342,28 @@ struct drm_crtc_funcs {
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
+ void (*atomic_destroy_state)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ int (*atomic_set_property)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
/**
- * drm_crtc - central CRTC control structure
+ * struct drm_crtc - central CRTC control structure
* @dev: parent DRM device
+ * @port: OF node used by drm_of_find_possible_crtcs()
* @head: list management
* @mutex: per-CRTC locking
* @base: base KMS object for ID tracking etc.
* @primary: primary plane for this CRTC
* @cursor: cursor plane for this CRTC
+ * @cursor_x: current x position of the cursor, used for universal cursor planes
+ * @cursor_y: current y position of the cursor, used for universal cursor planes
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
@@ -313,10 +376,13 @@ struct drm_crtc_funcs {
* @gamma_size: size of gamma ramp
* @gamma_store: gamma ramp values
* @framedur_ns: precise frame timing
- * @framedur_ns: precise line timing
+ * @linedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
+ * @state: current atomic state for this CRTC
+ * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
+ * legacy ioctls
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -326,7 +392,7 @@ struct drm_crtc {
struct device_node *port;
struct list_head head;
- /**
+ /*
* crtc mutex
*
* This provides a read lock for the overall crtc state (mode, dpms
@@ -345,10 +411,6 @@ struct drm_crtc {
int cursor_x;
int cursor_y;
- /* Temporary tracking of the old fb while a modeset is ongoing. Used
- * by drm_mode_set_config_internal to implement correct refcounting. */
- struct drm_framebuffer *old_fb;
-
bool enabled;
/* Requested mode from modesetting. */
@@ -375,11 +437,32 @@ struct drm_crtc {
void *helper_private;
struct drm_object_properties properties;
+
+ struct drm_crtc_state *state;
+
+ /*
+ * For legacy crtc ioctls so that atomic drivers can get at the locking
+ * acquire context.
+ */
+ struct drm_modeset_acquire_ctx *acquire_ctx;
};
+/**
+ * struct drm_connector_state - mutable connector state
+ * @crtc: CRTC to connect connector to, NULL if disabled
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+
+ struct drm_encoder *best_encoder;
+
+ struct drm_atomic_state *state;
+};
/**
- * drm_connector_funcs - control connectors on a given device
+ * struct drm_connector_funcs - control connectors on a given device
* @dpms: set power state (see drm_crtc_funcs above)
* @save: save connector state
* @restore: restore connector state
@@ -389,6 +472,9 @@ struct drm_crtc {
* @set_property: property for this connector may need an update
* @destroy: make object go away
* @force: notify the driver that the connector is forced on
+ * @atomic_duplicate_state: duplicate the atomic state for this connector
+ * @atomic_destroy_state: destroy an atomic state for this connector
+ * @atomic_set_property: set a property on an atomic state for this connector
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
@@ -413,10 +499,19 @@ struct drm_connector_funcs {
uint64_t val);
void (*destroy)(struct drm_connector *connector);
void (*force)(struct drm_connector *connector);
+
+ /* atomic update handling */
+ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+ void (*atomic_destroy_state)(struct drm_connector *connector,
+ struct drm_connector_state *state);
+ int (*atomic_set_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
/**
- * drm_encoder_funcs - encoder controls
+ * struct drm_encoder_funcs - encoder controls
* @reset: reset state (e.g. at init or resume time)
* @destroy: cleanup and free associated data
*
@@ -430,7 +525,7 @@ struct drm_encoder_funcs {
#define DRM_CONNECTOR_MAX_ENCODER 3
/**
- * drm_encoder - central DRM encoder structure
+ * struct drm_encoder - central DRM encoder structure
* @dev: parent DRM device
* @head: list management
* @base: base KMS object
@@ -474,7 +569,7 @@ struct drm_encoder {
#define MAX_ELD_BYTES 128
/**
- * drm_connector - central DRM connector control structure
+ * struct drm_connector - central DRM connector control structure
* @dev: parent DRM device
* @kdev: kernel device for sysfs attributes
* @attr: sysfs attributes
@@ -485,6 +580,7 @@ struct drm_encoder {
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
@@ -492,10 +588,13 @@ struct drm_encoder {
* @funcs: connector control functions
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
+ * @path_blob_ptr: DRM blob property data for the DP MST path property
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
* @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
* @encoder_ids: valid encoders for this connector
* @encoder: encoder driving this connector, if any
* @eld: EDID-like data, if present
@@ -505,6 +604,18 @@ struct drm_encoder {
* @video_latency: video latency info from ELD, if found
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile is a single monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -540,6 +651,8 @@ struct drm_connector {
struct drm_property_blob *path_blob_ptr;
+ struct drm_property_blob *tile_blob_ptr;
+
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
/* requested DPMS state */
@@ -548,6 +661,7 @@ struct drm_connector {
void *helper_private;
/* forced on connector */
+ struct drm_cmdline_mode cmdline_mode;
enum drm_connector_force force;
bool override_edid;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
@@ -564,14 +678,63 @@ struct drm_connector {
unsigned bad_edid_counter;
struct dentry *debugfs_entry;
+
+ struct drm_connector_state *state;
+
+ /* DisplayID bits */
+ bool has_tile;
+ struct drm_tile_group *tile_group;
+ bool tile_is_single_monitor;
+
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
+};
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
+ struct fence *fence;
+
+ /* Signed dest location allows it to be partially off screen */
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ uint32_t src_x, src_y;
+ uint32_t src_h, src_w;
+
+ struct drm_atomic_state *state;
};
+
/**
- * drm_plane_funcs - driver plane control functions
+ * struct drm_plane_funcs - driver plane control functions
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
+ * @reset: reset plane after state has been invalidated (e.g. resume)
* @set_property: called when a property is changed
+ * @atomic_duplicate_state: duplicate the atomic state for this plane
+ * @atomic_destroy_state: destroy an atomic state for this plane
+ * @atomic_set_property: set a property on an atomic state for this plane
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -582,9 +745,19 @@ struct drm_plane_funcs {
uint32_t src_w, uint32_t src_h);
int (*disable_plane)(struct drm_plane *plane);
void (*destroy)(struct drm_plane *plane);
+ void (*reset)(struct drm_plane *plane);
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+ void (*atomic_destroy_state)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ int (*atomic_set_property)(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
enum drm_plane_type {
@@ -594,7 +767,7 @@ enum drm_plane_type {
};
/**
- * drm_plane - central DRM plane control structure
+ * struct drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to
* @head: for list management
* @base: base mode object
@@ -603,14 +776,19 @@ enum drm_plane_type {
* @format_count: number of formats supported
* @crtc: currently bound CRTC
* @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ * drm_mode_set_config_internal() to implement correct refcounting.
* @funcs: helper functions
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
*/
struct drm_plane {
struct drm_device *dev;
struct list_head head;
+ struct drm_modeset_lock mutex;
+
struct drm_mode_object base;
uint32_t possible_crtcs;
@@ -620,15 +798,21 @@ struct drm_plane {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
+ struct drm_framebuffer *old_fb;
+
const struct drm_plane_funcs *funcs;
struct drm_object_properties properties;
enum drm_plane_type type;
+
+ void *helper_private;
+
+ struct drm_plane_state *state;
};
/**
- * drm_bridge_funcs - drm_bridge control functions
+ * struct drm_bridge_funcs - drm_bridge control functions
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
@@ -652,7 +836,7 @@ struct drm_bridge_funcs {
};
/**
- * drm_bridge - central DRM bridge control structure
+ * struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @head: list management
* @base: base mode object
@@ -670,8 +854,35 @@ struct drm_bridge {
};
/**
- * drm_mode_set - new values for a CRTC config change
- * @head: list management
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @flags: state flags like async update
+ * @planes: pointer to array of plane pointers
+ * @plane_states: pointer to array of plane state pointers
+ * @crtcs: pointer to array of CRTC pointers
+ * @crtc_states: pointer to array of CRTC state pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of connector pointers
+ * @connector_states: pointer to array of connector state pointers
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+ struct drm_device *dev;
+ uint32_t flags;
+ struct drm_plane **planes;
+ struct drm_plane_state **plane_states;
+ struct drm_crtc **crtcs;
+ struct drm_crtc_state **crtc_states;
+ int num_connector;
+ struct drm_connector **connectors;
+ struct drm_connector_state **connector_states;
+
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+};
+
+
+/**
+ * struct drm_mode_set - new values for a CRTC config change
* @fb: framebuffer to use for new config
* @crtc: CRTC whose configuration we're about to change
* @mode: mode timings to use
@@ -701,6 +912,9 @@ struct drm_mode_set {
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
+ * @atomic_check: check whether a given atomic state update is possible
+ * @atomic_commit: commit an atomic state update previously verified with
+ * atomic_check()
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
@@ -710,13 +924,20 @@ struct drm_mode_config_funcs {
struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd);
void (*output_poll_changed)(struct drm_device *dev);
+
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *a);
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *a,
+ bool async);
};
/**
- * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * struct drm_mode_group - group of mode setting resources for potential sub-grouping
* @num_crtcs: CRTC count
* @num_encoders: encoder count
* @num_connectors: connector count
+ * @num_bridges: bridge count
* @id_list: list of KMS object IDs in this group
*
* Currently this simply tracks the global mode setting state. But in the
@@ -736,10 +957,14 @@ struct drm_mode_group {
};
/**
- * drm_mode_config - Mode configuration control structure
+ * struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy ioctls
* @idr_mutex: mutex for KMS ID allocation and management
* @crtc_idr: main KMS ID tracking object
+ * @fb_lock: mutex to protect fb state and lists
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
@@ -748,17 +973,28 @@ struct drm_mode_group {
* @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
* @num_crtc: number of CRTCs on this device
* @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
* @min_width: minimum pixel width on this device
* @min_height: minimum pixel height on this device
* @max_width: maximum pixel width on this device
* @max_height: maximum pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
- * @poll_enabled: track polling status for this device
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
* @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
* @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @async_page_flip: does this device support async flips on the primary plane?
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
*
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
@@ -770,16 +1006,10 @@ struct drm_mode_config {
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
-
- /**
- * fb_lock - mutex to protect fb state
- *
- * Besides the global fb list his also protects the fbs list in the
- * file_priv
- */
- struct mutex fb_lock;
+	struct mutex fb_lock;	/* protects global and per-file fb lists */
int num_fb;
struct list_head fb_list;
@@ -820,7 +1050,9 @@ struct drm_mode_config {
struct drm_property *edid_property;
struct drm_property *dpms_property;
struct drm_property *path_property;
+ struct drm_property *tile_property;
struct drm_property *plane_type_property;
+ struct drm_property *rotation_property;
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
@@ -846,6 +1078,10 @@ struct drm_mode_config {
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
+ /* properties for virtual machine layout */
+ struct drm_property *suggested_x_property;
+ struct drm_property *suggested_y_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
@@ -856,6 +1092,19 @@ struct drm_mode_config {
uint32_t cursor_width, cursor_height;
};
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+ if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -875,9 +1124,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs);
-extern int drm_crtc_init(struct drm_device *dev,
- struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
@@ -903,6 +1149,7 @@ int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
extern void drm_connector_cleanup(struct drm_connector *connector);
+extern unsigned int drm_connector_index(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
@@ -942,6 +1189,7 @@ extern int drm_plane_init(struct drm_device *dev,
const uint32_t *formats, uint32_t format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
+extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
@@ -971,9 +1219,10 @@ extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
- char *path);
+ const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid);
+ const struct edid *edid);
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
@@ -1034,11 +1283,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
- char *formats[]);
+extern int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
+ char *modes[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1106,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
@@ -1120,6 +1378,9 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
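
As a usage note (not part of the diff): together with drm_plane_index() and the plane_mask field added to drm_crtc_state, the drm_for_each_plane_mask() macro above lets code walk the planes recorded in a CRTC's atomic state directly. A hedged sketch, assuming an atomic driver that keeps crtc->state populated:

/* Sketch: iterate the planes currently recorded in a CRTC's atomic state. */
static void my_dump_active_planes(struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
		DRM_DEBUG_KMS("plane %u attached to crtc %u\n",
			      drm_plane_index(plane), drm_crtc_index(crtc));
}
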
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a3d75fefd010..7adbb65ea8ae 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs {
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
+ void (*mode_set_nofb)(struct drm_crtc *crtc);
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
@@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs {
/* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ void (*atomic_begin)(struct drm_crtc *crtc);
+ void (*atomic_flush)(struct drm_crtc *crtc);
};
/**
@@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
extern void drm_helper_resume_force_mode(struct drm_device *dev);
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
/* drm_probe_helper.c */
extern int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 000000000000..623b4e98e748
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+ u8 rev;
+ u8 bytes;
+ u8 prod_id;
+ u8 ext_count;
+} __packed;
+
+struct displayid_block {
+ u8 tag;
+ u8 rev;
+ u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+ struct displayid_block base;
+ u8 tile_cap;
+ u8 topo[3];
+ u8 tile_size[4];
+ u8 tile_pixel_bezel[5];
+ u8 topology_id[8];
+} __packed;
+
+#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a21568bf1514..11f8c84f98ce 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -190,16 +190,16 @@
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
-# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
@@ -303,7 +303,8 @@
#define DP_TEST_CRC_B_CB 0x244
#define DP_TEST_SINK_MISC 0x246
-#define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_COUNT_MASK 0x7
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
@@ -313,7 +314,7 @@
#define DP_TEST_EDID_CHECKSUM 0x261
#define DP_TEST_SINK 0x270
-#define DP_TEST_SINK_START (1 << 0)
+# define DP_TEST_SINK_START (1 << 0)
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
@@ -404,26 +405,6 @@
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
-/**
- * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
- * aux algorithm
- * @running: set by the algo indicating whether an i2c is ongoing or whether
- * the i2c bus is quiescent
- * @address: i2c target address for the currently ongoing transfer
- * @aux_ch: driver callback to transfer a single byte of the i2c payload
- */
-struct i2c_algo_dp_aux_data {
- bool running;
- u16 address;
- int (*aux_ch) (struct i2c_adapter *adapter,
- int mode, uint8_t write_byte,
- uint8_t *read_byte);
-};
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-
-
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
@@ -550,6 +531,7 @@ struct drm_dp_aux {
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
+ unsigned i2c_nack_count, i2c_defer_count;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
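
Purely to illustrate the renamed training macros (the register values themselves are unchanged): code that previously combined DP_TRAIN_VOLTAGE_SWING_400 with DP_TRAIN_PRE_EMPHASIS_3_5 now spells the same DP_TRAINING_LANEx_SET byte as below; the helper name is hypothetical.

/* Sketch: build a lane training value with the renamed level macros. */
static u8 my_lane_train_value(void)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |	/* was ..._VOLTAGE_SWING_400 */
	       DP_TRAIN_PRE_EMPH_LEVEL_1 |	/* was ..._PRE_EMPHASIS_3_5 */
	       DP_TRAIN_MAX_SWING_REACHED;
}
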
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 9b446ada2532..00c1da927245 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -28,7 +28,7 @@
struct drm_dp_mst_branch;
/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifer
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
* @vcpi: Virtual channel ID.
* @pbn: Payload Bandwidth Number for this channel
* @aligned_pbn: PBN aligned with slot size
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
+
+ struct edid *cached_edid; /* for DP logical ports - make tiling work */
};
/**
@@ -371,7 +373,7 @@ struct drm_dp_sideband_msg_tx {
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
- struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
+ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
@@ -388,6 +390,7 @@ struct drm_dp_payload {
int payload_state;
int start_slot;
int num_slots;
+ int vcpi;
};
/**
@@ -454,6 +457,7 @@ struct drm_dp_mst_topology_mgr {
struct drm_dp_vcpi **proposed_vcpis;
struct drm_dp_payload *payloads;
unsigned long payload_mask;
+ unsigned long vcpi_mask;
wait_queue_head_t tx_waitq;
struct work_struct work;
@@ -472,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b96031d947a0..87d85e81d3a7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -27,12 +27,14 @@
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
#define CEA_EXT 0x02
#define VTB_EXT 0x10
#define DI_EXT 0x40
#define LS_EXT 0x50
#define MI_EXT 0x60
+#define DISPLAYID_EXT 0x70
struct est_timings {
u8 t1;
@@ -207,6 +209,61 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
struct edid {
u8 header[8];
/* Vendor & product info */
@@ -279,4 +336,56 @@ int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, e.g. for setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block. That block is vendor
+ * specific, and consists of the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const uint8_t *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data);
+
#endif /* __DRM_EDID_H__ */
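
A short, hedged example of how the new ELD accessors compose (the function name is a placeholder): the monitor name begins at offset DRM_ELD_MONITOR_NAME_STRING and is drm_eld_mnl() bytes long, while drm_eld_size() bounds how much of the buffer a consumer should touch.

/* Sketch: pull the monitor name out of an ELD buffer with the helpers above. */
static void my_print_eld_monitor_name(const uint8_t *eld)
{
	int mnl = drm_eld_mnl(eld);	/* 0..31 bytes */
	char name[32] = {};

	if (mnl > 0 && DRM_ELD_MONITOR_NAME_STRING + mnl <= drm_eld_size(eld))
		memcpy(name, &eld[DRM_ELD_MONITOR_NAME_STRING], mnl);

	DRM_DEBUG_KMS("ELD size %d, monitor name '%s'\n",
		      drm_eld_size(eld), name);
}
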
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index bfd329d613c4..b597068103aa 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,9 +34,14 @@ struct drm_fb_helper;
#include <linux/kgdb.h>
+struct drm_fb_offset {
+ int x, y;
+};
+
struct drm_fb_helper_crtc {
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
+ int x, y;
};
struct drm_fb_helper_surface_size {
@@ -72,12 +77,12 @@ struct drm_fb_helper_funcs {
bool (*initial_config)(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height);
};
struct drm_fb_helper_connector {
struct drm_connector *connector;
- struct drm_cmdline_mode cmdline_mode;
};
struct drm_fb_helper {
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 9eed34dcd6af..d387cf06ae05 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -25,6 +25,7 @@
#define DRM_FLIP_WORK_H
#include <linux/kfifo.h>
+#include <linux/spinlock.h>
#include <linux/workqueue.h>
/**
@@ -32,9 +33,9 @@
*
* Util to queue up work to run from work-queue context after flip/vblank.
* Typically this can be used to defer unref of framebuffer's, cursor
- * bo's, etc until after vblank. The APIs are all safe (and lockless)
- * for up to one producer and once consumer at a time. The single-consumer
- * aspect is ensured by committing the queued work to a single work-queue.
+ * bo's, etc until after vblank. The APIs are all thread-safe.
+ * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
+ * in atomic context.
*/
struct drm_flip_work;
@@ -51,26 +52,40 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
+ * struct drm_flip_task - flip work task
+ * @node: list entry element
+ * @data: data to pass to work->func
+ */
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
+/**
* struct drm_flip_work - flip work queue
* @name: debug name
- * @pending: number of queued but not committed items
- * @count: number of committed items
* @func: callback fxn called for each committed item
* @worker: worker which calls @func
- * @fifo: queue of committed items
+ * @queued: queued tasks
+ * @commited: commited tasks
+ * @lock: lock to access queued and commited lists
*/
struct drm_flip_work {
const char *name;
- atomic_t pending, count;
drm_flip_func_t func;
struct work_struct worker;
- DECLARE_KFIFO_PTR(fifo, void *);
+ struct list_head queued;
+ struct list_head commited;
+ spinlock_t lock;
};
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
const char *name, drm_flip_func_t func);
void drm_flip_work_cleanup(struct drm_flip_work *work);
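
To make the reworked, list-based API above concrete (a hedged sketch; the my_* names and the workqueue are placeholders): the typical pattern defers a framebuffer unref from flip/vblank context to process context.

/* Sketch: defer drm_framebuffer_unreference() to a workqueue. */
static void my_unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

static void my_init(struct drm_flip_work *work)
{
	/* Note: no size argument any more, and init can no longer fail. */
	drm_flip_work_init(work, "fb unref", my_unref_worker);
}

/* From the page-flip path (may be called in atomic context): */
static void my_queue_old_fb(struct drm_flip_work *work, struct drm_framebuffer *fb)
{
	drm_flip_work_queue(work, fb);
}

/* From the vblank/flip-done handler, to actually run the deferred unrefs: */
static void my_flip_done(struct drm_flip_work *work, struct workqueue_struct *wq)
{
	drm_flip_work_commit(work, wq);
}
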
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
new file mode 100644
index 000000000000..1e6ae1458f7a
--- /dev/null
+++ b/include/drm/drm_gem.h
@@ -0,0 +1,183 @@
+#ifndef __DRM_GEM_H__
+#define __DRM_GEM_H__
+
+/*
+ * GEM Graphics Execution Manager Driver Interfaces
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * This structure defines the generic GEM buffer object, which is used by the
+ * DRM core and drivers for their buffer objects.
+ */
+struct drm_gem_object {
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /**
+ * handle_count - gem file_priv handle count of this object
+ *
+ * Each handle also holds a reference. Note that when the handle_count
+ * drops to 0 any global names (e.g. the id in the flink namespace) will
+ * be cleared.
+ *
+ * Protected by dev->object_name_lock.
+ */
+ unsigned handle_count;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /* Mapping info for this object */
+ struct drm_vma_offset_node vma_node;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ /**
+ * dma_buf - dma buf associated with this GEM object
+ *
+ * Pointer to the dma-buf associated with this gem object (either
+ * through importing or exporting). We break the resulting reference
+ * loop when the last gem handle for this object is released.
+ *
+ * Protected by obj->object_name_lock
+ */
+ struct dma_buf *dma_buf;
+
+ /**
+ * import_attach - dma buf attachment backing this object
+ *
+ * Any foreign dma_buf imported as a gem object has this set to the
+ * attachment point for the device. This is invariant over the lifetime
+ * of a gem object.
+ *
+ * The driver's ->gem_free_object callback is responsible for cleaning
+ * up the dma_buf attachment and references acquired at import time.
+ *
+ * Note that the drm gem/prime core does not depend upon drivers setting
+ * this field any more. So for drivers where this doesn't make sense
+ * (e.g. virtual devices or a DisplayLink device behind a USB bus) they can
+ * simply leave it as NULL.
+ */
+ struct dma_buf_attachment *import_attach;
+};
+
+void drm_gem_object_release(struct drm_gem_object *obj);
+void drm_gem_object_free(struct kref *kref);
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+void drm_gem_vm_open(struct vm_area_struct *vma);
+void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+ struct vm_area_struct *vma);
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+static inline void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
+
+static inline void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
+
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
+ struct drm_device *dev = obj->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+ drm_gem_object_free(&obj->refcount);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
+
+
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+ struct drm_file *filp,
+ u32 handle);
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
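A hedged sketch of the usual handle-to-object pattern in an ioctl; my_foo_ioctl() and its argument struct are hypothetical.

static int my_foo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct my_foo_args *args = data;	/* hypothetical uAPI struct */
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	/* ... operate on obj; the lookup reference keeps it alive ... */

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}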
+#endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 2a3cea91606d..acd6af8a8e67 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -2,7 +2,15 @@
#define __DRM_GEM_CMA_HELPER_H__
#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+/**
+ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+ * @base: base GEM object
+ * @paddr: physical address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers
+ * @vaddr: kernel virtual address of the backing memory
+ */
struct drm_gem_cma_object {
struct drm_gem_object base;
dma_addr_t paddr;
@@ -18,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
return container_of(gem_obj, struct drm_gem_cma_object, base);
}
-/* free gem object. */
+/* free GEM object */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-/* create memory region for drm framebuffer. */
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm, struct drm_mode_create_dumb *args);
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
-/* map memory region for drm framebuffer to user space. */
+/* map memory region for DRM framebuffer to user space */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, uint32_t handle, uint64_t *offset);
+ struct drm_device *drm, u32 handle,
+ u64 *offset);
-/* set vm_flags and we can change the vm attribute to other one at here. */
+/* map the CMA-backed GEM object into user space, adjusting vm_flags as needed */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-/* allocate physical memory. */
+/* allocate physical memory */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- unsigned int size);
+ size_t size);
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
@@ -44,7 +59,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
struct sg_table *sgt);
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
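A hedged sketch of how a CMA-backed driver typically wires these helpers into its struct drm_driver; the surrounding driver is hypothetical and only the helper names come from this header.

static struct drm_driver my_cma_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	/* the file_operations .mmap entry would point at drm_gem_cma_mmap */
};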
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
new file mode 100644
index 000000000000..3e698038dc7b
--- /dev/null
+++ b/include/drm/drm_legacy.h
@@ -0,0 +1,203 @@
+#ifndef __DRM_DRM_LEGACY_H__
+#define __DRM_DRM_LEGACY_H__
+
+/*
+ * Legacy driver interfaces for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*
+ * Legacy support for paleontologic DRM drivers
+ *
+ * If you add a new driver and it uses any of these functions or structures,
+ * you're doing it terribly wrong.
+ */
+
+/**
+ * DMA buffer.
+ */
+struct drm_buf {
+ int idx; /**< Index into master buflist */
+ int total; /**< Buffer size */
+ int order; /**< log-base-2(total) */
+ int used; /**< Amount of buffer in use (for DMA) */
+ unsigned long offset; /**< Byte offset (used internally) */
+ void *address; /**< Address of buffer */
+ unsigned long bus_address; /**< Bus address of buffer */
+ struct drm_buf *next; /**< Kernel-only: used for free list */
+ __volatile__ int waiting; /**< On kernel DMA queue */
+ __volatile__ int pending; /**< On hardware DMA queue */
+ struct drm_file *file_priv; /**< Private of holding file descr */
+ int context; /**< Kernel queue for this buffer */
+ int while_locked; /**< Dispatch this buffer while locked */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /**< Which list we're on */
+
+ int dev_priv_size; /**< Size of buffer private storage */
+ void *dev_private; /**< Per-buffer private storage */
+};
+
+typedef struct drm_dma_handle {
+ dma_addr_t busaddr;
+ void *vaddr;
+ size_t size;
+} drm_dma_handle_t;
+
+/**
+ * Buffer entry. There is one of these for each buffer size order.
+ */
+struct drm_buf_entry {
+ int buf_size; /**< size */
+ int buf_count; /**< number of buffers */
+ struct drm_buf *buflist; /**< buffer list */
+ int seg_count;
+ int page_order;
+ struct drm_dma_handle **seglist;
+
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+};
+
+/**
+ * DMA data.
+ */
+struct drm_device_dma {
+
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ int buf_count; /**< total number of buffers */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ int seg_count;
+ int page_count; /**< number of pages */
+ unsigned long *pagelist; /**< page list */
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02,
+ _DRM_DMA_USE_FB = 0x04,
+ _DRM_DMA_USE_PCI_RO = 0x08
+ } flags;
+
+};
+
+/**
+ * Scatter-gather memory.
+ */
+struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ struct page **pagelist;
+ dma_addr_t *busaddr;
+};
+
+/**
+ * Kernel side of a mapping
+ */
+struct drm_local_map {
+ resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+};
+
+typedef struct drm_local_map drm_local_map_t;
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+ struct list_head head; /**< list head */
+ struct drm_hash_item hash;
+ struct drm_local_map *map; /**< mapping */
+ uint64_t user_token;
+ struct drm_master *master;
+};
+
+int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, struct drm_local_map **map_p);
+int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
+int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
+int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
+
+/**
+ * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
+ *
+ * \param dev DRM device.
+ * \param _file_priv DRM file private of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
+do { \
+ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
+ _file_priv->master->lock.file_priv != _file_priv) { \
+ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+ _file_priv->master->lock.file_priv, _file_priv); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+void drm_legacy_idlelock_take(struct drm_lock_data *lock);
+void drm_legacy_idlelock_release(struct drm_lock_data *lock);
+
+/* drm_pci.c dma alloc wrappers */
+void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+
+/* drm_memory.c */
+void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
+void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
+void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
+
+static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
+ unsigned int token)
+{
+ struct drm_map_list *_entry;
+ list_for_each_entry(_entry, &dev->maplist, head)
+ if (_entry->user_token == token)
+ return _entry->map;
+ return NULL;
+}
+
+#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_memory.h b/include/drm/drm_memory.h
deleted file mode 100644
index 4baf57a207e7..000000000000
--- a/include/drm/drm_memory.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * \file drm_memory.h
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-#include <drm/drmP.h>
-
-/**
- * Cut down version of drm_memory_debug.h, which used to be called
- * drm_memory.h.
- */
-
-#if __OS_HAS_AGP
-
-#ifdef HAVE_PAGE_AGP
-#include <asm/agp.h>
-#else
-# ifdef __powerpc__
-# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
-# else
-# define PAGE_AGP PAGE_KERNEL
-# endif
-#endif
-
-#else /* __OS_HAS_AGP */
-
-#endif
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 2bb55b8b9031..f1d8d0dbb4f1 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -26,6 +26,7 @@ struct mipi_dsi_device;
* struct mipi_dsi_msg - read/write DSI buffer
* @channel: virtual channel id
* @type: payload data type
+ * @flags: flags controlling this message transmission
* @tx_len: length of @tx_buf
* @tx_buf: data to be written
* @rx_len: length of @rx_buf
@@ -43,12 +44,44 @@ struct mipi_dsi_msg {
void *rx_buf;
};
+bool mipi_dsi_packet_format_is_short(u8 type);
+bool mipi_dsi_packet_format_is_long(u8 type);
+
+/**
+ * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
+ * @size: size (in bytes) of the packet
+ * @header: the four bytes that make up the header (Data ID, Word Count or
+ * Packet Data, and ECC)
+ * @payload_length: number of bytes in the payload
+ * @payload: a pointer to a buffer containing the payload, if any
+ */
+struct mipi_dsi_packet {
+ size_t size;
+ u8 header[4];
+ size_t payload_length;
+ const u8 *payload;
+};
+
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+ const struct mipi_dsi_msg *msg);
+
/**
* struct mipi_dsi_host_ops - DSI bus operations
* @attach: attach DSI device to DSI host
* @detach: detach DSI device from DSI host
- * @transfer: send and/or receive DSI packet, return number of received bytes,
- * or error
+ * @transfer: transmit a DSI packet
+ *
+ * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
+ * structures. Each message describes the type of packet
+ * being transmitted as well as the transmit and receive buffers. When an
+ * error is encountered during transmission, this function will return a
+ * negative error code. On success it shall return the number of bytes
+ * transmitted for write packets or the number of bytes received for read
+ * packets.
+ *
+ * Note that typically DSI packet transmission is atomic, so the .transfer()
+ * function will seldom return anything other than the number of bytes
+ * contained in the transmit buffer on success.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
@@ -56,7 +89,7 @@ struct mipi_dsi_host_ops {
int (*detach)(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi);
ssize_t (*transfer)(struct mipi_dsi_host *host,
- struct mipi_dsi_msg *msg);
+ const struct mipi_dsi_msg *msg);
};
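A hedged sketch of a host .transfer() implementation built on mipi_dsi_create_packet(); my_hw_write() is hypothetical and only the write path is shown.

static ssize_t my_dsi_transfer(struct mipi_dsi_host *host,
			       const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int ret;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret < 0)
		return ret;

	my_hw_write(host, packet.header, sizeof(packet.header));
	if (packet.payload_length)
		my_hw_write(host, packet.payload, packet.payload_length);

	return msg->tx_len;
}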
/**
@@ -96,6 +129,8 @@ void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
+/* transmit data in low power */
+#define MIPI_DSI_MODE_LPM BIT(11)
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
@@ -128,12 +163,57 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
return container_of(dev, struct mipi_dsi_device, dev);
}
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
- size_t len);
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value);
+
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size);
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+ size_t num_params, void *data, size_t size);
+
+/**
+ * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
+ * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
+ * information only
+ * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both
+ * V-Blanking and H-Blanking information
+ */
+enum mipi_dsi_dcs_tear_mode {
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK,
+ MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
+};
+
+#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
+#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3)
+#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4)
+#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
+#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6)
+
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+ const void *data, size_t len);
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+ const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+ enum mipi_dsi_dcs_tear_mode mode);
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
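A hedged sketch of a panel enable path composed from the DCS helpers above; the 120 ms sleep-out delay and the error handling are illustrative (msleep() needs <linux/delay.h>).

static int my_panel_enable(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;
	msleep(120);

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_display_on(dsi);
}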
/**
* struct mipi_dsi_driver - DSI driver
@@ -165,9 +245,13 @@ static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
dev_set_drvdata(&dsi->dev, data);
}
-int mipi_dsi_driver_register(struct mipi_dsi_driver *driver);
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
+ struct module *owner);
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
+#define mipi_dsi_driver_register(driver) \
+ mipi_dsi_driver_register_full(driver, THIS_MODULE)
+
#define module_mipi_dsi_driver(__mipi_dsi_driver) \
module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
mipi_dsi_driver_unregister)
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 402aa7a6a058..70595ff565ba 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -29,10 +29,11 @@
struct drm_modeset_lock;
/**
- * drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
+ * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
* @locked: list of held locks
+ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
*
* Each thread competing for a set of locks must use one acquire
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and
@@ -53,10 +54,15 @@ struct drm_modeset_acquire_ctx {
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
+
+ /**
+ * Trylock mode, use only for panic handlers!
+ */
+ bool trylock_only;
};
/**
- * drm_modeset_lock - used for locking modeset resources.
+ * struct drm_modeset_lock - used for locking modeset resources.
* @mutex: resource locking
 * @head: used to hold its place on state->locked list when
* part of an atomic update
@@ -120,6 +126,19 @@ int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
void drm_modeset_unlock(struct drm_modeset_lock *lock);
struct drm_device;
+struct drm_crtc;
+struct drm_plane;
+
+void drm_modeset_lock_all(struct drm_device *dev);
+int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
+void drm_modeset_unlock_all(struct drm_device *dev);
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+ struct drm_plane *plane);
+void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
+struct drm_modeset_acquire_ctx *
+drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
+
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
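A hedged sketch of the -EDEADLK backoff loop around an acquire context; my_update() is hypothetical.

static int my_update(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_crtcs(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret) {
		/* ... modify the state protected by the acquired locks ... */
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}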
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e973540cd15b..2dd405c9be78 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -74,7 +74,6 @@
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
- {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 52e6870534b2..a185392cafeb 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -25,6 +25,7 @@
#define DRM_PLANE_HELPER_H
#include <drm/drm_rect.h>
+#include <drm/drm_crtc.h>
/*
* Drivers that don't allow primary plane scaling may pass this macro in place
@@ -42,6 +43,37 @@
* planes.
*/
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+
+/**
+ * drm_plane_helper_funcs - helper operations for planes
+ * @prepare_fb: prepare a framebuffer for use by the plane
+ * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
+ * @atomic_check: check that a given atomic state is valid and can be applied
+ * @atomic_update: apply an atomic state to the plane
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_plane_helper_funcs {
+ int (*prepare_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+ void (*cleanup_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+
+ int (*atomic_check)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ void (*atomic_update)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+};
+
+static inline void drm_plane_helper_add(struct drm_plane *plane,
+ const struct drm_plane_helper_funcs *funcs)
+{
+ plane->helper_private = (void *)funcs;
+}
+
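A hedged registration sketch: the driver's atomic_check/atomic_update callbacks are hypothetical and implemented elsewhere.

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state);
static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_plane_state *old_state);

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	.atomic_check	= my_plane_atomic_check,
	.atomic_update	= my_plane_atomic_update,
};

static void my_plane_setup(struct drm_plane *plane)
{
	/* called after the plane has been initialized/registered */
	drm_plane_helper_add(plane, &my_plane_helper_funcs);
}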
extern int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -68,4 +100,16 @@ extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
int num_formats);
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_plane_helper_disable(struct drm_plane *plane);
+
+/* For use by drm_crtc_helper.c */
+int drm_plane_helper_commit(struct drm_plane *plane,
+ struct drm_plane_state *plane_state,
+ struct drm_framebuffer *old_fb);
#endif
diff --git a/include/drm/drm_usb.h b/include/drm/drm_usb.h
deleted file mode 100644
index 33506c11da8b..000000000000
--- a/include/drm/drm_usb.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef DRM_USB_H
-#define DRM_USB_H
-
-#include <drmP.h>
-
-#include <linux/usb.h>
-
-extern int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver);
-extern void drm_usb_exit(struct drm_driver *driver, struct usb_driver *udriver);
-
-int drm_get_usb_dev(struct usb_interface *interface,
- const struct usb_device_id *id,
- struct drm_driver *driver);
-
-#endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index a70d45647898..180ad0e6de21 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -259,4 +259,21 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
+#define INTEL_SKL_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
+ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+ INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+
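A hedged sketch of how these macros are consumed: they expand into struct pci_device_id entries whose driver_data points at a per-platform descriptor (hypothetical here).

struct my_device_info { int gen; };			/* hypothetical */
static const struct my_device_info my_skl_info = { .gen = 9 };

static const struct pci_device_id my_pciidlist[] = {
	INTEL_SKL_IDS(&my_skl_info),
	{ }						/* terminator */
};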
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 7526c5bf5610..0ccf7f267ff9 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -45,12 +45,24 @@ struct ttm_bo_device;
struct drm_mm_node;
+/**
+ * struct ttm_place
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @flags: memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
+ */
+struct ttm_place {
+ unsigned fpfn;
+ unsigned lpfn;
+ uint32_t flags;
+};
/**
* struct ttm_placement
*
- * @fpfn: first valid page frame number to put the object
- * @lpfn: last valid page frame number to put the object
* @num_placement: number of preferred placements
* @placement: preferred placements
 * @num_busy_placement: number of preferred placements when the buffer needs to be evicted
@@ -59,12 +71,10 @@ struct drm_mm_node;
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
- unsigned fpfn;
- unsigned lpfn;
- unsigned num_placement;
- const uint32_t *placement;
- unsigned num_busy_placement;
- const uint32_t *busy_placement;
+ unsigned num_placement;
+ const struct ttm_place *placement;
+ unsigned num_busy_placement;
+ const struct ttm_place *busy_placement;
};
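A hedged sketch of filling the split structures, requesting cached system memory; the flag names come from ttm_placement.h.

static const struct ttm_place my_sys_place = {
	.fpfn	= 0,
	.lpfn	= 0,				/* 0 means no upper bound */
	.flags	= TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
};

static const struct ttm_placement my_sys_placement = {
	.num_placement		= 1,
	.placement		= &my_sys_place,
	.num_busy_placement	= 1,
	.busy_placement		= &my_sys_place,
};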
/**
@@ -163,7 +173,6 @@ struct ttm_tt;
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
* @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
@@ -227,13 +236,9 @@ struct ttm_buffer_object {
struct list_head io_reserve_lru;
/**
- * Members protected by struct buffer_object_device::fence_lock
- * In addition, setting sync_obj to anything else
- * than NULL requires bo::reserved to be held. This allows for
- * checking NULL while reserved but not holding the mentioned lock.
+ * Members protected by a bo reservation.
*/
- void *sync_obj;
unsigned long priv_flags;
struct drm_vma_offset_node vma_node;
@@ -455,6 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
+ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
@@ -482,6 +488,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
+ struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
@@ -519,20 +526,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo);
/**
- * ttm_bo_check_placement
- *
- * @bo: the buffer object.
- * @placement: placements
- *
- * Performs minimal validity checking on an intended change of
- * placement flags.
- * Returns
- * -EINVAL: Intended change is invalid or not allowed.
- */
-extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
-
-/**
* ttm_bo_init_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1d9f0f1ff52d..142d752fc450 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -208,8 +208,7 @@ struct ttm_mem_type_manager_func {
*/
int (*get_node)(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- uint32_t flags,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem);
/**
@@ -313,11 +312,6 @@ struct ttm_mem_type_manager {
* @move: Callback for a driver to hook in accelerated functions to
* move a buffer.
* If set to NULL, a potentially slow memcpy() move is used.
- * @sync_obj_signaled: See ttm_fence_api.h
- * @sync_obj_wait: See ttm_fence_api.h
- * @sync_obj_flush: See ttm_fence_api.h
- * @sync_obj_unref: See ttm_fence_api.h
- * @sync_obj_ref: See ttm_fence_api.h
*/
struct ttm_bo_driver {
@@ -419,23 +413,6 @@ struct ttm_bo_driver {
int (*verify_access) (struct ttm_buffer_object *bo,
struct file *filp);
- /**
- * In case a driver writer dislikes the TTM fence objects,
- * the driver writer can replace those with sync objects of
- * his / her own. If it turns out that no driver writer is
- * using these. I suggest we remove these hooks and plug in
- * fences directly. The bo driver needs the following functionality:
- * See the corresponding functions in the fence object API
- * documentation.
- */
-
- bool (*sync_obj_signaled) (void *sync_obj);
- int (*sync_obj_wait) (void *sync_obj,
- bool lazy, bool interruptible);
- int (*sync_obj_flush) (void *sync_obj);
- void (*sync_obj_unref) (void **sync_obj);
- void *(*sync_obj_ref) (void *sync_obj);
-
/* hook to notify driver about a driver move so it
* can do tiling things */
void (*move_notify)(struct ttm_buffer_object *bo,
@@ -522,8 +499,6 @@ struct ttm_bo_global {
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
- * @fence_lock: Protects the synchronizing members on *all* bos belonging
- * to this device.
* @vma_manager: Address space manager
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
@@ -543,7 +518,6 @@ struct ttm_bo_device {
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
- spinlock_t fence_lock;
/*
* Protected by internal locks.
@@ -1026,7 +1000,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* ttm_bo_move_accel_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
- * @sync_obj: A sync object that signals when moving is complete.
+ * @fence: A fence object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1040,7 +1014,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- void *sync_obj,
+ struct fence *fence,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 16db7d01a336..b620c317c772 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,19 +39,13 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @reserved: Indicates whether @bo has been reserved for validation.
- * @removed: Indicates whether @bo has been removed from lru lists.
- * @put_count: Number of outstanding references on bo::list_kref.
- * @old_sync_obj: Pointer to a sync object about to be unreferenced
+ * @shared: should the fence be added shared?
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- bool reserved;
- bool removed;
- int put_count;
- void *old_sync_obj;
+ bool shared;
};
/**
@@ -73,6 +67,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
+ * @intr: should the wait be interruptible
+ * @dups: [out] optional list of duplicates.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -84,9 +80,14 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* CPU write reservations to be cleared, and for other threads to
* unreserve their buffers.
*
- * This function may return -ERESTART or -EAGAIN if the calling process
- * receives a signal while waiting. In that case, no buffers on the list
- * will be reserved upon return.
+ * If intr is set to true, this function may return -ERESTARTSYS if the
+ * calling process receives a signal while waiting. In that case, no
+ * buffers on the list will be reserved upon return.
+ *
+ * If dups is non-NULL, all buffers already reserved by the current thread
+ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
*
* Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or
@@ -95,14 +96,15 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
*/
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list);
+ struct list_head *list, bool intr,
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
- * @sync_obj: The new sync object for the buffers.
+ * @fence: The new exclusive fence for the buffers.
*
* This function should be called when command submission is complete, and
 * it will add the new fence to the bos pointed to by entries on @list.
@@ -111,6 +113,7 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
*/
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, void *sync_obj);
+ struct list_head *list,
+ struct fence *fence);
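A hedged sketch of the reserve -> submit -> fence flow with the new signatures; my_submit() and the fence argument are hypothetical.

static int my_submit(struct list_head *validate_list, struct fence *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, validate_list, true, NULL);
	if (ret)
		return ret;

	/* ... validate buffers and push the command stream; on failure,
	 * call ttm_eu_backoff_reservation(&ticket, validate_list) instead ... */

	ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
	return 0;
}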
#endif
diff --git a/include/dt-bindings/arm/ux500_pm_domains.h b/include/dt-bindings/arm/ux500_pm_domains.h
new file mode 100644
index 000000000000..398a6c0288d1
--- /dev/null
+++ b/include/dt-bindings/arm/ux500_pm_domains.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H
+#define _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H
+
+#define DOMAIN_VAPE 0
+
+/* Number of PM domains. */
+#define NR_DOMAINS (DOMAIN_VAPE + 1)
+
+#endif
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index b535e9da7de6..961b9c130ea9 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -255,4 +255,31 @@
*/
#define CLK_NR_CLKS 248
+/*
+ * CMU DMC
+ */
+
+#define CLK_FOUT_BPLL 1
+#define CLK_FOUT_EPLL 2
+
+/* Muxes */
+#define CLK_MOUT_MPLL_MIF 8
+#define CLK_MOUT_BPLL 9
+#define CLK_MOUT_DPHY 10
+#define CLK_MOUT_DMC_BUS 11
+#define CLK_MOUT_EPLL 12
+
+/* Dividers */
+#define CLK_DIV_DMC 16
+#define CLK_DIV_DPHY 17
+#define CLK_DIV_DMC_PRE 18
+#define CLK_DIV_DMCP 19
+#define CLK_DIV_DMCD 20
+
+/*
+ * Total number of clocks of CMU DMC.
+ * NOTE: Must be one greater than the last clock ID.
+ */
+#define NR_CLKS_DMC 21
+
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 459bd2bd411f..34fe28c622d0 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -115,11 +115,11 @@
#define CLK_SMMU_MFCR 275
#define CLK_G3D 276
#define CLK_G2D 277
-#define CLK_ROTATOR 278 /* Exynos4210 only */
-#define CLK_MDMA 279 /* Exynos4210 only */
-#define CLK_SMMU_G2D 280 /* Exynos4210 only */
-#define CLK_SMMU_ROTATOR 281 /* Exynos4210 only */
-#define CLK_SMMU_MDMA 282 /* Exynos4210 only */
+#define CLK_ROTATOR 278
+#define CLK_MDMA 279
+#define CLK_SMMU_G2D 280
+#define CLK_SMMU_ROTATOR 281
+#define CLK_SMMU_MDMA 282
#define CLK_FIMD0 283
#define CLK_MIE0 284
#define CLK_MDNIE0 285 /* Exynos4412 only */
@@ -234,6 +234,8 @@
#define CLK_MOUT_G3D1 393
#define CLK_MOUT_G3D 394
#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */
+#define CLK_MOUT_HDMI 396
+#define CLK_MOUT_MIXER 397
/* gate clocks - ppmu */
#define CLK_PPMULEFT 400
diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h
new file mode 100644
index 000000000000..7eed55100721
--- /dev/null
+++ b/include/dt-bindings/clock/exynos4415.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for Samsung Exynos4415 clock controllers.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H
+#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H
+
+/*
+ * Let each exported clock get a unique index, which is used on DT-enabled
+ * platforms to lookup the clock from a clock specifier. These indices are
+ * therefore considered an ABI and so must not be changed. This implies
+ * that new clocks should be added either in free spaces between clock groups
+ * or at the end.
+ */
+
+/*
+ * Main CMU
+ */
+
+#define CLK_OSCSEL 1
+#define CLK_FIN_PLL 2
+#define CLK_FOUT_APLL 3
+#define CLK_FOUT_MPLL 4
+#define CLK_FOUT_EPLL 5
+#define CLK_FOUT_G3D_PLL 6
+#define CLK_FOUT_ISP_PLL 7
+#define CLK_FOUT_DISP_PLL 8
+
+/* Muxes */
+#define CLK_MOUT_MPLL_USER_L 16
+#define CLK_MOUT_GDL 17
+#define CLK_MOUT_MPLL_USER_R 18
+#define CLK_MOUT_GDR 19
+#define CLK_MOUT_EBI 20
+#define CLK_MOUT_ACLK_200 21
+#define CLK_MOUT_ACLK_160 22
+#define CLK_MOUT_ACLK_100 23
+#define CLK_MOUT_ACLK_266 24
+#define CLK_MOUT_G3D_PLL 25
+#define CLK_MOUT_EPLL 26
+#define CLK_MOUT_EBI_1 27
+#define CLK_MOUT_ISP_PLL 28
+#define CLK_MOUT_DISP_PLL 29
+#define CLK_MOUT_MPLL_USER_T 30
+#define CLK_MOUT_ACLK_400_MCUISP 31
+#define CLK_MOUT_G3D_PLLSRC 32
+#define CLK_MOUT_CSIS1 33
+#define CLK_MOUT_CSIS0 34
+#define CLK_MOUT_CAM1 35
+#define CLK_MOUT_FIMC3_LCLK 36
+#define CLK_MOUT_FIMC2_LCLK 37
+#define CLK_MOUT_FIMC1_LCLK 38
+#define CLK_MOUT_FIMC0_LCLK 39
+#define CLK_MOUT_MFC 40
+#define CLK_MOUT_MFC_1 41
+#define CLK_MOUT_MFC_0 42
+#define CLK_MOUT_G3D 43
+#define CLK_MOUT_G3D_1 44
+#define CLK_MOUT_G3D_0 45
+#define CLK_MOUT_MIPI0 46
+#define CLK_MOUT_FIMD0 47
+#define CLK_MOUT_TSADC_ISP 48
+#define CLK_MOUT_UART_ISP 49
+#define CLK_MOUT_SPI1_ISP 50
+#define CLK_MOUT_SPI0_ISP 51
+#define CLK_MOUT_PWM_ISP 52
+#define CLK_MOUT_AUDIO0 53
+#define CLK_MOUT_TSADC 54
+#define CLK_MOUT_MMC2 55
+#define CLK_MOUT_MMC1 56
+#define CLK_MOUT_MMC0 57
+#define CLK_MOUT_UART3 58
+#define CLK_MOUT_UART2 59
+#define CLK_MOUT_UART1 60
+#define CLK_MOUT_UART0 61
+#define CLK_MOUT_SPI2 62
+#define CLK_MOUT_SPI1 63
+#define CLK_MOUT_SPI0 64
+#define CLK_MOUT_SPDIF 65
+#define CLK_MOUT_AUDIO2 66
+#define CLK_MOUT_AUDIO1 67
+#define CLK_MOUT_MPLL_USER_C 68
+#define CLK_MOUT_HPM 69
+#define CLK_MOUT_CORE 70
+#define CLK_MOUT_APLL 71
+#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72
+#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73
+#define CLK_MOUT_JPEG 74
+#define CLK_MOUT_JPEG1 75
+#define CLK_MOUT_JPEG0 76
+#define CLK_MOUT_ACLK_ISP0_300 77
+#define CLK_MOUT_ACLK_ISP0_400 78
+#define CLK_MOUT_ACLK_ISP0_300_USER 79
+#define CLK_MOUT_ACLK_ISP1_300 80
+#define CLK_MOUT_ACLK_ISP1_300_USER 81
+#define CLK_MOUT_HDMI 82
+
+/* Dividers */
+#define CLK_DIV_GPL 90
+#define CLK_DIV_GDL 91
+#define CLK_DIV_GPR 92
+#define CLK_DIV_GDR 93
+#define CLK_DIV_ACLK_400_MCUISP 94
+#define CLK_DIV_EBI 95
+#define CLK_DIV_ACLK_200 96
+#define CLK_DIV_ACLK_160 97
+#define CLK_DIV_ACLK_100 98
+#define CLK_DIV_ACLK_266 99
+#define CLK_DIV_CSIS1 100
+#define CLK_DIV_CSIS0 101
+#define CLK_DIV_CAM1 102
+#define CLK_DIV_FIMC3_LCLK 103
+#define CLK_DIV_FIMC2_LCLK 104
+#define CLK_DIV_FIMC1_LCLK 105
+#define CLK_DIV_FIMC0_LCLK 106
+#define CLK_DIV_TV_BLK 107
+#define CLK_DIV_MFC 108
+#define CLK_DIV_G3D 109
+#define CLK_DIV_MIPI0_PRE 110
+#define CLK_DIV_MIPI0 111
+#define CLK_DIV_FIMD0 112
+#define CLK_DIV_UART_ISP 113
+#define CLK_DIV_SPI1_ISP_PRE 114
+#define CLK_DIV_SPI1_ISP 115
+#define CLK_DIV_SPI0_ISP_PRE 116
+#define CLK_DIV_SPI0_ISP 117
+#define CLK_DIV_PWM_ISP 118
+#define CLK_DIV_PCM0 119
+#define CLK_DIV_AUDIO0 120
+#define CLK_DIV_TSADC_PRE 121
+#define CLK_DIV_TSADC 122
+#define CLK_DIV_MMC1_PRE 123
+#define CLK_DIV_MMC1 124
+#define CLK_DIV_MMC0_PRE 125
+#define CLK_DIV_MMC0 126
+#define CLK_DIV_MMC2_PRE 127
+#define CLK_DIV_MMC2 128
+#define CLK_DIV_UART3 129
+#define CLK_DIV_UART2 130
+#define CLK_DIV_UART1 131
+#define CLK_DIV_UART0 132
+#define CLK_DIV_SPI1_PRE 133
+#define CLK_DIV_SPI1 134
+#define CLK_DIV_SPI0_PRE 135
+#define CLK_DIV_SPI0 136
+#define CLK_DIV_SPI2_PRE 137
+#define CLK_DIV_SPI2 138
+#define CLK_DIV_PCM2 139
+#define CLK_DIV_AUDIO2 140
+#define CLK_DIV_PCM1 141
+#define CLK_DIV_AUDIO1 142
+#define CLK_DIV_I2S1 143
+#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144
+#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145
+#define CLK_DIV_JPEG 146
+#define CLK_DIV_CORE2 147
+#define CLK_DIV_APLL 148
+#define CLK_DIV_PCLK_DBG 149
+#define CLK_DIV_ATB 150
+#define CLK_DIV_PERIPH 151
+#define CLK_DIV_COREM1 152
+#define CLK_DIV_COREM0 153
+#define CLK_DIV_CORE 154
+#define CLK_DIV_HPM 155
+#define CLK_DIV_COPY 156
+
+/* Gates */
+#define CLK_ASYNC_G3D 180
+#define CLK_ASYNC_MFCL 181
+#define CLK_ASYNC_TVX 182
+#define CLK_PPMULEFT 183
+#define CLK_GPIO_LEFT 184
+#define CLK_PPMUIMAGE 185
+#define CLK_QEMDMA2 186
+#define CLK_QEROTATOR 187
+#define CLK_SMMUMDMA2 188
+#define CLK_SMMUROTATOR 189
+#define CLK_MDMA2 190
+#define CLK_ROTATOR 191
+#define CLK_ASYNC_ISPMX 192
+#define CLK_ASYNC_MAUDIOX 193
+#define CLK_ASYNC_MFCR 194
+#define CLK_ASYNC_FSYSD 195
+#define CLK_ASYNC_LCD0X 196
+#define CLK_ASYNC_CAMX 197
+#define CLK_PPMURIGHT 198
+#define CLK_GPIO_RIGHT 199
+#define CLK_ANTIRBK_APBIF 200
+#define CLK_EFUSE_WRITER_APBIF 201
+#define CLK_MONOCNT 202
+#define CLK_TZPC6 203
+#define CLK_PROVISIONKEY1 204
+#define CLK_PROVISIONKEY0 205
+#define CLK_CMU_ISPPART 206
+#define CLK_TMU_APBIF 207
+#define CLK_KEYIF 208
+#define CLK_RTC 209
+#define CLK_WDT 210
+#define CLK_MCT 211
+#define CLK_SECKEY 212
+#define CLK_HDMI_CEC 213
+#define CLK_TZPC5 214
+#define CLK_TZPC4 215
+#define CLK_TZPC3 216
+#define CLK_TZPC2 217
+#define CLK_TZPC1 218
+#define CLK_TZPC0 219
+#define CLK_CMU_COREPART 220
+#define CLK_CMU_TOPPART 221
+#define CLK_PMU_APBIF 222
+#define CLK_SYSREG 223
+#define CLK_CHIP_ID 224
+#define CLK_SMMUFIMC_LITE2 225
+#define CLK_FIMC_LITE2 226
+#define CLK_PIXELASYNCM1 227
+#define CLK_PIXELASYNCM0 228
+#define CLK_PPMUCAMIF 229
+#define CLK_SMMUJPEG 230
+#define CLK_SMMUFIMC3 231
+#define CLK_SMMUFIMC2 232
+#define CLK_SMMUFIMC1 233
+#define CLK_SMMUFIMC0 234
+#define CLK_JPEG 235
+#define CLK_CSIS1 236
+#define CLK_CSIS0 237
+#define CLK_FIMC3 238
+#define CLK_FIMC2 239
+#define CLK_FIMC1 240
+#define CLK_FIMC0 241
+#define CLK_PPMUTV 242
+#define CLK_SMMUTV 243
+#define CLK_HDMI 244
+#define CLK_MIXER 245
+#define CLK_VP 246
+#define CLK_PPMUMFC_R 247
+#define CLK_PPMUMFC_L 248
+#define CLK_SMMUMFC_R 249
+#define CLK_SMMUMFC_L 250
+#define CLK_MFC 251
+#define CLK_PPMUG3D 252
+#define CLK_G3D 253
+#define CLK_PPMULCD0 254
+#define CLK_SMMUFIMD0 255
+#define CLK_DSIM0 256
+#define CLK_SMIES 257
+#define CLK_MIE0 258
+#define CLK_FIMD0 259
+#define CLK_TSADC 260
+#define CLK_PPMUFILE 261
+#define CLK_NFCON 262
+#define CLK_USBDEVICE 263
+#define CLK_USBHOST 264
+#define CLK_SROMC 265
+#define CLK_SDMMC2 266
+#define CLK_SDMMC1 267
+#define CLK_SDMMC0 268
+#define CLK_PDMA1 269
+#define CLK_PDMA0 270
+#define CLK_SPDIF 271
+#define CLK_PWM 272
+#define CLK_PCM2 273
+#define CLK_PCM1 274
+#define CLK_I2S1 275
+#define CLK_SPI2 276
+#define CLK_SPI1 277
+#define CLK_SPI0 278
+#define CLK_I2CHDMI 279
+#define CLK_I2C7 280
+#define CLK_I2C6 281
+#define CLK_I2C5 282
+#define CLK_I2C4 283
+#define CLK_I2C3 284
+#define CLK_I2C2 285
+#define CLK_I2C1 286
+#define CLK_I2C0 287
+#define CLK_UART3 288
+#define CLK_UART2 289
+#define CLK_UART1 290
+#define CLK_UART0 291
+
+/* Special clocks */
+#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330
+#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331
+#define CLK_SCLK_JPEG 332
+#define CLK_SCLK_CSIS1 333
+#define CLK_SCLK_CSIS0 334
+#define CLK_SCLK_CAM1 335
+#define CLK_SCLK_FIMC3_LCLK 336
+#define CLK_SCLK_FIMC2_LCLK 337
+#define CLK_SCLK_FIMC1_LCLK 338
+#define CLK_SCLK_FIMC0_LCLK 339
+#define CLK_SCLK_PIXEL 340
+#define CLK_SCLK_HDMI 341
+#define CLK_SCLK_MIXER 342
+#define CLK_SCLK_MFC 343
+#define CLK_SCLK_G3D 344
+#define CLK_SCLK_MIPIDPHY4L 345
+#define CLK_SCLK_MIPI0 346
+#define CLK_SCLK_MDNIE0 347
+#define CLK_SCLK_FIMD0 348
+#define CLK_SCLK_PCM0 349
+#define CLK_SCLK_AUDIO0 350
+#define CLK_SCLK_TSADC 351
+#define CLK_SCLK_EBI 352
+#define CLK_SCLK_MMC2 353
+#define CLK_SCLK_MMC1 354
+#define CLK_SCLK_MMC0 355
+#define CLK_SCLK_I2S 356
+#define CLK_SCLK_PCM2 357
+#define CLK_SCLK_PCM1 358
+#define CLK_SCLK_AUDIO2 359
+#define CLK_SCLK_AUDIO1 360
+#define CLK_SCLK_SPDIF 361
+#define CLK_SCLK_SPI2 362
+#define CLK_SCLK_SPI1 363
+#define CLK_SCLK_SPI0 364
+#define CLK_SCLK_UART3 365
+#define CLK_SCLK_UART2 366
+#define CLK_SCLK_UART1 367
+#define CLK_SCLK_UART0 368
+#define CLK_SCLK_HDMIPHY 369
+
+/*
+ * Total number of clocks of main CMU.
+ * NOTE: Must be one greater than the last clock ID.
+ */
+#define CLK_NR_CLKS 370
+
+/*
+ * CMU DMC
+ */
+#define CLK_DMC_FOUT_MPLL 1
+#define CLK_DMC_FOUT_BPLL 2
+
+#define CLK_DMC_MOUT_MPLL 3
+#define CLK_DMC_MOUT_BPLL 4
+#define CLK_DMC_MOUT_DPHY 5
+#define CLK_DMC_MOUT_DMC_BUS 6
+
+#define CLK_DMC_DIV_DMC 7
+#define CLK_DMC_DIV_DPHY 8
+#define CLK_DMC_DIV_DMC_PRE 9
+#define CLK_DMC_DIV_DMCP 10
+#define CLK_DMC_DIV_DMCD 11
+#define CLK_DMC_DIV_MPLL_PRE 12
+
+/*
+ * Total number of clocks of CMU_DMC.
+ * NOTE: Must be one greater than the highest clock ID.
+ */
+#define NR_CLKS_DMC 13
+
+#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
new file mode 100644
index 000000000000..8e4681b07ae7
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H
+#define _DT_BINDINGS_CLOCK_EXYNOS7_H
+
+/* TOPC */
+#define DOUT_ACLK_PERIS 1
+#define DOUT_SCLK_BUS0_PLL 2
+#define DOUT_SCLK_BUS1_PLL 3
+#define DOUT_SCLK_CC_PLL 4
+#define DOUT_SCLK_MFC_PLL 5
+#define DOUT_ACLK_CCORE_133 6
+#define TOPC_NR_CLK 7
+
+/* TOP0 */
+#define DOUT_ACLK_PERIC1 1
+#define DOUT_ACLK_PERIC0 2
+#define CLK_SCLK_UART0 3
+#define CLK_SCLK_UART1 4
+#define CLK_SCLK_UART2 5
+#define CLK_SCLK_UART3 6
+#define TOP0_NR_CLK 7
+
+/* TOP1 */
+#define DOUT_ACLK_FSYS1_200 1
+#define DOUT_ACLK_FSYS0_200 2
+#define DOUT_SCLK_MMC2 3
+#define DOUT_SCLK_MMC1 4
+#define DOUT_SCLK_MMC0 5
+#define CLK_SCLK_MMC2 6
+#define CLK_SCLK_MMC1 7
+#define CLK_SCLK_MMC0 8
+#define TOP1_NR_CLK 9
+
+/* CCORE */
+#define PCLK_RTC 1
+#define CCORE_NR_CLK 2
+
+/* PERIC0 */
+#define PCLK_UART0 1
+#define SCLK_UART0 2
+#define PCLK_HSI2C0 3
+#define PCLK_HSI2C1 4
+#define PCLK_HSI2C4 5
+#define PCLK_HSI2C5 6
+#define PCLK_HSI2C9 7
+#define PCLK_HSI2C10 8
+#define PCLK_HSI2C11 9
+#define PCLK_PWM 10
+#define SCLK_PWM 11
+#define PCLK_ADCIF 12
+#define PERIC0_NR_CLK 13
+
+/* PERIC1 */
+#define PCLK_UART1 1
+#define PCLK_UART2 2
+#define PCLK_UART3 3
+#define SCLK_UART1 4
+#define SCLK_UART2 5
+#define SCLK_UART3 6
+#define PCLK_HSI2C2 7
+#define PCLK_HSI2C3 8
+#define PCLK_HSI2C6 9
+#define PCLK_HSI2C7 10
+#define PCLK_HSI2C8 11
+#define PERIC1_NR_CLK 12
+
+/* PERIS */
+#define PCLK_CHIPID 1
+#define SCLK_CHIPID 2
+#define PCLK_WDT 3
+#define PCLK_TMU 4
+#define SCLK_TMU 5
+#define PERIS_NR_CLK 6
+
+/* FSYS0 */
+#define ACLK_MMC2 1
+#define FSYS0_NR_CLK 2
+
+/* FSYS1 */
+#define ACLK_MMC1 1
+#define ACLK_MMC0 2
+#define FSYS1_NR_CLK 3
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/hix5hd2-clock.h b/include/dt-bindings/clock/hix5hd2-clock.h
index aad579a75802..fd29c174ba63 100644
--- a/include/dt-bindings/clock/hix5hd2-clock.h
+++ b/include/dt-bindings/clock/hix5hd2-clock.h
@@ -46,6 +46,7 @@
#define HIX5HD2_SFC_MUX 64
#define HIX5HD2_MMC_MUX 65
#define HIX5HD2_FEPHY_MUX 66
+#define HIX5HD2_SD_MUX 67
/* gate clocks */
#define HIX5HD2_SFC_RST 128
@@ -53,6 +54,32 @@
#define HIX5HD2_MMC_CIU_CLK 130
#define HIX5HD2_MMC_BIU_CLK 131
#define HIX5HD2_MMC_CIU_RST 132
+#define HIX5HD2_FWD_BUS_CLK 133
+#define HIX5HD2_FWD_SYS_CLK 134
+#define HIX5HD2_MAC0_PHY_CLK 135
+#define HIX5HD2_SD_CIU_CLK 136
+#define HIX5HD2_SD_BIU_CLK 137
+#define HIX5HD2_SD_CIU_RST 138
+#define HIX5HD2_WDG0_CLK 139
+#define HIX5HD2_WDG0_RST 140
+#define HIX5HD2_I2C0_CLK 141
+#define HIX5HD2_I2C0_RST 142
+#define HIX5HD2_I2C1_CLK 143
+#define HIX5HD2_I2C1_RST 144
+#define HIX5HD2_I2C2_CLK 145
+#define HIX5HD2_I2C2_RST 146
+#define HIX5HD2_I2C3_CLK 147
+#define HIX5HD2_I2C3_RST 148
+#define HIX5HD2_I2C4_CLK 149
+#define HIX5HD2_I2C4_RST 150
+#define HIX5HD2_I2C5_CLK 151
+#define HIX5HD2_I2C5_RST 152
+
+/* complex */
+#define HIX5HD2_MAC0_CLK 192
+#define HIX5HD2_MAC1_CLK 193
+#define HIX5HD2_SATA_CLK 194
+#define HIX5HD2_USB_CLK 195
#define HIX5HD2_NR_CLKS 256
#endif /* __DTS_HIX5HD2_CLOCK_H */
diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h
index 5f2667ecd98e..f4b7478e23c8 100644
--- a/include/dt-bindings/clock/imx5-clock.h
+++ b/include/dt-bindings/clock/imx5-clock.h
@@ -198,6 +198,9 @@
#define IMX5_CLK_OCRAM 186
#define IMX5_CLK_SAHARA_IPG_GATE 187
#define IMX5_CLK_SATA_REF 188
-#define IMX5_CLK_END 189
+#define IMX5_CLK_STEP_SEL 189
+#define IMX5_CLK_CPU_PODF_SEL 190
+#define IMX5_CLK_ARM 191
+#define IMX5_CLK_END 192
#endif /* __DT_BINDINGS_CLOCK_IMX5_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 654151e24288..b690cdba163b 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -62,8 +62,8 @@
#define IMX6QDL_CLK_USDHC3_SEL 50
#define IMX6QDL_CLK_USDHC4_SEL 51
#define IMX6QDL_CLK_ENFC_SEL 52
-#define IMX6QDL_CLK_EMI_SEL 53
-#define IMX6QDL_CLK_EMI_SLOW_SEL 54
+#define IMX6QDL_CLK_EIM_SEL 53
+#define IMX6QDL_CLK_EIM_SLOW_SEL 54
#define IMX6QDL_CLK_VDO_AXI_SEL 55
#define IMX6QDL_CLK_VPU_AXI_SEL 56
#define IMX6QDL_CLK_CKO1_SEL 57
@@ -106,8 +106,8 @@
#define IMX6QDL_CLK_USDHC4_PODF 94
#define IMX6QDL_CLK_ENFC_PRED 95
#define IMX6QDL_CLK_ENFC_PODF 96
-#define IMX6QDL_CLK_EMI_PODF 97
-#define IMX6QDL_CLK_EMI_SLOW_PODF 98
+#define IMX6QDL_CLK_EIM_PODF 97
+#define IMX6QDL_CLK_EIM_SLOW_PODF 98
#define IMX6QDL_CLK_VPU_AXI_PODF 99
#define IMX6QDL_CLK_CKO1_PODF 100
#define IMX6QDL_CLK_AXI 101
@@ -128,7 +128,7 @@
#define IMX6Q_CLK_ECSPI5 116
#define IMX6DL_CLK_I2C4 116
#define IMX6QDL_CLK_ENET 117
-#define IMX6QDL_CLK_ESAI 118
+#define IMX6QDL_CLK_ESAI_EXTAL 118
#define IMX6QDL_CLK_GPT_IPG 119
#define IMX6QDL_CLK_GPT_IPG_PER 120
#define IMX6QDL_CLK_GPU2D_CORE 121
@@ -218,7 +218,36 @@
#define IMX6QDL_CLK_LVDS2_SEL 205
#define IMX6QDL_CLK_LVDS1_GATE 206
#define IMX6QDL_CLK_LVDS2_GATE 207
-#define IMX6QDL_CLK_ESAI_AHB 208
-#define IMX6QDL_CLK_END 209
+#define IMX6QDL_CLK_ESAI_IPG 208
+#define IMX6QDL_CLK_ESAI_MEM 209
+#define IMX6QDL_CLK_ASRC_IPG 210
+#define IMX6QDL_CLK_ASRC_MEM 211
+#define IMX6QDL_CLK_LVDS1_IN 212
+#define IMX6QDL_CLK_LVDS2_IN 213
+#define IMX6QDL_CLK_ANACLK1 214
+#define IMX6QDL_CLK_ANACLK2 215
+#define IMX6QDL_PLL1_BYPASS_SRC 216
+#define IMX6QDL_PLL2_BYPASS_SRC 217
+#define IMX6QDL_PLL3_BYPASS_SRC 218
+#define IMX6QDL_PLL4_BYPASS_SRC 219
+#define IMX6QDL_PLL5_BYPASS_SRC 220
+#define IMX6QDL_PLL6_BYPASS_SRC 221
+#define IMX6QDL_PLL7_BYPASS_SRC 222
+#define IMX6QDL_CLK_PLL1 223
+#define IMX6QDL_CLK_PLL2 224
+#define IMX6QDL_CLK_PLL3 225
+#define IMX6QDL_CLK_PLL4 226
+#define IMX6QDL_CLK_PLL5 227
+#define IMX6QDL_CLK_PLL6 228
+#define IMX6QDL_CLK_PLL7 229
+#define IMX6QDL_PLL1_BYPASS 230
+#define IMX6QDL_PLL2_BYPASS 231
+#define IMX6QDL_PLL3_BYPASS 232
+#define IMX6QDL_PLL4_BYPASS 233
+#define IMX6QDL_PLL5_BYPASS 234
+#define IMX6QDL_PLL6_BYPASS 235
+#define IMX6QDL_PLL7_BYPASS 236
+#define IMX6QDL_CLK_GPT_3M 237
+#define IMX6QDL_CLK_END 238
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index b91dd462ba85..9ce4e421096f 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -146,6 +146,34 @@
#define IMX6SL_CLK_PLL4_AUDIO_DIV 133
#define IMX6SL_CLK_SPBA 134
#define IMX6SL_CLK_ENET 135
-#define IMX6SL_CLK_END 136
+#define IMX6SL_CLK_LVDS1_SEL 136
+#define IMX6SL_CLK_LVDS1_OUT 137
+#define IMX6SL_CLK_LVDS1_IN 138
+#define IMX6SL_CLK_ANACLK1 139
+#define IMX6SL_PLL1_BYPASS_SRC 140
+#define IMX6SL_PLL2_BYPASS_SRC 141
+#define IMX6SL_PLL3_BYPASS_SRC 142
+#define IMX6SL_PLL4_BYPASS_SRC 143
+#define IMX6SL_PLL5_BYPASS_SRC 144
+#define IMX6SL_PLL6_BYPASS_SRC 145
+#define IMX6SL_PLL7_BYPASS_SRC 146
+#define IMX6SL_CLK_PLL1 147
+#define IMX6SL_CLK_PLL2 148
+#define IMX6SL_CLK_PLL3 149
+#define IMX6SL_CLK_PLL4 150
+#define IMX6SL_CLK_PLL5 151
+#define IMX6SL_CLK_PLL6 152
+#define IMX6SL_CLK_PLL7 153
+#define IMX6SL_PLL1_BYPASS 154
+#define IMX6SL_PLL2_BYPASS 155
+#define IMX6SL_PLL3_BYPASS 156
+#define IMX6SL_PLL4_BYPASS 157
+#define IMX6SL_PLL5_BYPASS 158
+#define IMX6SL_PLL6_BYPASS 159
+#define IMX6SL_PLL7_BYPASS 160
+#define IMX6SL_CLK_SSI1_IPG 161
+#define IMX6SL_CLK_SSI2_IPG 162
+#define IMX6SL_CLK_SSI3_IPG 163
+#define IMX6SL_CLK_END 164
#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index 421d8bb76f2f..995709119ec5 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -251,6 +251,29 @@
#define IMX6SX_CLK_SAI2_IPG 238
#define IMX6SX_CLK_ESAI_IPG 239
#define IMX6SX_CLK_ESAI_MEM 240
-#define IMX6SX_CLK_CLK_END 241
+#define IMX6SX_CLK_LVDS1_IN 241
+#define IMX6SX_CLK_ANACLK1 242
+#define IMX6SX_PLL1_BYPASS_SRC 243
+#define IMX6SX_PLL2_BYPASS_SRC 244
+#define IMX6SX_PLL3_BYPASS_SRC 245
+#define IMX6SX_PLL4_BYPASS_SRC 246
+#define IMX6SX_PLL5_BYPASS_SRC 247
+#define IMX6SX_PLL6_BYPASS_SRC 248
+#define IMX6SX_PLL7_BYPASS_SRC 249
+#define IMX6SX_CLK_PLL1 250
+#define IMX6SX_CLK_PLL2 251
+#define IMX6SX_CLK_PLL3 252
+#define IMX6SX_CLK_PLL4 253
+#define IMX6SX_CLK_PLL5 254
+#define IMX6SX_CLK_PLL6 255
+#define IMX6SX_CLK_PLL7 256
+#define IMX6SX_PLL1_BYPASS 257
+#define IMX6SX_PLL2_BYPASS 258
+#define IMX6SX_PLL3_BYPASS 259
+#define IMX6SX_PLL4_BYPASS 260
+#define IMX6SX_PLL5_BYPASS 261
+#define IMX6SX_PLL6_BYPASS 262
+#define IMX6SX_PLL7_BYPASS 263
+#define IMX6SX_CLK_CLK_END 264
#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
new file mode 100644
index 000000000000..591f7fba89e2
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -0,0 +1,74 @@
+#ifndef __DTS_MARVELL_MMP2_CLOCK_H
+#define __DTS_MARVELL_MMP2_CLOCK_H
+
+/* fixed clocks and plls */
+#define MMP2_CLK_CLK32 1
+#define MMP2_CLK_VCTCXO 2
+#define MMP2_CLK_PLL1 3
+#define MMP2_CLK_PLL1_2 8
+#define MMP2_CLK_PLL1_4 9
+#define MMP2_CLK_PLL1_8 10
+#define MMP2_CLK_PLL1_16 11
+#define MMP2_CLK_PLL1_3 12
+#define MMP2_CLK_PLL1_6 13
+#define MMP2_CLK_PLL1_12 14
+#define MMP2_CLK_PLL1_20 15
+#define MMP2_CLK_PLL2 16
+#define MMP2_CLK_PLL2_2 17
+#define MMP2_CLK_PLL2_4 18
+#define MMP2_CLK_PLL2_8 19
+#define MMP2_CLK_PLL2_16 20
+#define MMP2_CLK_PLL2_3 21
+#define MMP2_CLK_PLL2_6 22
+#define MMP2_CLK_PLL2_12 23
+#define MMP2_CLK_VCTCXO_2 24
+#define MMP2_CLK_VCTCXO_4 25
+#define MMP2_CLK_UART_PLL 26
+#define MMP2_CLK_USB_PLL 27
+
+/* apb peripherals */
+#define MMP2_CLK_TWSI0 60
+#define MMP2_CLK_TWSI1 61
+#define MMP2_CLK_TWSI2 62
+#define MMP2_CLK_TWSI3 63
+#define MMP2_CLK_TWSI4 64
+#define MMP2_CLK_TWSI5 65
+#define MMP2_CLK_GPIO 66
+#define MMP2_CLK_KPC 67
+#define MMP2_CLK_RTC 68
+#define MMP2_CLK_PWM0 69
+#define MMP2_CLK_PWM1 70
+#define MMP2_CLK_PWM2 71
+#define MMP2_CLK_PWM3 72
+#define MMP2_CLK_UART0 73
+#define MMP2_CLK_UART1 74
+#define MMP2_CLK_UART2 75
+#define MMP2_CLK_UART3 76
+#define MMP2_CLK_SSP0 77
+#define MMP2_CLK_SSP1 78
+#define MMP2_CLK_SSP2 79
+#define MMP2_CLK_SSP3 80
+
+/* axi peripherals */
+#define MMP2_CLK_SDH0 101
+#define MMP2_CLK_SDH1 102
+#define MMP2_CLK_SDH2 103
+#define MMP2_CLK_SDH3 104
+#define MMP2_CLK_USB 105
+#define MMP2_CLK_DISP0 106
+#define MMP2_CLK_DISP0_MUX 107
+#define MMP2_CLK_DISP0_SPHY 108
+#define MMP2_CLK_DISP1 109
+#define MMP2_CLK_DISP1_MUX 110
+#define MMP2_CLK_CCIC_ARBITER 111
+#define MMP2_CLK_CCIC0 112
+#define MMP2_CLK_CCIC0_MIX 113
+#define MMP2_CLK_CCIC0_PHY 114
+#define MMP2_CLK_CCIC0_SPHY 115
+#define MMP2_CLK_CCIC1 116
+#define MMP2_CLK_CCIC1_MIX 117
+#define MMP2_CLK_CCIC1_PHY 118
+#define MMP2_CLK_CCIC1_SPHY 119
+
+#define MMP2_NR_CLKS 200
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
new file mode 100644
index 000000000000..79630b9d74b8
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -0,0 +1,57 @@
+#ifndef __DTS_MARVELL_PXA168_CLOCK_H
+#define __DTS_MARVELL_PXA168_CLOCK_H
+
+/* fixed clocks and plls */
+#define PXA168_CLK_CLK32 1
+#define PXA168_CLK_VCTCXO 2
+#define PXA168_CLK_PLL1 3
+#define PXA168_CLK_PLL1_2 8
+#define PXA168_CLK_PLL1_4 9
+#define PXA168_CLK_PLL1_8 10
+#define PXA168_CLK_PLL1_16 11
+#define PXA168_CLK_PLL1_6 12
+#define PXA168_CLK_PLL1_12 13
+#define PXA168_CLK_PLL1_24 14
+#define PXA168_CLK_PLL1_48 15
+#define PXA168_CLK_PLL1_96 16
+#define PXA168_CLK_PLL1_13 17
+#define PXA168_CLK_PLL1_13_1_5 18
+#define PXA168_CLK_PLL1_2_1_5 19
+#define PXA168_CLK_PLL1_3_16 20
+#define PXA168_CLK_UART_PLL 27
+
+/* apb peripherals */
+#define PXA168_CLK_TWSI0 60
+#define PXA168_CLK_TWSI1 61
+#define PXA168_CLK_TWSI2 62
+#define PXA168_CLK_TWSI3 63
+#define PXA168_CLK_GPIO 64
+#define PXA168_CLK_KPC 65
+#define PXA168_CLK_RTC 66
+#define PXA168_CLK_PWM0 67
+#define PXA168_CLK_PWM1 68
+#define PXA168_CLK_PWM2 69
+#define PXA168_CLK_PWM3 70
+#define PXA168_CLK_UART0 71
+#define PXA168_CLK_UART1 72
+#define PXA168_CLK_UART2 73
+#define PXA168_CLK_SSP0 74
+#define PXA168_CLK_SSP1 75
+#define PXA168_CLK_SSP2 76
+#define PXA168_CLK_SSP3 77
+#define PXA168_CLK_SSP4 78
+
+/* axi peripherals */
+#define PXA168_CLK_DFC 100
+#define PXA168_CLK_SDH0 101
+#define PXA168_CLK_SDH1 102
+#define PXA168_CLK_SDH2 103
+#define PXA168_CLK_USB 104
+#define PXA168_CLK_SPH 105
+#define PXA168_CLK_DISP0 106
+#define PXA168_CLK_CCIC0 107
+#define PXA168_CLK_CCIC0_PHY 108
+#define PXA168_CLK_CCIC0_SPHY 109
+
+#define PXA168_NR_CLKS 200
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
new file mode 100644
index 000000000000..719cffb2bea2
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -0,0 +1,54 @@
+#ifndef __DTS_MARVELL_PXA910_CLOCK_H
+#define __DTS_MARVELL_PXA910_CLOCK_H
+
+/* fixed clocks and plls */
+#define PXA910_CLK_CLK32 1
+#define PXA910_CLK_VCTCXO 2
+#define PXA910_CLK_PLL1 3
+#define PXA910_CLK_PLL1_2 8
+#define PXA910_CLK_PLL1_4 9
+#define PXA910_CLK_PLL1_8 10
+#define PXA910_CLK_PLL1_16 11
+#define PXA910_CLK_PLL1_6 12
+#define PXA910_CLK_PLL1_12 13
+#define PXA910_CLK_PLL1_24 14
+#define PXA910_CLK_PLL1_48 15
+#define PXA910_CLK_PLL1_96 16
+#define PXA910_CLK_PLL1_13 17
+#define PXA910_CLK_PLL1_13_1_5 18
+#define PXA910_CLK_PLL1_2_1_5 19
+#define PXA910_CLK_PLL1_3_16 20
+#define PXA910_CLK_UART_PLL 27
+
+/* apb peripherals */
+#define PXA910_CLK_TWSI0 60
+#define PXA910_CLK_TWSI1 61
+#define PXA910_CLK_TWSI2 62
+#define PXA910_CLK_TWSI3 63
+#define PXA910_CLK_GPIO 64
+#define PXA910_CLK_KPC 65
+#define PXA910_CLK_RTC 66
+#define PXA910_CLK_PWM0 67
+#define PXA910_CLK_PWM1 68
+#define PXA910_CLK_PWM2 69
+#define PXA910_CLK_PWM3 70
+#define PXA910_CLK_UART0 71
+#define PXA910_CLK_UART1 72
+#define PXA910_CLK_UART2 73
+#define PXA910_CLK_SSP0 74
+#define PXA910_CLK_SSP1 75
+
+/* axi peripherals */
+#define PXA910_CLK_DFC 100
+#define PXA910_CLK_SDH0 101
+#define PXA910_CLK_SDH1 102
+#define PXA910_CLK_SDH2 103
+#define PXA910_CLK_USB 104
+#define PXA910_CLK_SPH 105
+#define PXA910_CLK_DISP0 106
+#define PXA910_CLK_CCIC0 107
+#define PXA910_CLK_CCIC0_PHY 108
+#define PXA910_CLK_CCIC0_SPHY 109
+
+#define PXA910_NR_CLKS 200
+#endif
diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h
new file mode 100644
index 000000000000..7b28b0905869
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77686.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding clock constants for the Maxim 77686 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77686_CLK_AP 0
+#define MAX77686_CLK_CP 1
+#define MAX77686_CLK_PMIC 2
+
+/* Total number of clocks. */
+#define MAX77686_CLKS_NUM (MAX77686_CLK_PMIC + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H */
diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h
new file mode 100644
index 000000000000..997312edcbb5
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77802.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding clock constants for the Maxim 77802 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77802_CLK_32K_AP 0
+#define MAX77802_CLK_32K_CP 1
+
+/* Total number of clocks. */
+#define MAX77802_CLKS_NUM (MAX77802_CLK_32K_CP + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H */
diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h
new file mode 100644
index 000000000000..e65803b1dc7e
--- /dev/null
+++ b/include/dt-bindings/clock/pxa-clock.h
@@ -0,0 +1,77 @@
+/*
+ * Inspired by original work from pxa2xx-regs.h by Nicolas Pitre
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_PXA2XX_H__
+#define __DT_BINDINGS_CLOCK_PXA2XX_H__
+
+#define CLK_NONE 0
+#define CLK_1WIRE 1
+#define CLK_AC97 2
+#define CLK_AC97CONF 3
+#define CLK_ASSP 4
+#define CLK_BOOT 5
+#define CLK_BTUART 6
+#define CLK_CAMERA 7
+#define CLK_CIR 8
+#define CLK_CORE 9
+#define CLK_DMC 10
+#define CLK_FFUART 11
+#define CLK_FICP 12
+#define CLK_GPIO 13
+#define CLK_HSIO2 14
+#define CLK_HWUART 15
+#define CLK_I2C 16
+#define CLK_I2S 17
+#define CLK_IM 18
+#define CLK_INC 19
+#define CLK_ISC 20
+#define CLK_KEYPAD 21
+#define CLK_LCD 22
+#define CLK_MEMC 23
+#define CLK_MEMSTK 24
+#define CLK_MINI_IM 25
+#define CLK_MINI_LCD 26
+#define CLK_MMC 27
+#define CLK_MMC1 28
+#define CLK_MMC2 29
+#define CLK_MMC3 30
+#define CLK_MSL 31
+#define CLK_MSL0 32
+#define CLK_MVED 33
+#define CLK_NAND 34
+#define CLK_NSSP 35
+#define CLK_OSTIMER 36
+#define CLK_PWM0 37
+#define CLK_PWM1 38
+#define CLK_PWM2 39
+#define CLK_PWM3 40
+#define CLK_PWRI2C 41
+#define CLK_PXA300_GCU 42
+#define CLK_PXA320_GCU 43
+#define CLK_SMC 44
+#define CLK_SSP 45
+#define CLK_SSP1 46
+#define CLK_SSP2 47
+#define CLK_SSP3 48
+#define CLK_SSP4 49
+#define CLK_STUART 50
+#define CLK_TOUCH 51
+#define CLK_TPM 52
+#define CLK_UDC 53
+#define CLK_USB 54
+#define CLK_USB2 55
+#define CLK_USBH 56
+#define CLK_USBHOST 57
+#define CLK_USIM 58
+#define CLK_USIM1 59
+#define CLK_USMI0 60
+#define CLK_MAX 61
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
index a929f86d0ddd..d72b5b35f15e 100644
--- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -60,7 +60,7 @@
#define ESC1_CLK_SRC 43
#define HDMI_CLK_SRC 44
#define VSYNC_CLK_SRC 45
-#define RBCPR_CLK_SRC 46
+#define MMSS_RBCPR_CLK_SRC 46
#define RBBMTIMER_CLK_SRC 47
#define MAPLE_CLK_SRC 48
#define VDP_CLK_SRC 49
diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h
new file mode 100644
index 000000000000..476135da0f23
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7740-clock.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7740_H__
+#define __DT_BINDINGS_CLOCK_R8A7740_H__
+
+/* CPG */
+#define R8A7740_CLK_SYSTEM 0
+#define R8A7740_CLK_PLLC0 1
+#define R8A7740_CLK_PLLC1 2
+#define R8A7740_CLK_PLLC2 3
+#define R8A7740_CLK_R 4
+#define R8A7740_CLK_USB24S 5
+#define R8A7740_CLK_I 6
+#define R8A7740_CLK_ZG 7
+#define R8A7740_CLK_B 8
+#define R8A7740_CLK_M1 9
+#define R8A7740_CLK_HP 10
+#define R8A7740_CLK_HPP 11
+#define R8A7740_CLK_USBP 12
+#define R8A7740_CLK_S 13
+#define R8A7740_CLK_ZB 14
+#define R8A7740_CLK_M3 15
+#define R8A7740_CLK_CP 16
+
+/* MSTP1 */
+#define R8A7740_CLK_CEU21 28
+#define R8A7740_CLK_CEU20 27
+#define R8A7740_CLK_TMU0 25
+#define R8A7740_CLK_LCDC1 17
+#define R8A7740_CLK_IIC0 16
+#define R8A7740_CLK_TMU1 11
+#define R8A7740_CLK_LCDC0 0
+
+/* MSTP2 */
+#define R8A7740_CLK_SCIFA6 30
+#define R8A7740_CLK_INTCA 29
+#define R8A7740_CLK_SCIFA7 22
+#define R8A7740_CLK_DMAC1 18
+#define R8A7740_CLK_DMAC2 17
+#define R8A7740_CLK_DMAC3 16
+#define R8A7740_CLK_USBDMAC 14
+#define R8A7740_CLK_SCIFA5 7
+#define R8A7740_CLK_SCIFB 6
+#define R8A7740_CLK_SCIFA0 4
+#define R8A7740_CLK_SCIFA1 3
+#define R8A7740_CLK_SCIFA2 2
+#define R8A7740_CLK_SCIFA3 1
+#define R8A7740_CLK_SCIFA4 0
+
+/* MSTP3 */
+#define R8A7740_CLK_CMT1 29
+#define R8A7740_CLK_FSI 28
+#define R8A7740_CLK_IIC1 23
+#define R8A7740_CLK_USBF 20
+#define R8A7740_CLK_SDHI0 14
+#define R8A7740_CLK_SDHI1 13
+#define R8A7740_CLK_MMC 12
+#define R8A7740_CLK_GETHER 9
+#define R8A7740_CLK_TPU0 4
+
+/* MSTP4 */
+#define R8A7740_CLK_USBH 16
+#define R8A7740_CLK_SDHI2 15
+#define R8A7740_CLK_USBFUNC 7
+#define R8A7740_CLK_USBPHY 6
+
+/* SUBCK* */
+#define R8A7740_CLK_SUBCK 9
+#define R8A7740_CLK_SUBCK2 10
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7740_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index f929a79e6998..c27b3b5133b9 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -26,7 +26,18 @@
#define R8A7790_CLK_MSIOF0 0
/* MSTP1 */
+#define R8A7790_CLK_VCP1 0
+#define R8A7790_CLK_VCP0 1
+#define R8A7790_CLK_VPC1 2
+#define R8A7790_CLK_VPC0 3
+#define R8A7790_CLK_JPU 6
+#define R8A7790_CLK_SSP1 9
#define R8A7790_CLK_TMU1 11
+#define R8A7790_CLK_3DG 12
+#define R8A7790_CLK_2DDMAC 15
+#define R8A7790_CLK_FDP1_2 17
+#define R8A7790_CLK_FDP1_1 18
+#define R8A7790_CLK_FDP1_0 19
#define R8A7790_CLK_TMU3 21
#define R8A7790_CLK_TMU2 22
#define R8A7790_CLK_CMT0 24
@@ -67,6 +78,8 @@
#define R8A7790_CLK_USBDMAC1 31
/* MSTP5 */
+#define R8A7790_CLK_AUDIO_DMAC1 1
+#define R8A7790_CLK_AUDIO_DMAC0 2
#define R8A7790_CLK_THERMAL 22
#define R8A7790_CLK_PWM 23
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index f0d4d1049162..3ea2bbc0da3f 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -25,7 +25,15 @@
#define R8A7791_CLK_MSIOF0 0
/* MSTP1 */
+#define R8A7791_CLK_VCP0 1
+#define R8A7791_CLK_VPC0 3
+#define R8A7791_CLK_JPU 6
+#define R8A7791_CLK_SSP1 9
#define R8A7791_CLK_TMU1 11
+#define R8A7791_CLK_3DG 12
+#define R8A7791_CLK_2DDMAC 15
+#define R8A7791_CLK_FDP1_1 18
+#define R8A7791_CLK_FDP1_0 19
#define R8A7791_CLK_TMU3 21
#define R8A7791_CLK_TMU2 22
#define R8A7791_CLK_CMT0 24
@@ -61,6 +69,8 @@
#define R8A7791_CLK_USBDMAC1 31
/* MSTP5 */
+#define R8A7791_CLK_AUDIO_DMAC1 1
+#define R8A7791_CLK_AUDIO_DMAC0 2
#define R8A7791_CLK_THERMAL 22
#define R8A7791_CLK_PWM 23
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
new file mode 100644
index 000000000000..aa9c286e60c0
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright 2013 Ideas On Board SPRL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__
+#define __DT_BINDINGS_CLOCK_R8A7794_H__
+
+/* CPG */
+#define R8A7794_CLK_MAIN 0
+#define R8A7794_CLK_PLL0 1
+#define R8A7794_CLK_PLL1 2
+#define R8A7794_CLK_PLL3 3
+#define R8A7794_CLK_LB 4
+#define R8A7794_CLK_QSPI 5
+#define R8A7794_CLK_SDH 6
+#define R8A7794_CLK_SD0 7
+#define R8A7794_CLK_Z 8
+
+/* MSTP0 */
+#define R8A7794_CLK_MSIOF0 0
+
+/* MSTP1 */
+#define R8A7794_CLK_VCP0 1
+#define R8A7794_CLK_VPC0 3
+#define R8A7794_CLK_TMU1 11
+#define R8A7794_CLK_3DG 12
+#define R8A7794_CLK_2DDMAC 15
+#define R8A7794_CLK_FDP1_0 19
+#define R8A7794_CLK_TMU3 21
+#define R8A7794_CLK_TMU2 22
+#define R8A7794_CLK_CMT0 24
+#define R8A7794_CLK_TMU0 25
+#define R8A7794_CLK_VSP1_DU0 28
+#define R8A7794_CLK_VSP1_S 31
+
+/* MSTP2 */
+#define R8A7794_CLK_SCIFA2 2
+#define R8A7794_CLK_SCIFA1 3
+#define R8A7794_CLK_SCIFA0 4
+#define R8A7794_CLK_MSIOF2 5
+#define R8A7794_CLK_SCIFB0 6
+#define R8A7794_CLK_SCIFB1 7
+#define R8A7794_CLK_MSIOF1 8
+#define R8A7794_CLK_SCIFB2 16
+
+/* MSTP3 */
+#define R8A7794_CLK_CMT1 29
+
+/* MSTP5 */
+#define R8A7794_CLK_THERMAL 22
+#define R8A7794_CLK_PWM 23
+
+/* MSTP7 */
+#define R8A7794_CLK_HSCIF2 13
+#define R8A7794_CLK_SCIF5 14
+#define R8A7794_CLK_SCIF4 15
+#define R8A7794_CLK_HSCIF1 16
+#define R8A7794_CLK_HSCIF0 17
+#define R8A7794_CLK_SCIF3 18
+#define R8A7794_CLK_SCIF2 19
+#define R8A7794_CLK_SCIF1 20
+#define R8A7794_CLK_SCIF0 21
+
+/* MSTP8 */
+#define R8A7794_CLK_VIN1 10
+#define R8A7794_CLK_VIN0 11
+#define R8A7794_CLK_ETHER 13
+
+/* MSTP9 */
+#define R8A7794_CLK_GPIO6 5
+#define R8A7794_CLK_GPIO5 7
+#define R8A7794_CLK_GPIO4 8
+#define R8A7794_CLK_GPIO3 9
+#define R8A7794_CLK_GPIO2 10
+#define R8A7794_CLK_GPIO1 11
+#define R8A7794_CLK_GPIO0 12
+
+/* MSTP11 */
+#define R8A7794_CLK_SCIFA3 6
+#define R8A7794_CLK_SCIFA4 7
+#define R8A7794_CLK_SCIFA5 8
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index 750ee60e75fb..6a370503c954 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -20,6 +20,7 @@
#define PLL_GPLL 4
#define CORE_PERI 5
#define CORE_L2C 6
+#define ARMCLK 7
/* sclk gates (special clocks) */
#define SCLK_UART0 64
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index ebcb460ea4ad..f60ce72a2b2c 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -19,6 +19,7 @@
#define PLL_CPLL 3
#define PLL_GPLL 4
#define PLL_NPLL 5
+#define ARMCLK 6
/* sclk gates (special clocks) */
#define SCLK_GPU 64
@@ -61,6 +62,24 @@
#define SCLK_LCDC_PWM1 101
#define SCLK_MAC_RX 102
#define SCLK_MAC_TX 103
+#define SCLK_EDP_24M 104
+#define SCLK_EDP 105
+#define SCLK_RGA 106
+#define SCLK_ISP 107
+#define SCLK_ISP_JPE 108
+#define SCLK_HDMI_HDCP 109
+#define SCLK_HDMI_CEC 110
+#define SCLK_HEVC_CABAC 111
+#define SCLK_HEVC_CORE 112
+#define SCLK_I2S0_OUT 113
+#define SCLK_SDMMC_DRV 114
+#define SCLK_SDIO0_DRV 115
+#define SCLK_SDIO1_DRV 116
+#define SCLK_EMMC_DRV 117
+#define SCLK_SDMMC_SAMPLE 118
+#define SCLK_SDIO0_SAMPLE 119
+#define SCLK_SDIO1_SAMPLE 120
+#define SCLK_EMMC_SAMPLE 121
#define DCLK_VOP0 190
#define DCLK_VOP1 191
@@ -75,6 +94,16 @@
#define ACLK_VOP1 198
#define ACLK_CRYPTO 199
#define ACLK_RGA 200
+#define ACLK_RGA_NIU 201
+#define ACLK_IEP 202
+#define ACLK_VIO0_NIU 203
+#define ACLK_VIP 204
+#define ACLK_ISP 205
+#define ACLK_VIO1_NIU 206
+#define ACLK_HEVC 207
+#define ACLK_VCODEC 208
+#define ACLK_CPU 209
+#define ACLK_PERI 210
/* pclk gates */
#define PCLK_GPIO0 320
@@ -112,6 +141,19 @@
#define PCLK_PS2C 352
#define PCLK_TIMER 353
#define PCLK_TZPC 354
+#define PCLK_EDP_CTRL 355
+#define PCLK_MIPI_DSI0 356
+#define PCLK_MIPI_DSI1 357
+#define PCLK_MIPI_CSI 358
+#define PCLK_LVDS_PHY 359
+#define PCLK_HDMI_CTRL 360
+#define PCLK_VIO2_H2P 361
+#define PCLK_CPU 362
+#define PCLK_PERI 363
+#define PCLK_DDRUPCTL0 364
+#define PCLK_PUBL0 365
+#define PCLK_DDRUPCTL1 366
+#define PCLK_PUBL1 367
/* hclk gates */
#define HCLK_GPS 448
@@ -137,8 +179,16 @@
#define HCLK_IEP 468
#define HCLK_ISP 469
#define HCLK_RGA 470
+#define HCLK_VIO_AHB_ARBI 471
+#define HCLK_VIO_NIU 472
+#define HCLK_VIP 473
+#define HCLK_VIO2_H2P 474
+#define HCLK_HEVC 475
+#define HCLK_VCODEC 476
+#define HCLK_CPU 477
+#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_RGA + 1)
+#define CLK_NR_CLKS (HCLK_PERI + 1)
/* soft-reset indices */
#define SRST_CORE0 0
@@ -276,3 +326,46 @@
#define SRST_USBHOST1_CON 140
#define SRST_USB_ADP 141
#define SRST_ACC_EFUSE 142
+
+#define SRST_CORESIGHT 144
+#define SRST_PD_CORE_AHB_NOC 145
+#define SRST_PD_CORE_APB_NOC 146
+#define SRST_PD_CORE_MP_AXI 147
+#define SRST_GIC 148
+#define SRST_LCDC_PWM0 149
+#define SRST_LCDC_PWM1 150
+#define SRST_VIO0_H2P_BRG 151
+#define SRST_VIO1_H2P_BRG 152
+#define SRST_RGA_H2P_BRG 153
+#define SRST_HEVC 154
+#define SRST_TSADC 159
+
+#define SRST_DDRPHY0 160
+#define SRST_DDRPHY0_APB 161
+#define SRST_DDRCTRL0 162
+#define SRST_DDRCTRL0_APB 163
+#define SRST_DDRPHY0_CTRL 164
+#define SRST_DDRPHY1 165
+#define SRST_DDRPHY1_APB 166
+#define SRST_DDRCTRL1 167
+#define SRST_DDRCTRL1_APB 168
+#define SRST_DDRPHY1_CTRL 169
+#define SRST_DDRMSCH0 170
+#define SRST_DDRMSCH1 171
+#define SRST_CRYPTO 174
+#define SRST_C2C_HOST 175
+
+#define SRST_LCDC1_AXI 176
+#define SRST_LCDC1_AHB 177
+#define SRST_LCDC1_DCLK 178
+#define SRST_UART0 179
+#define SRST_UART1 180
+#define SRST_UART2 181
+#define SRST_UART3 182
+#define SRST_UART4 183
+#define SRST_SIMC 186
+#define SRST_PS2C 187
+#define SRST_TSP 188
+#define SRST_TSP_CLKIN0 189
+#define SRST_TSP_CLKIN1 190
+#define SRST_TSP_27M 191
diff --git a/include/dt-bindings/clock/rockchip,rk808.h b/include/dt-bindings/clock/rockchip,rk808.h
new file mode 100644
index 000000000000..1a873432f965
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk808.h
@@ -0,0 +1,11 @@
+/*
+ * This header provides clock index constants for the RK808 PMIC clkout.
+ */
+#ifndef _CLK_ROCKCHIP_RK808
+#define _CLK_ROCKCHIP_RK808
+
+/* CLOCKOUT index */
+#define RK808_CLKOUT0 0
+#define RK808_CLKOUT1 1
+
+#endif
diff --git a/include/dt-bindings/clock/stih407-clks.h b/include/dt-bindings/clock/stih407-clks.h
new file mode 100644
index 000000000000..7af2b717b3b2
--- /dev/null
+++ b/include/dt-bindings/clock/stih407-clks.h
@@ -0,0 +1,86 @@
+/*
+ * This header provides clock index constants for the STMicroelectronics
+ * STiH407 SoC.
+ */
+#ifndef _DT_BINDINGS_CLK_STIH407
+#define _DT_BINDINGS_CLK_STIH407
+
+/* CLOCKGEN C0 */
+#define CLK_ICN_GPU 0
+#define CLK_FDMA 1
+#define CLK_NAND 2
+#define CLK_HVA 3
+#define CLK_PROC_STFE 4
+#define CLK_PROC_TP 5
+#define CLK_RX_ICN_DMU 6
+#define CLK_RX_ICN_DISP_0 6
+#define CLK_RX_ICN_DISP_1 6
+#define CLK_RX_ICN_HVA 7
+#define CLK_RX_ICN_TS 7
+#define CLK_ICN_CPU 8
+#define CLK_TX_ICN_DMU 9
+#define CLK_TX_ICN_HVA 9
+#define CLK_TX_ICN_TS 9
+#define CLK_ICN_COMPO 9
+#define CLK_MMC_0 10
+#define CLK_MMC_1 11
+#define CLK_JPEGDEC 12
+#define CLK_ICN_REG 13
+#define CLK_TRACE_A9 13
+#define CLK_PTI_STM 13
+#define CLK_EXT2F_A9 13
+#define CLK_IC_BDISP_0 14
+#define CLK_IC_BDISP_1 15
+#define CLK_PP_DMU 16
+#define CLK_VID_DMU 17
+#define CLK_DSS_LPC 18
+#define CLK_ST231_AUD_0 19
+#define CLK_ST231_GP_0 19
+#define CLK_ST231_GP_1 20
+#define CLK_ST231_DMU 21
+#define CLK_ICN_LMI 22
+#define CLK_TX_ICN_DISP_0 23
+#define CLK_TX_ICN_DISP_1 23
+#define CLK_ICN_SBC 24
+#define CLK_STFE_FRC2 25
+#define CLK_ETH_PHY 26
+#define CLK_ETH_REF_PHYCLK 27
+#define CLK_FLASH_PROMIP 28
+#define CLK_MAIN_DISP 29
+#define CLK_AUX_DISP 30
+#define CLK_COMPO_DVP 31
+
+/* CLOCKGEN D0 */
+#define CLK_PCM_0 0
+#define CLK_PCM_1 1
+#define CLK_PCM_2 2
+#define CLK_SPDIFF 3
+
+/* CLOCKGEN D2 */
+#define CLK_PIX_MAIN_DISP 0
+#define CLK_PIX_PIP 1
+#define CLK_PIX_GDP1 2
+#define CLK_PIX_GDP2 3
+#define CLK_PIX_GDP3 4
+#define CLK_PIX_GDP4 5
+#define CLK_PIX_AUX_DISP 6
+#define CLK_DENC 7
+#define CLK_PIX_HDDAC 8
+#define CLK_HDDAC 9
+#define CLK_SDDAC 10
+#define CLK_PIX_DVO 11
+#define CLK_DVO 12
+#define CLK_PIX_HDMI 13
+#define CLK_TMDS_HDMI 14
+#define CLK_REF_HDMIPHY 15
+
+/* CLOCKGEN D3 */
+#define CLK_STFE_FRC1 0
+#define CLK_TSOUT_0 1
+#define CLK_TSOUT_1 2
+#define CLK_MCHI 3
+#define CLK_VSENS_COMPO 4
+#define CLK_FRC1_REMOTE 5
+#define CLK_LPC_0 6
+#define CLK_LPC_1 7
+#endif
diff --git a/include/dt-bindings/clock/stih410-clks.h b/include/dt-bindings/clock/stih410-clks.h
new file mode 100644
index 000000000000..2097a4bbe155
--- /dev/null
+++ b/include/dt-bindings/clock/stih410-clks.h
@@ -0,0 +1,25 @@
+/*
+ * This header provides clock index constants for the STMicroelectronics
+ * STiH410 SoC.
+ */
+#ifndef _DT_BINDINGS_CLK_STIH410
+#define _DT_BINDINGS_CLK_STIH410
+
+#include "stih407-clks.h"
+
+/* STiH410 introduces new clock outputs compared to STiH407 */
+
+/* CLOCKGEN C0 */
+#define CLK_TX_ICN_HADES 32
+#define CLK_RX_ICN_HADES 33
+#define CLK_ICN_REG_16 34
+#define CLK_PP_HADES 35
+#define CLK_CLUST_HADES 36
+#define CLK_HWPE_HADES 37
+#define CLK_FC_HADES 38
+
+/* CLOCKGEN D0 */
+#define CLK_PCMR10_MASTER 4
+#define CLK_USB2_PHY 5
+
+#endif
diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h
index fc12621fb432..534c03f8ad72 100644
--- a/include/dt-bindings/clock/tegra114-car.h
+++ b/include/dt-bindings/clock/tegra114-car.h
@@ -49,7 +49,7 @@
#define TEGRA114_CLK_I2S0 30
/* 31 */
-/* 32 */
+#define TEGRA114_CLK_MC 32
/* 33 */
#define TEGRA114_CLK_APBDMA 34
/* 35 */
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
index 8a4c5892890f..af9bc9a3ddbc 100644
--- a/include/dt-bindings/clock/tegra124-car.h
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -48,7 +48,7 @@
#define TEGRA124_CLK_I2S0 30
/* 31 */
-/* 32 */
+#define TEGRA124_CLK_MC 32
/* 33 */
#define TEGRA124_CLK_APBDMA 34
/* 35 */
@@ -337,6 +337,10 @@
#define TEGRA124_CLK_DSIB_MUX 310
#define TEGRA124_CLK_SOR0_LVDS 311
#define TEGRA124_CLK_XUSB_SS_DIV2 312
-#define TEGRA124_CLK_CLK_MAX 313
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#define TEGRA124_CLK_CLK_MAX 315
#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h
index 9406207cfac8..04500b243a4d 100644
--- a/include/dt-bindings/clock/tegra20-car.h
+++ b/include/dt-bindings/clock/tegra20-car.h
@@ -49,7 +49,7 @@
/* 30 */
#define TEGRA20_CLK_CACHE2 31
-#define TEGRA20_CLK_MEM 32
+#define TEGRA20_CLK_MC 32
#define TEGRA20_CLK_AHBDMA 33
#define TEGRA20_CLK_APBDMA 34
/* 35 */
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 00953d9484cb..801c0ac50c47 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -21,24 +21,24 @@
#define VF610_CLK_FASK_CLK_SEL 8
#define VF610_CLK_AUDIO_EXT 9
#define VF610_CLK_ENET_EXT 10
-#define VF610_CLK_PLL1_MAIN 11
+#define VF610_CLK_PLL1_SYS 11
#define VF610_CLK_PLL1_PFD1 12
#define VF610_CLK_PLL1_PFD2 13
#define VF610_CLK_PLL1_PFD3 14
#define VF610_CLK_PLL1_PFD4 15
-#define VF610_CLK_PLL2_MAIN 16
+#define VF610_CLK_PLL2_BUS 16
#define VF610_CLK_PLL2_PFD1 17
#define VF610_CLK_PLL2_PFD2 18
#define VF610_CLK_PLL2_PFD3 19
#define VF610_CLK_PLL2_PFD4 20
-#define VF610_CLK_PLL3_MAIN 21
+#define VF610_CLK_PLL3_USB_OTG 21
#define VF610_CLK_PLL3_PFD1 22
#define VF610_CLK_PLL3_PFD2 23
#define VF610_CLK_PLL3_PFD3 24
#define VF610_CLK_PLL3_PFD4 25
-#define VF610_CLK_PLL4_MAIN 26
-#define VF610_CLK_PLL5_MAIN 27
-#define VF610_CLK_PLL6_MAIN 28
+#define VF610_CLK_PLL4_AUDIO 26
+#define VF610_CLK_PLL5_ENET 27
+#define VF610_CLK_PLL6_VIDEO 28
#define VF610_CLK_PLL3_MAIN_DIV 29
#define VF610_CLK_PLL4_MAIN_DIV 30
#define VF610_CLK_PLL6_MAIN_DIV 31
@@ -166,6 +166,32 @@
#define VF610_CLK_DMAMUX3 153
#define VF610_CLK_FLEXCAN0_EN 154
#define VF610_CLK_FLEXCAN1_EN 155
-#define VF610_CLK_END 156
+#define VF610_CLK_PLL7_USB_HOST 156
+#define VF610_CLK_USBPHY0 157
+#define VF610_CLK_USBPHY1 158
+#define VF610_CLK_LVDS1_IN 159
+#define VF610_CLK_ANACLK1 160
+#define VF610_CLK_PLL1_BYPASS_SRC 161
+#define VF610_CLK_PLL2_BYPASS_SRC 162
+#define VF610_CLK_PLL3_BYPASS_SRC 163
+#define VF610_CLK_PLL4_BYPASS_SRC 164
+#define VF610_CLK_PLL5_BYPASS_SRC 165
+#define VF610_CLK_PLL6_BYPASS_SRC 166
+#define VF610_CLK_PLL7_BYPASS_SRC 167
+#define VF610_CLK_PLL1 168
+#define VF610_CLK_PLL2 169
+#define VF610_CLK_PLL3 170
+#define VF610_CLK_PLL4 171
+#define VF610_CLK_PLL5 172
+#define VF610_CLK_PLL6 173
+#define VF610_CLK_PLL7 174
+#define VF610_PLL1_BYPASS 175
+#define VF610_PLL2_BYPASS 176
+#define VF610_PLL3_BYPASS 177
+#define VF610_PLL4_BYPASS 178
+#define VF610_PLL5_BYPASS 179
+#define VF610_PLL6_BYPASS 180
+#define VF610_PLL7_BYPASS 181
+#define VF610_CLK_END 182
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h
index e835037a77b4..ab6cbba45401 100644
--- a/include/dt-bindings/dma/at91.h
+++ b/include/dt-bindings/dma/at91.h
@@ -9,6 +9,8 @@
#ifndef __DT_BINDINGS_AT91_DMA_H__
#define __DT_BINDINGS_AT91_DMA_H__
+/* ---------- HDMAC ---------- */
+
/*
* Source and/or destination peripheral ID
*/
@@ -24,4 +26,27 @@
#define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */
#define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */
+
+/* ---------- XDMAC ---------- */
+#define AT91_XDMAC_DT_MEM_IF_MASK (0x1)
+#define AT91_XDMAC_DT_MEM_IF_OFFSET (13)
+#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \
+ << AT91_XDMAC_DT_MEM_IF_OFFSET)
+#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \
+ & AT91_XDMAC_DT_MEM_IF_MASK)
+
+#define AT91_XDMAC_DT_PER_IF_MASK (0x1)
+#define AT91_XDMAC_DT_PER_IF_OFFSET (14)
+#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \
+ << AT91_XDMAC_DT_PER_IF_OFFSET)
+#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \
+ & AT91_XDMAC_DT_PER_IF_MASK)
+
+#define AT91_XDMAC_DT_PERID_MASK (0x7f)
+#define AT91_XDMAC_DT_PERID_OFFSET (24)
+#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \
+ << AT91_XDMAC_DT_PERID_OFFSET)
+#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \
+ & AT91_XDMAC_DT_PERID_MASK)
+
#endif /* __DT_BINDINGS_AT91_DMA_H__ */
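Editor's note: the XDMAC additions above pack a channel's memory interface, peripheral interface and peripheral ID into a single device-tree configuration cell. The following is a minimal standalone sketch of the encode/decode round trip (plain C outside the kernel; the interface number and peripheral ID are made up for illustration):

/* Standalone sketch: encode and decode an XDMAC DT config cell. */
#include <stdio.h>

#define AT91_XDMAC_DT_MEM_IF_MASK	(0x1)
#define AT91_XDMAC_DT_MEM_IF_OFFSET	(13)
#define AT91_XDMAC_DT_MEM_IF(mem_if)	(((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \
					 << AT91_XDMAC_DT_MEM_IF_OFFSET)
#define AT91_XDMAC_DT_GET_MEM_IF(cfg)	(((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \
					 & AT91_XDMAC_DT_MEM_IF_MASK)
#define AT91_XDMAC_DT_PERID_MASK	(0x7f)
#define AT91_XDMAC_DT_PERID_OFFSET	(24)
#define AT91_XDMAC_DT_PERID(perid)	(((perid) & AT91_XDMAC_DT_PERID_MASK) \
					 << AT91_XDMAC_DT_PERID_OFFSET)
#define AT91_XDMAC_DT_GET_PERID(cfg)	(((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \
					 & AT91_XDMAC_DT_PERID_MASK)

int main(void)
{
	/* Hypothetical channel: memory interface 1, peripheral ID 13. */
	unsigned int cfg = AT91_XDMAC_DT_MEM_IF(1) | AT91_XDMAC_DT_PERID(13);

	/* A consumer would use the GET_* macros to recover both fields. */
	printf("mem_if=%u perid=%u\n",
	       AT91_XDMAC_DT_GET_MEM_IF(cfg), AT91_XDMAC_DT_GET_PERID(cfg));
	return 0;
}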
diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h
new file mode 100644
index 000000000000..cf35a577e371
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/mips-gic.h
@@ -0,0 +1,9 @@
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#define GIC_SHARED 0
+#define GIC_LOCAL 1
+
+#endif
diff --git a/include/dt-bindings/memory/tegra114-mc.h b/include/dt-bindings/memory/tegra114-mc.h
new file mode 100644
index 000000000000..8f48985a3139
--- /dev/null
+++ b/include/dt-bindings/memory/tegra114-mc.h
@@ -0,0 +1,25 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA114_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA114_MC_H
+
+#define TEGRA_SWGROUP_PTC 0
+#define TEGRA_SWGROUP_DC 1
+#define TEGRA_SWGROUP_DCB 2
+#define TEGRA_SWGROUP_EPP 3
+#define TEGRA_SWGROUP_G2 4
+#define TEGRA_SWGROUP_AVPC 5
+#define TEGRA_SWGROUP_NV 6
+#define TEGRA_SWGROUP_HDA 7
+#define TEGRA_SWGROUP_HC 8
+#define TEGRA_SWGROUP_MSENC 9
+#define TEGRA_SWGROUP_PPCS 10
+#define TEGRA_SWGROUP_VDE 11
+#define TEGRA_SWGROUP_MPCORELP 12
+#define TEGRA_SWGROUP_MPCORE 13
+#define TEGRA_SWGROUP_VI 14
+#define TEGRA_SWGROUP_ISP 15
+#define TEGRA_SWGROUP_XUSB_HOST 16
+#define TEGRA_SWGROUP_XUSB_DEV 17
+#define TEGRA_SWGROUP_EMUCIF 18
+#define TEGRA_SWGROUP_TSEC 19
+
+#endif
diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h
new file mode 100644
index 000000000000..7d8ee798f34e
--- /dev/null
+++ b/include/dt-bindings/memory/tegra124-mc.h
@@ -0,0 +1,31 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA124_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA124_MC_H
+
+#define TEGRA_SWGROUP_PTC 0
+#define TEGRA_SWGROUP_DC 1
+#define TEGRA_SWGROUP_DCB 2
+#define TEGRA_SWGROUP_AFI 3
+#define TEGRA_SWGROUP_AVPC 4
+#define TEGRA_SWGROUP_HDA 5
+#define TEGRA_SWGROUP_HC 6
+#define TEGRA_SWGROUP_MSENC 7
+#define TEGRA_SWGROUP_PPCS 8
+#define TEGRA_SWGROUP_SATA 9
+#define TEGRA_SWGROUP_VDE 10
+#define TEGRA_SWGROUP_MPCORELP 11
+#define TEGRA_SWGROUP_MPCORE 12
+#define TEGRA_SWGROUP_ISP2 13
+#define TEGRA_SWGROUP_XUSB_HOST 14
+#define TEGRA_SWGROUP_XUSB_DEV 15
+#define TEGRA_SWGROUP_ISP2B 16
+#define TEGRA_SWGROUP_TSEC 17
+#define TEGRA_SWGROUP_A9AVP 18
+#define TEGRA_SWGROUP_GPU 19
+#define TEGRA_SWGROUP_SDMMC1A 20
+#define TEGRA_SWGROUP_SDMMC2A 21
+#define TEGRA_SWGROUP_SDMMC3A 22
+#define TEGRA_SWGROUP_SDMMC4A 23
+#define TEGRA_SWGROUP_VIC 24
+#define TEGRA_SWGROUP_VI 25
+
+#endif
diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h
new file mode 100644
index 000000000000..502beb03d777
--- /dev/null
+++ b/include/dt-bindings/memory/tegra30-mc.h
@@ -0,0 +1,24 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA30_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA30_MC_H
+
+#define TEGRA_SWGROUP_PTC 0
+#define TEGRA_SWGROUP_DC 1
+#define TEGRA_SWGROUP_DCB 2
+#define TEGRA_SWGROUP_EPP 3
+#define TEGRA_SWGROUP_G2 4
+#define TEGRA_SWGROUP_MPE 5
+#define TEGRA_SWGROUP_VI 6
+#define TEGRA_SWGROUP_AFI 7
+#define TEGRA_SWGROUP_AVPC 8
+#define TEGRA_SWGROUP_NV 9
+#define TEGRA_SWGROUP_NV2 10
+#define TEGRA_SWGROUP_HDA 11
+#define TEGRA_SWGROUP_HC 12
+#define TEGRA_SWGROUP_PPCS 13
+#define TEGRA_SWGROUP_SATA 14
+#define TEGRA_SWGROUP_VDE 15
+#define TEGRA_SWGROUP_MPCORELP 16
+#define TEGRA_SWGROUP_MPCORE 17
+#define TEGRA_SWGROUP_ISP 18
+
+#endif
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
new file mode 100644
index 000000000000..6c901930eb3e
--- /dev/null
+++ b/include/dt-bindings/phy/phy.h
@@ -0,0 +1,19 @@
+/*
+ *
+ * This header provides constants for the phy framework
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _DT_BINDINGS_PHY
+#define _DT_BINDINGS_PHY
+
+#define PHY_NONE 0
+#define PHY_TYPE_SATA 1
+#define PHY_TYPE_PCIE 2
+#define PHY_TYPE_USB2 3
+#define PHY_TYPE_USB3 4
+
+#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 0fee6ff77ffc..bbca3d038900 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -20,6 +20,11 @@
#define AT91_PINCTRL_PULL_UP_DEGLITCH (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DEGLITCH)
+#define AT91_PINCTRL_DRIVE_STRENGTH_DEFAULT (0x0 << 5)
+#define AT91_PINCTRL_DRIVE_STRENGTH_LOW (0x1 << 5)
+#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5)
+#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5)
+
#define AT91_PIOA 0
#define AT91_PIOB 1
#define AT91_PIOC 2
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 3d33794e4f3e..7448edff4723 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -40,8 +40,8 @@
/* Active pin states */
#define PIN_OUTPUT (0 | PULL_DIS)
-#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
-#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
+#define PIN_OUTPUT_PULLUP (PULL_UP)
+#define PIN_OUTPUT_PULLDOWN (0)
#define PIN_INPUT (INPUT_EN | PULL_DIS)
#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
new file mode 100644
index 000000000000..fa74d7cc960c
--- /dev/null
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -0,0 +1,142 @@
+/*
+ * This header provides constants for the Qualcomm PMIC GPIO binding.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H
+#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H
+
+#define PMIC_GPIO_PULL_UP_30 0
+#define PMIC_GPIO_PULL_UP_1P5 1
+#define PMIC_GPIO_PULL_UP_31P5 2
+#define PMIC_GPIO_PULL_UP_1P5_30 3
+
+#define PMIC_GPIO_STRENGTH_NO 0
+#define PMIC_GPIO_STRENGTH_HIGH 1
+#define PMIC_GPIO_STRENGTH_MED 2
+#define PMIC_GPIO_STRENGTH_LOW 3
+
+/*
+ * Note: PM8018 GPIO3 and GPIO4 support
+ * only S3 and L2 options (1.8V)
+ */
+#define PM8018_GPIO_L6 0
+#define PM8018_GPIO_L5 1
+#define PM8018_GPIO_S3 2
+#define PM8018_GPIO_L14 3
+#define PM8018_GPIO_L2 4
+#define PM8018_GPIO_L4 5
+#define PM8018_GPIO_VDD 6
+
+/*
+ * Note: PM8038 GPIO7 and GPIO8 support
+ * only L11 and L4 options (1.8V)
+ */
+#define PM8038_GPIO_VPH 0
+#define PM8038_GPIO_BB 1
+#define PM8038_GPIO_L11 2
+#define PM8038_GPIO_L15 3
+#define PM8038_GPIO_L4 4
+#define PM8038_GPIO_L3 5
+#define PM8038_GPIO_L17 6
+
+#define PM8058_GPIO_VPH 0
+#define PM8058_GPIO_BB 1
+#define PM8058_GPIO_S3 2
+#define PM8058_GPIO_L3 3
+#define PM8058_GPIO_L7 4
+#define PM8058_GPIO_L6 5
+#define PM8058_GPIO_L5 6
+#define PM8058_GPIO_L2 7
+
+#define PM8917_GPIO_VPH 0
+#define PM8917_GPIO_S4 2
+#define PM8917_GPIO_L15 3
+#define PM8917_GPIO_L4 4
+#define PM8917_GPIO_L3 5
+#define PM8917_GPIO_L17 6
+
+#define PM8921_GPIO_VPH 0
+#define PM8921_GPIO_BB 1
+#define PM8921_GPIO_S4 2
+#define PM8921_GPIO_L15 3
+#define PM8921_GPIO_L4 4
+#define PM8921_GPIO_L3 5
+#define PM8921_GPIO_L17 6
+
+/*
+ * Note: PM8941 GPIOs 15 to 18 support
+ * only S3 and L6 options (1.8V)
+ */
+#define PM8941_GPIO_VPH 0
+#define PM8941_GPIO_L1 1
+#define PM8941_GPIO_S3 2
+#define PM8941_GPIO_L6 3
+
+/*
+ * Note: PMA8084 GPIOs 15 to 18 support
+ * only S4 and L6 options (1.8V)
+ */
+#define PMA8084_GPIO_VPH 0
+#define PMA8084_GPIO_L1 1
+#define PMA8084_GPIO_S4 2
+#define PMA8084_GPIO_L6 3
+
+/* To be used with "function" */
+#define PMIC_GPIO_FUNC_NORMAL "normal"
+#define PMIC_GPIO_FUNC_PAIRED "paired"
+#define PMIC_GPIO_FUNC_FUNC1 "func1"
+#define PMIC_GPIO_FUNC_FUNC2 "func2"
+#define PMIC_GPIO_FUNC_DTEST1 "dtest1"
+#define PMIC_GPIO_FUNC_DTEST2 "dtest2"
+#define PMIC_GPIO_FUNC_DTEST3 "dtest3"
+#define PMIC_GPIO_FUNC_DTEST4 "dtest4"
+
+#define PM8038_GPIO1_2_LPG_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO3_5V_BOOST_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO4_SSBI_ALT_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO5_6_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO10_11_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO6_7_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO9_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO6_12_KYPD_DRV PMIC_GPIO_FUNC_FUNC2
+
+#define PM8058_GPIO7_8_MP3_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO7_8_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO9_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO24_26_LPG_DRV PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO33_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO34_35_MP3_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO36_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO37_UPL_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO37_UART_M_RX PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO38_39_CLK_32KHZ PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1
+
+#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
+#define PM8917_GPIO25_26_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO37_38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO37_38_MP3_CLK PMIC_GPIO_FUNC_FUNC2
+
+#define PM8941_GPIO9_14_KYPD_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PM8941_GPIO23_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO23_26_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2
+#define PM8941_GPIO31_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO33_36_LPG_DRV_3D PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO33_36_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2
+
+#define PMA8084_GPIO4_5_LPG_DRV PMIC_GPIO_FUNC_FUNC1
+#define PMA8084_GPIO7_10_LPG_DRV PMIC_GPIO_FUNC_FUNC1
+#define PMA8084_GPIO5_14_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PMA8084_GPIO19_21_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PMA8084_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PMA8084_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PMA8084_GPIO22_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+
+#endif
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
new file mode 100644
index 000000000000..d2c7dabe3223
--- /dev/null
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -0,0 +1,44 @@
+/*
+ * This header provides constants for the Qualcomm PMIC's
+ * Multi-Purpose Pin binding.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
+#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
+
+/* power-source */
+#define PM8841_MPP_VPH 0
+#define PM8841_MPP_S3 2
+
+#define PM8941_MPP_VPH 0
+#define PM8941_MPP_L1 1
+#define PM8941_MPP_S3 2
+#define PM8941_MPP_L6 3
+
+#define PMA8084_MPP_VPH 0
+#define PMA8084_MPP_L1 1
+#define PMA8084_MPP_S4 2
+#define PMA8084_MPP_L6 3
+
+/*
+ * Analog Input - Set the source for analog input.
+ * To be used with "qcom,amux-route" property
+ */
+#define PMIC_MPP_AMUX_ROUTE_CH5 0
+#define PMIC_MPP_AMUX_ROUTE_CH6 1
+#define PMIC_MPP_AMUX_ROUTE_CH7 2
+#define PMIC_MPP_AMUX_ROUTE_CH8 3
+#define PMIC_MPP_AMUX_ROUTE_ABUS1 4
+#define PMIC_MPP_AMUX_ROUTE_ABUS2 5
+#define PMIC_MPP_AMUX_ROUTE_ABUS3 6
+#define PMIC_MPP_AMUX_ROUTE_ABUS4 7
+
+/* To be used with "function" */
+#define PMIC_MPP_FUNC_NORMAL "normal"
+#define PMIC_MPP_FUNC_PAIRED "paired"
+#define PMIC_MPP_FUNC_DTEST1 "dtest1"
+#define PMIC_MPP_FUNC_DTEST2 "dtest2"
+#define PMIC_MPP_FUNC_DTEST3 "dtest3"
+#define PMIC_MPP_FUNC_DTEST4 "dtest4"
+
+#endif
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index cd5788be82ce..743e66a95e13 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -28,5 +28,7 @@
#define RK_FUNC_GPIO 0
#define RK_FUNC_1 1
#define RK_FUNC_2 2
+#define RK_FUNC_3 3
+#define RK_FUNC_4 4
#endif
diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h
new file mode 100644
index 000000000000..cf28631d7109
--- /dev/null
+++ b/include/dt-bindings/regulator/maxim,max77802.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for the Maxim 77802 PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H
+#define _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H
+
+/* Regulator operating modes */
+#define MAX77802_OPMODE_LP 1
+#define MAX77802_OPMODE_NORMAL 3
+
+#endif /* _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H */
diff --git a/include/dt-bindings/reset-controller/stih407-resets.h b/include/dt-bindings/reset-controller/stih407-resets.h
new file mode 100644
index 000000000000..02d4328fe479
--- /dev/null
+++ b/include/dt-bindings/reset-controller/stih407-resets.h
@@ -0,0 +1,61 @@
+/*
+ * This header provides constants for the reset-controller-based
+ * peripheral powerdown requests on the STMicroelectronics
+ * STiH407 SoC.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH407
+#define _DT_BINDINGS_RESET_CONTROLLER_STIH407
+
+/* Powerdown requests control 0 */
+#define STIH407_EMISS_POWERDOWN 0
+#define STIH407_NAND_POWERDOWN 1
+
+/* Synp GMAC PowerDown */
+#define STIH407_ETH1_POWERDOWN 2
+
+/* Powerdown requests control 1 */
+#define STIH407_USB3_POWERDOWN 3
+#define STIH407_USB2_PORT1_POWERDOWN 4
+#define STIH407_USB2_PORT0_POWERDOWN 5
+#define STIH407_PCIE1_POWERDOWN 6
+#define STIH407_PCIE0_POWERDOWN 7
+#define STIH407_SATA1_POWERDOWN 8
+#define STIH407_SATA0_POWERDOWN 9
+
+/* Reset defines */
+#define STIH407_ETH1_SOFTRESET 0
+#define STIH407_MMC1_SOFTRESET 1
+#define STIH407_PICOPHY_SOFTRESET 2
+#define STIH407_IRB_SOFTRESET 3
+#define STIH407_PCIE0_SOFTRESET 4
+#define STIH407_PCIE1_SOFTRESET 5
+#define STIH407_SATA0_SOFTRESET 6
+#define STIH407_SATA1_SOFTRESET 7
+#define STIH407_MIPHY0_SOFTRESET 8
+#define STIH407_MIPHY1_SOFTRESET 9
+#define STIH407_MIPHY2_SOFTRESET 10
+#define STIH407_SATA0_PWR_SOFTRESET 11
+#define STIH407_SATA1_PWR_SOFTRESET 12
+#define STIH407_DELTA_SOFTRESET 13
+#define STIH407_BLITTER_SOFTRESET 14
+#define STIH407_HDTVOUT_SOFTRESET 15
+#define STIH407_HDQVDP_SOFTRESET 16
+#define STIH407_VDP_AUX_SOFTRESET 17
+#define STIH407_COMPO_SOFTRESET 18
+#define STIH407_HDMI_TX_PHY_SOFTRESET 19
+#define STIH407_JPEG_DEC_SOFTRESET 20
+#define STIH407_VP8_DEC_SOFTRESET 21
+#define STIH407_GPU_SOFTRESET 22
+#define STIH407_HVA_SOFTRESET 23
+#define STIH407_ERAM_HVA_SOFTRESET 24
+#define STIH407_LPM_SOFTRESET 25
+#define STIH407_KEYSCAN_SOFTRESET 26
+#define STIH407_USB2_PORT0_SOFTRESET 27
+#define STIH407_USB2_PORT1_SOFTRESET 28
+
+/* Picophy reset defines */
+#define STIH407_PICOPHY0_RESET 0
+#define STIH407_PICOPHY1_RESET 1
+#define STIH407_PICOPHY2_RESET 2
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH407 */
diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h
new file mode 100644
index 000000000000..0c6d6a3c15a2
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l32.h
@@ -0,0 +1,26 @@
+#ifndef __DT_CS35L32_H
+#define __DT_CS35L32_H
+
+#define CS35L32_BOOST_MGR_AUTO 0
+#define CS35L32_BOOST_MGR_AUTO_AUDIO 1
+#define CS35L32_BOOST_MGR_BYPASS 2
+#define CS35L32_BOOST_MGR_FIXED 3
+
+#define CS35L32_DATA_CFG_LR_VP 0
+#define CS35L32_DATA_CFG_LR_STAT 1
+#define CS35L32_DATA_CFG_LR 2
+#define CS35L32_DATA_CFG_LR_VPSTAT 3
+
+#define CS35L32_BATT_THRESH_3_1V 0
+#define CS35L32_BATT_THRESH_3_2V 1
+#define CS35L32_BATT_THRESH_3_3V 2
+#define CS35L32_BATT_THRESH_3_4V 3
+
+#define CS35L32_BATT_RECOV_3_1V 0
+#define CS35L32_BATT_RECOV_3_2V 1
+#define CS35L32_BATT_RECOV_3_3V 2
+#define CS35L32_BATT_RECOV_3_4V 3
+#define CS35L32_BATT_RECOV_3_5V 4
+#define CS35L32_BATT_RECOV_3_6V 5
+
+#endif /* __DT_CS35L32_H */
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
new file mode 100644
index 000000000000..85aaf66690f9
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -0,0 +1,13 @@
+/*
+ * This header provides constants for the nvidia,tegra124-soctherm binding.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H
+#define _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H
+
+#define TEGRA124_SOCTHERM_SENSOR_CPU 0
+#define TEGRA124_SOCTHERM_SENSOR_MEM 1
+#define TEGRA124_SOCTHERM_SENSOR_GPU 2
+#define TEGRA124_SOCTHERM_SENSOR_PLLX 3
+
+#endif
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index 59822a995858..b5e6b0069ac7 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -11,7 +11,7 @@
#define _DT_BINDINGS_THERMAL_THERMAL_H
/* On cooling devices upper and lower limits */
-#define THERMAL_NO_LIMIT (-1UL)
+#define THERMAL_NO_LIMIT (~0)
#endif
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 7dd473496180..c0754abb2f56 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -19,6 +19,47 @@
extern struct key_type key_type_asymmetric;
/*
+ * Identifiers for an asymmetric key ID. We have three ways of looking up a
+ * key derived from an X.509 certificate:
+ *
+ * (1) Serial Number & Issuer. Non-optional. This is the only valid way to
+ * map a PKCS#7 signature to an X.509 certificate.
+ *
+ * (2) Issuer & Subject Unique IDs. Optional. These were the original way to
+ * match X.509 certificates, but have fallen into disuse in favour of (3).
+ *
+ * (3) Auth & Subject Key Identifiers. Optional. SKIDs are only provided on
+ * CA keys that are intended to sign other keys, so don't appear in end
+ * user certificates unless forced.
+ *
+ * We could also support a PGP key identifier, which is just a SHA1 sum of the
+ * public key and certain parameters, but since we don't support PGP keys at
+ * the moment, we shall ignore those.
+ *
+ * What we actually do is provide a place where binary identifiers can be
+ * stashed and then compare against them when checking for an id match.
+ */
+struct asymmetric_key_id {
+ unsigned short len;
+ unsigned char data[];
+};
+
+struct asymmetric_key_ids {
+ void *id[2];
+};
+
+extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2);
+
+extern bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2);
+
+extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
+ size_t len_1,
+ const void *val_2,
+ size_t len_2);
+
+/*
* The payload is at the discretion of the subtype.
*/
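Editor's note: the block comment above describes the lookup scheme in terms of opaque binary identifiers that are stashed and byte-compared. Below is a rough userspace illustration (not kernel code) of that idea: an ID is built from two binary fragments and matched on raw bytes. The concatenation step is an assumption made for the example, since the header only declares asymmetric_key_generate_id() without defining its behaviour.

/* Userspace sketch of the stash-and-compare idea described above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct asymmetric_key_id {
	unsigned short len;
	unsigned char data[];
};

/* Assumed behaviour: simply concatenate the two blobs into one ID. */
static struct asymmetric_key_id *generate_id(const void *v1, size_t l1,
					     const void *v2, size_t l2)
{
	struct asymmetric_key_id *kid = malloc(sizeof(*kid) + l1 + l2);

	if (!kid)
		return NULL;
	kid->len = l1 + l2;
	memcpy(kid->data, v1, l1);
	memcpy(kid->data + l1, v2, l2);
	return kid;
}

/* Matching is a plain length check plus byte comparison. */
static bool id_same(const struct asymmetric_key_id *a,
		    const struct asymmetric_key_id *b)
{
	return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
}

int main(void)
{
	struct asymmetric_key_id *a = generate_id("serial", 6, "issuer", 6);
	struct asymmetric_key_id *b = generate_id("serial", 6, "issuer", 6);

	printf("match=%d\n", a && b && id_same(a, b));
	free(a);
	free(b);
	return 0;
}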
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index 3ab1873a4bfa..cebefb069c44 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -40,7 +40,6 @@ struct key_preparsed_payload;
extern int user_preparse(struct key_preparsed_payload *prep);
extern void user_free_preparse(struct key_preparsed_payload *prep);
extern int user_update(struct key *key, struct key_preparsed_payload *prep);
-extern int user_match(const struct key *key, const void *criterion);
extern void user_revoke(struct key *key);
extern void user_destroy(struct key *key);
extern void user_describe(const struct key *user, struct seq_file *m);
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index ad9db6045b2f..b3f45a578344 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -60,7 +60,8 @@ struct arch_timer_cpu {
#ifdef CONFIG_KVM_ARM_TIMER
int kvm_timer_hyp_init(void);
-int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_enable(struct kvm *kvm);
+void kvm_timer_init(struct kvm *kvm);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -77,11 +78,8 @@ static inline int kvm_timer_hyp_init(void)
return 0;
};
-static inline int kvm_timer_init(struct kvm *kvm)
-{
- return 0;
-}
-
+static inline void kvm_timer_enable(struct kvm *kvm) {}
+static inline void kvm_timer_init(struct kvm *kvm) {}
static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq) {}
static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 35b0c121bb65..ac4888dc86bc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -25,26 +25,25 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#define VGIC_NR_IRQS 256
+#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
-#define VGIC_MAX_CPUS KVM_MAX_VCPUS
#define VGIC_V2_MAX_LRS (1 << 6)
#define VGIC_V3_MAX_LRS 16
+#define VGIC_MAX_IRQS 1024
/* Sanity checks... */
-#if (VGIC_MAX_CPUS > 8)
+#if (KVM_MAX_VCPUS > 8)
#error Invalid number of CPU interfaces
#endif
-#if (VGIC_NR_IRQS & 31)
+#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
#endif
-#if (VGIC_NR_IRQS > 1024)
+#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS must be <= 1024"
#endif
@@ -54,19 +53,33 @@
* - a bunch of shared interrupts (SPI)
*/
struct vgic_bitmap {
- union {
- u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
- DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
- } percpu[VGIC_MAX_CPUS];
- union {
- u32 reg[VGIC_NR_SHARED_IRQS / 32];
- DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
- } shared;
+ /*
+ * - One UL per VCPU for private interrupts (assumes UL is at
+ * least 32 bits)
+ * - As many UL as necessary for shared interrupts.
+ *
+ * The private interrupts are accessed via the "private"
+ * field, one UL per vcpu (the state for vcpu n is in
+ * private[n]). The shared interrupts are accessed via the
+ * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
+ */
+ unsigned long *private;
+ unsigned long *shared;
};
struct vgic_bytemap {
- u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
- u32 shared[VGIC_NR_SHARED_IRQS / 4];
+ /*
+ * - 8 u32 per VCPU for private interrupts
+ * - As many u32 as necessary for shared interrupts.
+ *
+ * The private interrupts are accessed via the "private"
+ * field, (the state for vcpu n is in private[n*8] to
+ * private[n*8 + 7]). The shared interrupts are accessed via
+ * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
+ * shared[(n-32)/4] word).
+ */
+ u32 *private;
+ u32 *shared;
};
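Editor's note: the two layout comments above are easiest to read as index arithmetic. The following standalone sketch spells out where a given interrupt's state lands (plain C, detached from the kernel types; VGIC_NR_PRIVATE_IRQS is 32 as defined earlier in this header, and the vcpu/irq numbers are arbitrary examples):

/* Sketch of the index arithmetic described in the comments above. */
#include <stdio.h>

#define VGIC_NR_PRIVATE_IRQS 32

/* vgic_bitmap: one unsigned long of private IRQs per VCPU, then a shared
 * bitmap where SPI n sits at bit (n - 32). */
static void bitmap_location(int vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		printf("private[%d], bit %d\n", vcpu, irq);
	else
		printf("shared bitmap, bit %d\n", irq - VGIC_NR_PRIVATE_IRQS);
}

/* vgic_bytemap: 8 u32 (32 bytes) of private state per VCPU, one byte per
 * IRQ; SPI n is byte (n - 32) % 4 of shared[(n - 32) / 4]. */
static void bytemap_location(int vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		printf("private[%d], byte %d\n", vcpu * 8 + irq / 4, irq % 4);
	else
		printf("shared[%d], byte %d\n", (irq - 32) / 4, (irq - 32) % 4);
}

int main(void)
{
	bitmap_location(1, 27);   /* PPI 27 of VCPU 1 */
	bytemap_location(0, 50);  /* SPI 50 */
	return 0;
}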
struct kvm_vcpu;
@@ -127,6 +140,9 @@ struct vgic_dist {
bool in_kernel;
bool ready;
+ int nr_cpus;
+ int nr_irqs;
+
/* Virtual control interface mapping */
void __iomem *vctrl_base;
@@ -140,11 +156,25 @@ struct vgic_dist {
/* Interrupt enabled (one bit per IRQ) */
struct vgic_bitmap irq_enabled;
- /* Interrupt 'pin' level */
- struct vgic_bitmap irq_state;
+ /* Level-triggered interrupt external input is asserted */
+ struct vgic_bitmap irq_level;
+
+ /*
+ * Interrupt state is pending on the distributor
+ */
+ struct vgic_bitmap irq_pending;
- /* Level-triggered interrupt in progress */
- struct vgic_bitmap irq_active;
+ /*
+ * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
+ * interrupts. Essentially holds the state of the flip-flop in
+ * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
+ * Once set, it is only cleared for level-triggered interrupts on
+ * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
+ */
+ struct vgic_bitmap irq_soft_pend;
+
+ /* Level-triggered interrupt queued on VCPU interface */
+ struct vgic_bitmap irq_queued;
/* Interrupt priority. Not used yet. */
struct vgic_bytemap irq_priority;
@@ -152,15 +182,36 @@ struct vgic_dist {
/* Level/edge triggered */
struct vgic_bitmap irq_cfg;
- /* Source CPU per SGI and target CPU */
- u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
-
- /* Target CPU for each IRQ */
- u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS];
- struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS];
+ /*
+ * Source CPU per SGI and target CPU:
+ *
+ * Each byte represents an SGI observable on a VCPU; each bit of
+ * that byte indicates whether the corresponding VCPU has
+ * generated this interrupt. This is a GICv2 feature only.
+ *
+ * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
+ * the SGIs observable on VCPUn.
+ */
+ u8 *irq_sgi_sources;
+
+ /*
+ * Target CPU for each SPI:
+ *
+ * Array of available SPIs, each byte indicating the target
+ * VCPU for that SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
+ */
+ u8 *irq_spi_cpu;
+
+ /*
+ * Reverse lookup of irq_spi_cpu, for faster computation of pending state:
+ *
+ * Array of bitmaps, one per VCPU, describing if IRQn is
+ * routed to a particular VCPU.
+ */
+ struct vgic_bitmap *irq_spi_target;
/* Bitmap indicating which CPU has something pending */
- unsigned long irq_pending_on_cpu;
+ unsigned long *irq_pending_on_cpu;
#endif
};
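The flattened irq_sgi_sources/irq_spi_cpu arrays above encode their indexing in the comments; as a quick illustration (helper names are local to this sketch, not kernel API, and assume the 16-SGIs-per-VCPU layout described):

#include <stdint.h>

#define NR_SGIS		16	/* SGIs per VCPU, as in VGIC_NR_SGIS */
#define FIRST_SPI	32	/* SPIs start at IRQ 32 */

/* Byte whose bits say which VCPUs generated SGI @sgi towards @vcpu. */
static inline uint8_t sgi_sources_of(const uint8_t *irq_sgi_sources,
				     int vcpu, int sgi)
{
	return irq_sgi_sources[vcpu * NR_SGIS + sgi];
}

/* Target VCPU byte for SPI number @irq (irq >= 32). */
static inline uint8_t spi_target_of(const uint8_t *irq_spi_cpu, int irq)
{
	return irq_spi_cpu[irq - FIRST_SPI];
}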
@@ -168,8 +219,8 @@ struct vgic_v2_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_misr; /* Saved only */
- u32 vgic_eisr[2]; /* Saved only */
- u32 vgic_elrsr[2]; /* Saved only */
+ u64 vgic_eisr; /* Saved only */
+ u64 vgic_elrsr; /* Saved only */
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
};
@@ -190,11 +241,11 @@ struct vgic_v3_cpu_if {
struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
- u8 vgic_irq_lr_map[VGIC_NR_IRQS];
+ u8 *vgic_irq_lr_map;
/* Pending interrupts on this VCPU */
DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
- DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS);
+ unsigned long *pending_shared;
/* Bitmap of used/free list registers */
DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
@@ -223,9 +274,10 @@ struct kvm_exit_mmio;
#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
-int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm);
-int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
@@ -235,7 +287,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
-#define vgic_initialized(k) ((k)->arch.vgic.ready)
+#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
+#define vgic_ready(k) ((k)->arch.vgic.ready)
int vgic_v2_probe(struct device_node *vgic_node,
const struct vgic_ops **ops,
@@ -269,7 +322,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr,
return -ENXIO;
}
-static inline int kvm_vgic_init(struct kvm *kvm)
+static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
return 0;
}
@@ -279,6 +332,14 @@ static inline int kvm_vgic_create(struct kvm *kvm)
return 0;
}
+static inline void kvm_vgic_destroy(struct kvm *kvm)
+{
+}
+
+static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+}
+
static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
return 0;
@@ -313,6 +374,11 @@ static inline bool vgic_initialized(struct kvm *kvm)
{
return true;
}
+
+static inline bool vgic_ready(struct kvm *kvm)
+{
+ return true;
+}
#endif
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 807cbc46d73e..856d381b1d5b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
#include <linux/device.h>
+#include <linux/property.h>
#ifndef _LINUX
#define _LINUX
@@ -123,6 +124,10 @@ int acpi_numa_init (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
+int __init acpi_parse_entries(char *id, unsigned long table_size,
+ acpi_tbl_entry_handler handler,
+ struct acpi_table_header *table_header,
+ int entry_id, unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -148,6 +153,7 @@ int acpi_unmap_lsapic(int cpu);
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;
@@ -423,15 +429,13 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
-static inline bool acpi_driver_match_device(struct device *dev,
- const struct device_driver *drv)
-{
- return !!acpi_match_device(drv->acpi_match_table, dev);
-}
-
+extern bool acpi_driver_match_device(struct device *dev,
+ const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
+void acpi_walk_dep_device_list(acpi_handle handle);
+struct platform_device *acpi_create_platform_device(struct acpi_device *);
#define ACPI_PTR(_ptr) (_ptr)
#else /* !CONFIG_ACPI */
@@ -442,6 +446,23 @@ int acpi_device_modalias(struct device *, char *, int);
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+struct fwnode_handle;
+
+static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+{
+ return false;
+}
+
+static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
@@ -552,16 +573,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
-#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME)
+#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
int acpi_dev_runtime_suspend(struct device *dev);
int acpi_dev_runtime_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
+struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
+int acpi_dev_pm_attach(struct device *dev, bool power_on);
#else
static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
+static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+{
+ return NULL;
+}
+static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+{
+ return -ENODEV;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -584,22 +615,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
#endif
-#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
-int acpi_dev_pm_attach(struct device *dev, bool power_on);
-void acpi_dev_pm_detach(struct device *dev, bool power_off);
-#else
-static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
-{
- return NULL;
-}
-static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
-{
- return -ENODEV;
-}
-static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
-#endif
-
#ifdef CONFIG_ACPI
__printf(3, 4)
void acpi_handle_printk(const char *level, acpi_handle handle,
@@ -660,4 +675,114 @@ do { \
#endif
#endif
+struct acpi_gpio_params {
+ unsigned int crs_entry_index;
+ unsigned int line_index;
+ bool active_low;
+};
+
+struct acpi_gpio_mapping {
+ const char *name;
+ const struct acpi_gpio_params *data;
+ unsigned int size;
+};
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
+int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios);
+
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
+{
+ if (adev)
+ adev->driver_gpios = NULL;
+}
+#else
+static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+#endif
+
+/* Device properties */
+
+#define MAX_ACPI_REFERENCE_ARGS 8
+struct acpi_reference_args {
+ struct acpi_device *adev;
+ size_t nargs;
+ u64 args[MAX_ACPI_REFERENCE_ARGS];
+};
+
+#ifdef CONFIG_ACPI
+int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+ acpi_object_type type, const union acpi_object **obj);
+int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
+ acpi_object_type type,
+ const union acpi_object **obj);
+int acpi_dev_get_property_reference(struct acpi_device *adev,
+ const char *name, size_t index,
+ struct acpi_reference_args *args);
+
+int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
+ void **valptr);
+int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
+ enum dev_prop_type proptype, void *val);
+int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+ enum dev_prop_type proptype, void *val, size_t nval);
+
+struct acpi_device *acpi_get_next_child(struct device *dev,
+ struct acpi_device *child);
+#else
+static inline int acpi_dev_get_property(struct acpi_device *adev,
+ const char *name, acpi_object_type type,
+ const union acpi_object **obj)
+{
+ return -ENXIO;
+}
+static inline int acpi_dev_get_property_array(struct acpi_device *adev,
+ const char *name,
+ acpi_object_type type,
+ const union acpi_object **obj)
+{
+ return -ENXIO;
+}
+static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
+ const char *name, const char *cells_name,
+ size_t index, struct acpi_reference_args *args)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_get(struct acpi_device *adev,
+ const char *propname,
+ void **valptr)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_read(struct acpi_device *adev,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val, size_t nval)
+{
+ return -ENXIO;
+}
+
+static inline struct acpi_device *acpi_get_next_child(struct device *dev,
+ struct acpi_device *child)
+{
+ return NULL;
+}
+
+#endif
+
#endif /*_LINUX_ACPI_H*/
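As an aside on the new acpi_gpio_mapping table: a driver-side usage sketch could look like the following. The device, property name and indices are invented for illustration, and kernel context is assumed rather than this being standalone.

/* Map the first GpioIo entry in the device's _CRS to the connection ID
 * "reset" so it can later be requested by name. All names are hypothetical. */
static const struct acpi_gpio_params reset_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping my_acpi_gpios[] = {
	{ "reset-gpios", &reset_gpio, 1 },
	{ }
};

static int my_probe_acpi_gpios(struct acpi_device *adev)
{
	int ret = acpi_dev_add_driver_gpios(adev, my_acpi_gpios);

	if (ret)
		return ret;
	/* ... the GPIO can now be looked up by the "reset" connection ID ... */
	return 0;
}

/* On removal, the driver would drop the mapping again with
 * acpi_dev_remove_driver_gpios(adev). */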
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c28f9c..4fef65e57023 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
#ifndef _AER_H_
#define _AER_H_
+#include <linux/types.h>
+
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 09a947e8bc87..642d6ae4030c 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -22,19 +22,6 @@ struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
-/*
- * Note ahci_platform_data is deprecated, it is only kept around for use
- * by the old da850 and spear13xx ahci code.
- * New drivers should instead declare their own platform_driver struct, and
- * use ahci_platform* functions in their own probe, suspend and resume methods.
- */
-struct ahci_platform_data {
- int (*init)(struct device *dev, void __iomem *addr);
- void (*exit)(struct device *dev);
- int (*suspend)(struct device *dev);
- int (*resume)(struct device *dev);
-};
-
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fdd7e1b61f60..2afc618b15ce 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -23,6 +23,7 @@
#define AMBA_NR_IRQS 9
#define AMBA_CID 0xb105f00d
+#define CORESIGHT_CID 0xb105900d
struct clk;
@@ -44,10 +45,15 @@ struct amba_driver {
const struct amba_id *id_table;
};
+/*
+ * Constants for the designer field of the Peripheral ID register. When bit 7
+ * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code.
+ */
enum amba_vendor {
AMBA_VENDOR_ARM = 0x41,
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
+ AMBA_VENDOR_LSI = 0xb6,
};
extern struct bus_type amba_bustype;
@@ -92,6 +98,16 @@ void amba_release_regions(struct amba_device *);
#define amba_pclk_disable(d) \
do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
+static inline int amba_pclk_prepare(struct amba_device *dev)
+{
+ return clk_prepare(dev->pclk);
+}
+
+static inline void amba_pclk_unprepare(struct amba_device *dev)
+{
+ clk_unprepare(dev->pclk);
+}
+
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index b9fde17f767c..5c618a084225 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -8,11 +8,6 @@ struct pata_platform_info {
* spacing used by ata_std_ports().
*/
unsigned int ioport_shift;
- /*
- * Indicate platform specific irq types and initial
- * IRQ flags when call request_irq()
- */
- unsigned int irq_flags;
};
extern int __pata_platform_probe(struct device *dev,
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index a495a959e8a7..33eb274cd0e6 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,8 +31,11 @@ struct ath9k_platform_data {
u32 gpio_mask;
u32 gpio_val;
+ bool endian_check;
bool is_clk_25mhz;
bool tx_gain_buffalo;
+ bool disable_2ghz;
+ bool disable_5ghz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 4c7a4b2104bf..9177947bf032 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_ATMEL_MCI_H
#define __LINUX_ATMEL_MCI_H
+#include <linux/types.h>
+
#define ATMCI_MAX_NR_SLOTS 2
/**
@@ -9,6 +11,7 @@
* @detect_pin: GPIO pin wired to the card detect switch
* @wp_pin: GPIO pin wired to the write protect sensor
* @detect_is_active_high: The state of the detect pin when it is active
+ * @non_removable: The slot is not removable; card detection is done only once
*
* If a given slot is not present on the board, @bus_width should be
* set to 0. The other fields are ignored in this case.
@@ -24,6 +27,7 @@ struct mci_slot_pdata {
int detect_pin;
int wp_pin;
bool detect_is_active_high;
+ bool non_removable;
};
/**
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
index 89a931babecf..b87c1c7c242a 100644
--- a/include/linux/atmel_tc.h
+++ b/include/linux/atmel_tc.h
@@ -44,12 +44,13 @@ struct atmel_tcb_config {
/**
* struct atmel_tc - information about a Timer/Counter Block
* @pdev: physical device
- * @iomem: resource associated with the I/O register
* @regs: mapping through which the I/O registers can be accessed
+ * @id: block id
* @tcb_config: configuration data from SoC
* @irq: irq for each of the three channels
* @clk: internal clock source for each of the three channels
* @node: list node, for tclib internal use
+ * @allocated: if already used, for tclib internal use
*
* On some platforms, each TC channel has its own clocks and IRQs,
* while on others, all TC channels share the same clock and IRQ.
@@ -61,15 +62,16 @@ struct atmel_tcb_config {
*/
struct atmel_tc {
struct platform_device *pdev;
- struct resource *iomem;
void __iomem *regs;
+ int id;
const struct atmel_tcb_config *tcb_config;
int irq[3];
struct clk *clk[3];
struct list_head node;
+ bool allocated;
};
-extern struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name);
+extern struct atmel_tc *atmel_tc_alloc(unsigned block);
extern void atmel_tc_free(struct atmel_tc *tc);
/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
@@ -258,5 +260,10 @@ extern const u8 atmel_tc_divisors[5];
#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
+#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
+ ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
+ ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
+ ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
+ /* all IRQs */
#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index fef3a809e7cf..5b08a8540ecf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,42 +3,6 @@
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
-/*
- * Provide __deprecated wrappers for the new interface, avoid flag day changes.
- * We need the ugly external functions to break header recursion hell.
- */
-#ifndef smp_mb__before_atomic_inc
-static inline void __deprecated smp_mb__before_atomic_inc(void)
-{
- extern void __smp_mb__before_atomic(void);
- __smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_atomic_inc
-static inline void __deprecated smp_mb__after_atomic_inc(void)
-{
- extern void __smp_mb__after_atomic(void);
- __smp_mb__after_atomic();
-}
-#endif
-
-#ifndef smp_mb__before_atomic_dec
-static inline void __deprecated smp_mb__before_atomic_dec(void)
-{
- extern void __smp_mb__before_atomic(void);
- __smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_atomic_dec
-static inline void __deprecated smp_mb__after_atomic_dec(void)
-{
- extern void __smp_mb__after_atomic(void);
- __smp_mb__after_atomic();
-}
-#endif
-
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 22cfddb75566..af84234e1f6e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -47,6 +47,7 @@ struct sk_buff;
struct audit_krule {
int vers_ops;
+ u32 pflags;
u32 flags;
u32 listnr;
u32 action;
@@ -64,14 +65,21 @@ struct audit_krule {
u64 prio;
};
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY 0x1
+
struct audit_field {
u32 type;
- u32 val;
- kuid_t uid;
- kgid_t gid;
+ union {
+ u32 val;
+ kuid_t uid;
+ kgid_t gid;
+ struct {
+ char *lsm_str;
+ void *lsm_rule;
+ };
+ };
u32 op;
- char *lsm_str;
- void *lsm_rule;
};
extern int is_audit_feature_set(int which);
@@ -86,7 +94,7 @@ extern unsigned compat_dir_class[];
extern unsigned compat_chattr_class[];
extern unsigned compat_signal_class[];
-extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -109,12 +117,13 @@ extern void audit_log_session_info(struct audit_buffer *ab);
#endif
#ifdef CONFIG_AUDITSYSCALL
+#include <asm/syscall.h> /* for syscall_get_arch() */
+
/* These are defined in auditsc.c */
/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
-extern void __audit_syscall_entry(int arch,
- int major, unsigned long a0, unsigned long a1,
+extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
@@ -125,6 +134,7 @@ extern void audit_putname(struct filename *name);
#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
+extern void __audit_file(const struct file *);
extern void __audit_inode_child(const struct inode *parent,
const struct dentry *dentry,
const unsigned char type);
@@ -141,12 +151,12 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
-static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
+static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{
if (unlikely(current->audit_context))
- __audit_syscall_entry(arch, major, a0, a1, a2, a3);
+ __audit_syscall_entry(major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
{
@@ -178,6 +188,11 @@ static inline void audit_inode(struct filename *name,
__audit_inode(name, dentry, flags);
}
}
+static inline void audit_file(struct file *file)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_file(file);
+}
static inline void audit_inode_parent_hidden(struct filename *name,
const struct dentry *dentry)
{
@@ -322,7 +337,7 @@ static inline int audit_alloc(struct task_struct *task)
}
static inline void audit_free(struct task_struct *task)
{ }
-static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
+static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{ }
@@ -352,6 +367,9 @@ static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int parent)
{ }
+static inline void audit_file(struct file *file)
+{
+}
static inline void audit_inode_parent_hidden(struct filename *name,
const struct dentry *dentry)
{ }
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e488e9459a93..5da6012b7a14 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -28,12 +28,10 @@ struct dentry;
* Bits in backing_dev_info.state
*/
enum bdi_state {
- BDI_wb_alloc, /* Default embedded wb allocated */
BDI_async_congested, /* The async (write) queue is getting full */
BDI_sync_congested, /* The sync queue is getting full */
BDI_registered, /* bdi_register() was done */
BDI_writeback_running, /* Writeback is in progress */
- BDI_unused, /* Available bits start here */
};
typedef int (congested_fn)(void *, int);
@@ -50,7 +48,6 @@ enum bdi_stat_item {
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
- unsigned int nr;
unsigned long last_old_flush; /* last old data flush */
@@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 089743ade734..9b0a15d06a4f 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -27,10 +27,13 @@
* counter raised only while it is under our special handling;
*
* iii. after the lockless scan step have selected a potential balloon page for
- * isolation, re-test the page->mapping flags and the page ref counter
+ * isolation, re-test the PageBalloon mark and the PagePrivate flag
* under the proper page lock, to ensure isolating a valid balloon page
* (not yet isolated, nor under release procedure)
*
+ * iv. isolation or dequeueing procedure must clear PagePrivate flag under
+ * page lock together with removing page from balloon device page list.
+ *
* The functions provided by this interface are placed to help on coping with
* the aforementioned balloon page corner case, as well as to ensure the simple
* set of exposed rules are satisfied while we are dealing with balloon pages
@@ -54,43 +57,22 @@
* balloon driver as a page book-keeper for its registered balloon devices.
*/
struct balloon_dev_info {
- void *balloon_device; /* balloon device descriptor */
- struct address_space *mapping; /* balloon special page->mapping */
unsigned long isolated_pages; /* # of isolated pages for migration */
spinlock_t pages_lock; /* Protection to pages list */
struct list_head pages; /* Pages enqueued & handled to Host */
+ int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
};
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
-extern struct balloon_dev_info *balloon_devinfo_alloc(
- void *balloon_dev_descriptor);
-static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info)
-{
- kfree(b_dev_info);
-}
-
-/*
- * balloon_page_free - release a balloon page back to the page free lists
- * @page: ballooned page to be set free
- *
- * This function must be used to properly set free an isolated/dequeued balloon
- * page at the end of a sucessful page migration, or at the balloon driver's
- * page release procedure.
- */
-static inline void balloon_page_free(struct page *page)
+static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
{
- /*
- * Balloon pages always get an extra refcount before being isolated
- * and before being dequeued to help on sorting out fortuite colisions
- * between a thread attempting to isolate and another thread attempting
- * to release the very same balloon page.
- *
- * Before we handle the page back to Buddy, lets drop its extra refcnt.
- */
- put_page(page);
- __free_page(page);
+ balloon->isolated_pages = 0;
+ spin_lock_init(&balloon->pages_lock);
+ INIT_LIST_HEAD(&balloon->pages);
+ balloon->migratepage = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
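Driver-side, the embedded balloon_dev_info replaces the old alloc/free pair; a rough setup sketch under that assumption (my_migratepage is a placeholder callback, not an existing function) is:

struct my_balloon_dev {
	struct balloon_dev_info b_dev_info;	/* embedded, no separate kmalloc */
	/* ... driver-specific state ... */
};

/* Placeholder; a real driver moves the page contents and fixes up its lists. */
static int my_migratepage(struct balloon_dev_info *info, struct page *newpage,
			  struct page *page, enum migrate_mode mode);

static void my_balloon_setup(struct my_balloon_dev *dev)
{
	balloon_devinfo_init(&dev->b_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
	dev->b_dev_info.migratepage = my_migratepage;
#endif
}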
@@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page);
extern void balloon_page_putback(struct page *page);
extern int balloon_page_migrate(struct page *newpage,
struct page *page, enum migrate_mode mode);
-extern struct address_space
-*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
- const struct address_space_operations *a_ops);
-
-static inline void balloon_mapping_free(struct address_space *balloon_mapping)
-{
- kfree(balloon_mapping);
-}
/*
- * page_flags_cleared - helper to perform balloon @page ->flags tests.
- *
- * As balloon pages are obtained from buddy and we do not play with page->flags
- * at driver level (exception made when we get the page lock for compaction),
- * we can safely identify a ballooned page by checking if the
- * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also
- * helps us skip ballooned pages that are locked for compaction or release, thus
- * mitigating their racy check at balloon_page_movable()
- */
-static inline bool page_flags_cleared(struct page *page)
-{
- return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
-}
-
-/*
- * __is_movable_balloon_page - helper to perform @page mapping->flags tests
+ * __is_movable_balloon_page - helper to perform @page PageBalloon tests
*/
static inline bool __is_movable_balloon_page(struct page *page)
{
- struct address_space *mapping = page->mapping;
- return mapping_balloon(mapping);
+ return PageBalloon(page);
}
/*
- * balloon_page_movable - test page->mapping->flags to identify balloon pages
- * that can be moved by compaction/migration.
- *
- * This function is used at core compaction's page isolation scheme, therefore
- * most pages exposed to it are not enlisted as balloon pages and so, to avoid
- * undesired side effects like racing against __free_pages(), we cannot afford
- * holding the page locked while testing page->mapping->flags here.
+ * balloon_page_movable - test PageBalloon to identify balloon pages
+ * and PagePrivate to check that the page is not
+ * isolated and can be moved by compaction/migration.
*
* As we might return false positives in the case of a balloon page being just
- * released under us, the page->mapping->flags need to be re-tested later,
- * under the proper page lock, at the functions that will be coping with the
- * balloon page case.
+ * released under us, this needs to be re-tested later, under the page lock.
*/
static inline bool balloon_page_movable(struct page *page)
{
- /*
- * Before dereferencing and testing mapping->flags, let's make sure
- * this is not a page that uses ->mapping in a different way
- */
- if (page_flags_cleared(page) && !page_mapped(page) &&
- page_count(page) == 1)
- return __is_movable_balloon_page(page);
-
- return false;
+ return PageBalloon(page) && PagePrivate(page);
}
/*
* isolated_balloon_page - identify an isolated balloon page on private
* compaction/migration page lists.
- *
- * After a compaction thread isolates a balloon page for migration, it raises
- * the page refcount to prevent concurrent compaction threads from re-isolating
- * the same page. For that reason putback_movable_pages(), or other routines
- * that need to identify isolated balloon pages on private pagelists, cannot
- * rely on balloon_page_movable() to accomplish the task.
*/
static inline bool isolated_balloon_page(struct page *page)
{
- /* Already isolated balloon pages, by default, have a raised refcount */
- if (page_flags_cleared(page) && !page_mapped(page) &&
- page_count(page) >= 2)
- return __is_movable_balloon_page(page);
-
- return false;
+ return PageBalloon(page);
}
/*
* balloon_page_insert - insert a page into the balloon's page list and make
- * the page->mapping assignment accordingly.
+ * the page->private assignment accordingly.
+ * @balloon : pointer to balloon device
* @page : page to be assigned as a 'balloon page'
- * @mapping : allocated special 'balloon_mapping'
- * @head : balloon's device page list head
*
* Caller must ensure the page is locked and the spin_lock protecting balloon
* pages list is held before inserting a page into the balloon device.
*/
-static inline void balloon_page_insert(struct page *page,
- struct address_space *mapping,
- struct list_head *head)
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+ struct page *page)
{
- page->mapping = mapping;
- list_add(&page->lru, head);
+ __SetPageBalloon(page);
+ SetPagePrivate(page);
+ set_page_private(page, (unsigned long)balloon);
+ list_add(&page->lru, &balloon->pages);
}
/*
* balloon_page_delete - delete a page from balloon's page list and clear
- * the page->mapping assignement accordingly.
+ * the page->private assignment accordingly.
* @page : page to be released from balloon's page list
*
* Caller must ensure the page is locked and the spin_lock protecting balloon
@@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page,
*/
static inline void balloon_page_delete(struct page *page)
{
- page->mapping = NULL;
- list_del(&page->lru);
+ __ClearPageBalloon(page);
+ set_page_private(page, 0);
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ list_del(&page->lru);
+ }
}
/*
@@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page)
*/
static inline struct balloon_dev_info *balloon_page_device(struct page *page)
{
- struct address_space *mapping = page->mapping;
- if (likely(mapping))
- return mapping->private_data;
-
- return NULL;
+ return (struct balloon_dev_info *)page_private(page);
}
static inline gfp_t balloon_mapping_gfp_mask(void)
@@ -228,34 +161,24 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
return GFP_HIGHUSER_MOVABLE;
}
-static inline bool balloon_compaction_check(void)
-{
- return true;
-}
-
#else /* !CONFIG_BALLOON_COMPACTION */
-static inline void *balloon_mapping_alloc(void *balloon_device,
- const struct address_space_operations *a_ops)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void balloon_mapping_free(struct address_space *balloon_mapping)
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+ struct page *page)
{
- return;
+ __SetPageBalloon(page);
+ list_add(&page->lru, &balloon->pages);
}
-static inline void balloon_page_insert(struct page *page,
- struct address_space *mapping,
- struct list_head *head)
+static inline void balloon_page_delete(struct page *page)
{
- list_add(&page->lru, head);
+ __ClearPageBalloon(page);
+ list_del(&page->lru);
}
-static inline void balloon_page_delete(struct page *page)
+static inline bool __is_movable_balloon_page(struct page *page)
{
- list_del(&page->lru);
+ return false;
}
static inline bool balloon_page_movable(struct page *page)
@@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
return GFP_HIGHUSER;
}
-static inline bool balloon_compaction_check(void)
-{
- return false;
-}
#endif /* CONFIG_BALLOON_COMPACTION */
#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 0272e49135d0..eb1c6a47b67f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -267,7 +267,7 @@ struct bcma_device {
u8 core_unit;
u32 addr;
- u32 addr1;
+ u32 addr_s[8];
u32 wrap;
void __iomem *io_addr;
@@ -323,6 +323,8 @@ struct bcma_bus {
struct pci_dev *host_pci;
/* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
struct sdio_func *host_sdio;
+ /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
+ struct platform_device *host_pdev;
};
struct bcma_chipinfo chipinfo;
@@ -332,10 +334,10 @@ struct bcma_bus {
struct bcma_device *mapped_core;
struct list_head cores;
u8 nr_cores;
- u8 init_done:1;
u8 num;
struct bcma_drv_cc drv_cc;
+ struct bcma_drv_cc_b drv_cc_b;
struct bcma_drv_pci drv_pci[2];
struct bcma_drv_pcie2 drv_pcie2;
struct bcma_drv_mips drv_mips;
@@ -445,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */
extern u32 bcma_core_dma_translation(struct bcma_device *core);
+extern unsigned int bcma_core_irq(struct bcma_device *core, int num);
+
#endif /* LINUX_BCMA_H_ */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 63d105cd14a3..db6fa217f98b 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -644,6 +644,12 @@ struct bcma_drv_cc {
#endif
};
+struct bcma_drv_cc_b {
+ struct bcma_device *core;
+ u8 setup_done:1;
+ void __iomem *mii;
+};
+
/* Register access */
#define bcma_cc_read32(cc, offset) \
bcma_read32((cc)->core, offset)
@@ -699,4 +705,6 @@ extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc);
+void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value);
+
#endif /* LINUX_BCMA_DRIVER_CC_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index fb61f3fb4ddb..0b3b32aeeb8a 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -43,12 +43,12 @@ struct bcma_drv_mips {
extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-extern unsigned int bcma_core_irq(struct bcma_device *core);
+extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
#else
static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-static inline unsigned int bcma_core_irq(struct bcma_device *core)
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
return 0;
}
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 917dcd7965e7..e64ae7bf80a1 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -39,6 +39,11 @@
#define BCMA_RESET_CTL_RESET 0x0001
#define BCMA_RESET_ST 0x0804
+#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003
+#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000
+#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001
+#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002
+
/* BCMA PCI config space registers. */
#define BCMA_PCI_PMCSR 0x44
#define BCMA_PCI_PE 0x100
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 4203c5593b9f..f24d245f8394 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -10,6 +10,7 @@ struct bcma_soc {
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
+int __init bcma_host_soc_init(struct bcma_soc *soc);
int bcma_bus_register(struct bcma_bus *bus);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61f29e5ea840..576e4639ca60 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -53,6 +53,10 @@ struct linux_binprm {
#define BINPRM_FLAGS_EXECFD_BIT 1
#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+/* filename of the binary will be inaccessible after exec */
+#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
+#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
+
/* Function parameter for binfmt->coredump */
struct coredump_params {
const siginfo_t *siginfo;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b39e5000ff58..efead0b532c4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -292,7 +292,24 @@ static inline unsigned bio_segments(struct bio *bio)
*/
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
+ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
+ BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
+ BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
+ BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
+};
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ if (bio->bi_rw & REQ_INTEGRITY)
+ return bio->bi_integrity;
+
+ return NULL;
+}
+
/*
* bio integrity payload
*/
@@ -301,21 +318,40 @@ struct bio_integrity_payload {
struct bvec_iter bip_iter;
- /* kill - should just use bip_vec */
- void *bip_buf; /* generated integrity data */
-
bio_end_io_t *bip_end_io; /* saved I/O completion fn */
unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
- unsigned bip_owns_buf:1; /* should free bip_buf */
+ unsigned short bip_flags; /* control flags */
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
};
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+
+ if (bip)
+ return bip->bip_flags & flag;
+
+ return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+ return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+ sector_t seed)
+{
+ bip->bip_iter.bi_sector = seed;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
extern void bio_trim(struct bio *bio, int offset, int size);
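With these accessors, code that previously poked at bi_integrity or bip_buf can test for a payload and its flags instead; a hedged sketch (kernel context assumed, function name invented):

static void my_check_integrity(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (!bip)
		return;				/* no REQ_INTEGRITY payload */

	if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK))
		return;				/* HBA verification disabled */

	/* The reference-tag seed now lives in the iterator. */
	pr_debug("integrity seed %llu\n",
		 (unsigned long long)bip_get_seed(bip));
}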
@@ -342,6 +378,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
}
extern struct bio_set *bioset_create(unsigned int, unsigned int);
+extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);
@@ -353,7 +390,6 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set *fs_bio_set;
-unsigned int bio_integrity_tag_size(struct bio *bio);
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
@@ -407,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
+void generic_start_io_acct(int rw, unsigned long sectors,
+ struct hd_struct *part);
+void generic_end_io_acct(int rw, struct hd_struct *part,
+ unsigned long start_time);
+
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
@@ -661,14 +702,10 @@ struct biovec_slab {
for_each_bio(_bio) \
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
-#define bio_integrity(bio) (bio->bi_integrity != NULL)
-
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern int bio_integrity_enabled(struct bio *bio);
-extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
-extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
+extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
@@ -680,14 +717,14 @@ extern void bio_integrity_init(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */
-static inline int bio_integrity(struct bio *bio)
+static inline void *bio_integrity(struct bio *bio)
{
- return 0;
+ return NULL;
}
-static inline int bio_integrity_enabled(struct bio *bio)
+static inline bool bio_integrity_enabled(struct bio *bio)
{
- return 0;
+ return false;
}
static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
@@ -733,6 +770,11 @@ static inline void bio_integrity_init(void)
return;
}
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ return false;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* CONFIG_BLOCK */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d080c427..202e4034fe26 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, off) as above, with an alignment offset
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -60,6 +61,7 @@
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
*/
/*
@@ -114,11 +116,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
-extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask);
+
+extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds is multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
+ */
+static inline unsigned long
+bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, 0);
+}
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
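A small usage sketch for the wrapper documented above, assuming kernel context; the sizes and alignment are arbitrary:

/* Find four free bits aligned to a multiple of four and claim them. */
static long my_claim_aligned_bits(unsigned long *map, unsigned long size)
{
	unsigned long pos;

	pos = bitmap_find_next_zero_area(map, size, 0, 4, 3 /* align mask */);
	if (pos >= size)
		return -ENOMEM;			/* no suitable zero area */

	bitmap_set(map, pos, 4);
	return pos;
}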
@@ -145,6 +172,8 @@ extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int o
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
+extern int bitmap_print_to_pagebuf(bool list, char *buf,
+ const unsigned long *maskp, int nmaskbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cbc5833fb221..5d858e02997f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -18,8 +18,11 @@
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
-#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
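Presumably the point of shifting ~0UL/~0ULL rather than 1 is that GENMASK(BITS_PER_LONG - 1, 0) stays well defined, where the old form shifted 1 by the full word width. A standalone check of the new expressions (BITS_PER_LONG is defined locally just for the sketch):

#include <stdio.h>

#define BITS_PER_LONG	   (8 * (int)sizeof(unsigned long))
#define BITS_PER_LONG_LONG 64

#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* Matches the example in the comment above: bits 21..39 set. */
	printf("GENMASK_ULL(39, 21) = 0x%016llx\n", GENMASK_ULL(39, 21));
	/* Full-width mask: well defined with the new form. */
	printf("GENMASK_ULL(63, 0)  = 0x%016llx\n", GENMASK_ULL(63, 0));
	return 0;
}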
@@ -32,26 +35,6 @@ extern unsigned long __sw_hweight64(__u64 w);
*/
#include <asm/bitops.h>
-/*
- * Provide __deprecated wrappers for the new interface, avoid flag day changes.
- * We need the ugly external functions to break header recursion hell.
- */
-#ifndef smp_mb__before_clear_bit
-static inline void __deprecated smp_mb__before_clear_bit(void)
-{
- extern void __smp_mb__before_atomic(void);
- __smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_clear_bit
-static inline void __deprecated smp_mb__after_clear_bit(void)
-{
- extern void __smp_mb__after_atomic(void);
- __smp_mb__after_atomic();
-}
-#endif
-
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a1e31f274fcd..8aded9ab2e4e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,6 +4,7 @@
#include <linux/blkdev.h>
struct blk_mq_tags;
+struct blk_flush_queue;
struct blk_mq_cpu_notifier {
struct list_head list;
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {
struct request_queue *queue;
unsigned int queue_num;
+ struct blk_flush_queue *fq;
void *driver_data;
@@ -77,8 +79,15 @@ struct blk_mq_tag_set {
struct list_head tag_list;
};
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+struct blk_mq_queue_data {
+ struct request *rq;
+ struct list_head *list;
+ bool last;
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -86,6 +95,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
unsigned int);
+typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+ bool);
+
struct blk_mq_ops {
/*
* Queue request
@@ -100,7 +112,7 @@ struct blk_mq_ops {
/*
* Called on request timeout
*/
- rq_timed_out_fn *timeout;
+ timeout_fn *timeout;
softirq_done_fn *complete;
@@ -115,6 +127,10 @@ struct blk_mq_ops {
/*
* Called for every command allocated by the block layer to allow
* the driver to set up driver specific data.
+ *
+ * A tag greater than or equal to queue_depth is used for setting up
+ * the flush request.
+ *
* Ditto for exit/teardown.
*/
init_request_fn *init_request;
@@ -130,6 +146,7 @@ enum {
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_SYSFS_UP = 1 << 3,
+ BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
@@ -140,6 +157,7 @@ enum {
};
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
@@ -151,16 +169,35 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+enum {
+ BLK_MQ_UNIQUE_TAG_BITS = 16,
+ BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
+};
+
+u32 blk_mq_unique_tag(struct request *rq);
+
+static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
+{
+ return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
+}
+
+static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
+{
+ return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
+}
+
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-void blk_mq_end_io(struct request *rq, int error);
-void __blk_mq_end_io(struct request *rq, int error);
+void blk_mq_start_request(struct request *rq);
+void blk_mq_end_request(struct request *rq, int error);
+void __blk_mq_end_request(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
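The unique-tag helpers above imply a simple 16/16 split: hardware queue index in the upper bits, per-queue tag in the lower bits. A standalone model of that encoding, with the encode side written here since only the decode helpers appear in the header:

#include <stdio.h>
#include <stdint.h>

#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

/* Pack a hardware-queue index and a tag the way the decode helpers expect. */
static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

int main(void)
{
	uint32_t u = make_unique_tag(3, 42);

	printf("hwq=%u tag=%u\n",
	       u >> UNIQUE_TAG_BITS,	/* cf. blk_mq_unique_tag_to_hwq() */
	       u & UNIQUE_TAG_MASK);	/* cf. blk_mq_unique_tag_to_tag() */
	return 0;
}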
@@ -173,7 +210,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+ void *priv);
/*
* Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 66c2167f04a9..445d59231bc4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -78,9 +78,11 @@ struct bio {
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
#endif
+ union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
+ };
unsigned short bi_vcnt; /* how many bio_vec's */
@@ -118,10 +120,8 @@ struct bio {
#define BIO_USER_MAPPED 6 /* contains user pages */
#define BIO_EOPNOTSUPP 7 /* not supported */
#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
-#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */
-#define BIO_QUIET 10 /* Make BIO Quiet */
-#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
-#define BIO_SNAP_STABLE 12 /* bio data must be snapshotted during write */
+#define BIO_QUIET 9 /* Make BIO Quiet */
+#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -162,6 +162,7 @@ enum rq_flag_bits {
__REQ_WRITE_SAME, /* write same block many times */
__REQ_NOIDLE, /* don't anticipate more IO after this one */
+ __REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_FLUSH, /* request for cache flush */
@@ -186,9 +187,7 @@ enum rq_flag_bits {
__REQ_FLUSH_SEQ, /* request for flush sequence */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
- __REQ_KERNEL, /* direct IO to kernel pages */
__REQ_PM, /* runtime pm request */
- __REQ_END, /* last of chain of requests */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
__REQ_NR_BITS, /* stops here */
@@ -204,13 +203,14 @@ enum rq_flag_bits {
#define REQ_DISCARD (1ULL << __REQ_DISCARD)
#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
+#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
- REQ_SECURE)
+ REQ_SECURE | REQ_INTEGRITY)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
@@ -240,9 +240,7 @@ enum rq_flag_bits {
#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
#define REQ_SECURE (1ULL << __REQ_SECURE)
-#define REQ_KERNEL (1ULL << __REQ_KERNEL)
#define REQ_PM (1ULL << __REQ_PM)
-#define REQ_END (1ULL << __REQ_END)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 518b46555b80..92f4b4b288dd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -36,6 +36,7 @@ struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
+struct blk_flush_queue;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -397,7 +398,7 @@ struct request_queue {
*/
struct kobject mq_kobj;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
struct device *dev;
int rpm_status;
unsigned int nr_pending;
@@ -455,14 +456,7 @@ struct request_queue {
*/
unsigned int flush_flags;
unsigned int flush_not_queueable:1;
- unsigned int flush_queue_delayed:1;
- unsigned int flush_pending_idx:1;
- unsigned int flush_running_idx:1;
- unsigned long flush_pending_since;
- struct list_head flush_queue[2];
- struct list_head flush_data_in_flight;
- struct request *flush_rq;
- spinlock_t mq_flush_lock;
+ struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
@@ -865,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- return bdev->bd_disk->queue;
+ return bdev->bd_disk->queue; /* this is never NULL */
}
/*
@@ -1063,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *);
/*
* block layer runtime pm functions
*/
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
@@ -1142,8 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
/*
* tag stuff
*/
-#define blk_rq_tagged(rq) \
- ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1192,7 +1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 1024,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
@@ -1285,10 +1276,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = (sector << 9) & (granularity - 1);
+ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
- return (granularity + lim->alignment_offset - alignment)
- & (granularity - 1);
+ return (granularity + lim->alignment_offset - alignment) % granularity;
}
static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1464,32 +1454,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
-#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
-#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
+enum blk_integrity_flags {
+ BLK_INTEGRITY_VERIFY = 1 << 0,
+ BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+};
-struct blk_integrity_exchg {
+struct blk_integrity_iter {
void *prot_buf;
void *data_buf;
- sector_t sector;
+ sector_t seed;
unsigned int data_size;
- unsigned short sector_size;
+ unsigned short interval;
const char *disk_name;
};
-typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
-typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
-typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
-typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
struct blk_integrity {
- integrity_gen_fn *generate_fn;
- integrity_vrfy_fn *verify_fn;
- integrity_set_tag_fn *set_tag_fn;
- integrity_get_tag_fn *get_tag_fn;
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
unsigned short flags;
unsigned short tuple_size;
- unsigned short sector_size;
+ unsigned short interval;
unsigned short tag_size;
const char *name;
@@ -1504,10 +1493,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
+extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
+ struct request *);
+extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
+ struct bio *);
static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1520,12 +1509,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
return disk->integrity;
}
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(struct request *rq)
{
- if (rq->bio == NULL)
- return 0;
-
- return bio_integrity(rq->bio);
+ return rq->cmd_flags & REQ_INTEGRITY;
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
@@ -1564,7 +1550,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q,
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
- return 0;
+ return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
@@ -1590,17 +1576,17 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue *
{
return 0;
}
-static inline int blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
+static inline bool blk_integrity_merge_rq(struct request_queue *rq,
+ struct request *r1,
+ struct request *r2)
{
- return 0;
+ return true;
}
-static inline int blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
+static inline bool blk_integrity_merge_bio(struct request_queue *rq,
+ struct request *r,
+ struct bio *b)
{
- return 0;
+ return true;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
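As a rough illustration of the reworked integrity interface above, a driver-side profile might now look like the sketch below; the my_* names, tuple size and interval are assumptions, not taken from any in-tree driver.

static int my_generate(struct blk_integrity_iter *iter)
{
        /* walk iter->data_buf in iter->interval-sized chunks and fill
         * iter->prot_buf, seeding the first tag from iter->seed */
        return 0;
}

static int my_verify(struct blk_integrity_iter *iter)
{
        /* recompute the tags and compare them against iter->prot_buf */
        return 0;
}

static struct blk_integrity my_integrity = {
        .name           = "EXAMPLE-CSUM",
        .generate_fn    = my_generate,
        .verify_fn      = my_verify,
        .flags          = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
        .tuple_size     = 8,
        .interval       = 512,
};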
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e2bd4c95b66..0995c2de8162 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
extern unsigned long free_all_bootmem(void);
+extern void reset_node_managed_pages(pg_data_t *pgdat);
extern void reset_all_zones_managed_pages(void);
extern void free_bootmem_node(pg_data_t *pgdat,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
new file mode 100644
index 000000000000..bbfceb756452
--- /dev/null
+++ b/include/linux/bpf.h
@@ -0,0 +1,145 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_H
+#define _LINUX_BPF_H 1
+
+#include <uapi/linux/bpf.h>
+#include <linux/workqueue.h>
+#include <linux/file.h>
+
+struct bpf_map;
+
+/* map is generic key/value storage optionally accessible by eBPF programs */
+struct bpf_map_ops {
+ /* funcs callable from userspace (via syscall) */
+ struct bpf_map *(*map_alloc)(union bpf_attr *attr);
+ void (*map_free)(struct bpf_map *);
+ int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+
+ /* funcs callable from userspace and from eBPF programs */
+ void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+ int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+ int (*map_delete_elem)(struct bpf_map *map, void *key);
+};
+
+struct bpf_map {
+ atomic_t refcnt;
+ enum bpf_map_type map_type;
+ u32 key_size;
+ u32 value_size;
+ u32 max_entries;
+ struct bpf_map_ops *ops;
+ struct work_struct work;
+};
+
+struct bpf_map_type_list {
+ struct list_head list_node;
+ struct bpf_map_ops *ops;
+ enum bpf_map_type type;
+};
+
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+void bpf_map_put(struct bpf_map *map);
+struct bpf_map *bpf_map_get(struct fd f);
+
+/* function argument constraints */
+enum bpf_arg_type {
+ ARG_ANYTHING = 0, /* any argument is ok */
+
+ /* the following constraints used to prototype
+ * bpf_map_lookup/update/delete_elem() functions
+ */
+ ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
+ ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
+ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
+
+ /* the following constraints used to prototype bpf_memcmp() and other
+ * functions that access data on eBPF program stack
+ */
+ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+};
+
+/* type of values returned from helper functions */
+enum bpf_return_type {
+ RET_INTEGER, /* function returns integer */
+ RET_VOID, /* function doesn't return anything */
+ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+};
+
+/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
+ * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
+ * instructions after verifying
+ */
+struct bpf_func_proto {
+ u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+ bool gpl_only;
+ enum bpf_return_type ret_type;
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+};
+
+/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
+ * the first argument to eBPF programs.
+ * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
+ */
+struct bpf_context;
+
+enum bpf_access_type {
+ BPF_READ = 1,
+ BPF_WRITE = 2
+};
+
+struct bpf_verifier_ops {
+ /* return eBPF function prototype for verification */
+ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+
+ /* return true if 'size' wide access at offset 'off' within bpf_context
+ * with 'type' (read or write) is allowed
+ */
+ bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+};
+
+struct bpf_prog_type_list {
+ struct list_head list_node;
+ struct bpf_verifier_ops *ops;
+ enum bpf_prog_type type;
+};
+
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+
+struct bpf_prog;
+
+struct bpf_prog_aux {
+ atomic_t refcnt;
+ bool is_gpl_compatible;
+ enum bpf_prog_type prog_type;
+ struct bpf_verifier_ops *ops;
+ struct bpf_map **used_maps;
+ u32 used_map_cnt;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+#ifdef CONFIG_BPF_SYSCALL
+void bpf_prog_put(struct bpf_prog *prog);
+#else
+static inline void bpf_prog_put(struct bpf_prog *prog) {}
+#endif
+struct bpf_prog *bpf_prog_get(u32 ufd);
+/* verify correctness of eBPF program */
+int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+
+/* verifier prototypes for helper functions called from eBPF programs */
+extern struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern struct bpf_func_proto bpf_map_update_elem_proto;
+extern struct bpf_func_proto bpf_map_delete_elem_proto;
+
+#endif /* _LINUX_BPF_H */
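A minimal sketch of how a map type could be registered against the ops declared above, assuming the usual <linux/err.h> and <linux/init.h> helpers; the my_* names are hypothetical and BPF_MAP_TYPE_UNSPEC is used only as a placeholder.

static struct bpf_map *my_map_alloc(union bpf_attr *attr)
{
        return ERR_PTR(-EINVAL);        /* real allocation elided */
}

static void my_map_free(struct bpf_map *map)
{
}

static struct bpf_map_ops my_map_ops = {
        .map_alloc      = my_map_alloc,
        .map_free       = my_map_free,
};

static struct bpf_map_type_list my_map_type = {
        .ops    = &my_map_ops,
        .type   = BPF_MAP_TYPE_UNSPEC,  /* placeholder only */
};

static int __init my_map_init(void)
{
        bpf_register_map_type(&my_map_type);
        return 0;
}
late_initcall(my_map_init);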
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 61219b9b3445..7ccd928cc1f2 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,7 +13,11 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7425 0x03625e60
+#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7445 0x600d8510
@@ -21,9 +25,9 @@
#define PHY_BCM_OUI_1 0x00206000
#define PHY_BCM_OUI_2 0x0143bc00
#define PHY_BCM_OUI_3 0x03625c00
-#define PHY_BCM_OUI_4 0x600d0000
+#define PHY_BCM_OUI_4 0x600d8400
#define PHY_BCM_OUI_5 0x03625e00
-
+#define PHY_BCM_OUI_6 0xae025000
#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
@@ -38,7 +42,8 @@
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
/* Broadcom BCM7xxx specific workarounds */
-#define PHY_BRCM_100MBPS_WAR 0x00010000
+#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
+#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
#define PHY_BCM_FLAGS_VALID 0x80000000
/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
@@ -92,4 +97,130 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+/*
+ * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
+ * BCM5482, and possibly some others.
+ */
+#define BCM_LED_SRC_LINKSPD1 0x0
+#define BCM_LED_SRC_LINKSPD2 0x1
+#define BCM_LED_SRC_XMITLED 0x2
+#define BCM_LED_SRC_ACTIVITYLED 0x3
+#define BCM_LED_SRC_FDXLED 0x4
+#define BCM_LED_SRC_SLAVE 0x5
+#define BCM_LED_SRC_INTR 0x6
+#define BCM_LED_SRC_QUALITY 0x7
+#define BCM_LED_SRC_RCVLED 0x8
+#define BCM_LED_SRC_MULTICOLOR1 0xa
+#define BCM_LED_SRC_OPENSHORT 0xb
+#define BCM_LED_SRC_OFF 0xe /* Tied high */
+#define BCM_LED_SRC_ON 0xf /* Tied low */
+
+
+/*
+ * BCM5482: Shadow registers
+ * Shadow values go into bits [14:10] of register 0x1c to select a shadow
+ * register to access.
+ */
+/* 00101: Spare Control Register 3 */
+#define BCM54XX_SHD_SCR3 0x05
+#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
+#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
+#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+
+/* 01010: Auto Power-Down */
+#define BCM54XX_SHD_APD 0x0a
+#define BCM54XX_SHD_APD_EN 0x0020
+
+#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+ /* LED3 / ~LINKSPD[2] selector */
+#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+ /* LED1 / ~LINKSPD[1] selector */
+#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
+#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
+#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
+#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
+#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
+#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+
+
+/*
+ * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
+ */
+#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
+#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
+#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
+#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
+#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
+#define MII_BCM54XX_EXP_EXP08 0x0F08
+#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
+#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP75 0x0f75
+#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
+#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
+#define MII_BCM54XX_EXP_EXP96 0x0f96
+#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
+#define MII_BCM54XX_EXP_EXP97 0x0f97
+#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+
+/*
+ * BCM5482: Secondary SerDes registers
+ */
+#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
+#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
+#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
+#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
+#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+
+
+/*****************************************************************************/
+/* Fast Ethernet Transceiver definitions. */
+/*****************************************************************************/
+
+#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
+#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
+#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
+#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
+#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
+#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
+
+#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
+#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
+
+
+/*** Shadow register definitions ***/
+
+#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
+#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
+
+#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
+#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
+
+#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
+#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
+
+/*
+ * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
+ * 0x1c shadow registers.
+ */
+static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
+{
+ phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
+ return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
+}
+
+static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
+ u16 val)
+{
+ return phy_write(phydev, MII_BCM54XX_SHD,
+ MII_BCM54XX_SHD_WRITE |
+ MII_BCM54XX_SHD_VAL(shadow) |
+ MII_BCM54XX_SHD_DATA(val));
+}
+
+#define BRCM_CL45VEN_EEE_CONTROL 0x803d
+#define LPI_FEATURE_EN 0x8000
+#define LPI_FEATURE_EN_DIG1000X 0x4000
+
#endif /* _LINUX_BRCMPHY_H */
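A hedged usage sketch for the 0x1c shadow helpers above, enabling auto power-down; the function name is illustrative and error handling is minimal.

static int example_enable_apd(struct phy_device *phydev)
{
        int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);

        if (val < 0)
                return val;

        return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
                                    val | BCM54XX_SHD_APD_EN);
}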
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 324329ceea1e..73b45225a7ca 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -175,12 +175,13 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
-struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
- unsigned size);
+struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
-struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+struct buffer_head *__bread_gfp(struct block_device *,
+ sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
@@ -295,7 +296,13 @@ static inline void bforget(struct buffer_head *bh)
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
- return __bread(sb->s_bdev, block, sb->s_blocksize);
+ return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+}
+
+static inline struct buffer_head *
+sb_bread_unmovable(struct super_block *sb, sector_t block)
+{
+ return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}
static inline void
@@ -307,7 +314,7 @@ sb_breadahead(struct super_block *sb, sector_t block)
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
- return __getblk(sb->s_bdev, block, sb->s_blocksize);
+ return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
static inline struct buffer_head *
@@ -344,6 +351,36 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block,
+ unsigned size)
+{
+ return __getblk_gfp(bdev, block, size, 0);
+}
+
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block,
+ unsigned size)
+{
+ return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
+}
+
+/**
+ * __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
+ * @block: number of block
+ * @size: size (in bytes) to read
+ *
+ * Reads a specified block and returns the buffer head that contains it.
+ * The page cache is allocated from movable area so that it can be migrated.
+ * It returns NULL if the block was unreadable.
+ */
+static inline struct buffer_head *
+__bread(struct block_device *bdev, sector_t block, unsigned size)
+{
+ return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
+}
+
extern int __set_page_dirty_buffers(struct page *page);
#else /* CONFIG_BLOCK */
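To make the movable/unmovable split above concrete, a filesystem that must keep metadata out of movable pageblocks would switch to the *_unmovable variant, roughly as sketched here; the block number and copy length are placeholders.

static int example_read_block0(struct super_block *sb, char *out, size_t len)
{
        struct buffer_head *bh = sb_bread_unmovable(sb, 0);

        if (!bh)
                return -EIO;
        memcpy(out, bh->b_data, min_t(size_t, len, sb->s_blocksize));
        brelse(bh);
        return 0;
}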
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 000000000000..3daf5ed392c9
--- /dev/null
+++ b/include/linux/cacheinfo.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_CACHEINFO_H
+#define _LINUX_CACHEINFO_H
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+
+struct device_node;
+struct attribute;
+
+enum cache_type {
+ CACHE_TYPE_NOCACHE = 0,
+ CACHE_TYPE_INST = BIT(0),
+ CACHE_TYPE_DATA = BIT(1),
+ CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA,
+ CACHE_TYPE_UNIFIED = BIT(2),
+};
+
+/**
+ * struct cacheinfo - represent a cache leaf node
+ * @type: type of the cache - data, inst or unified
+ * @level: represents the hierarchy in the multi-level cache
+ * @coherency_line_size: size of each cache line usually representing
+ * the minimum amount of data that gets transferred from memory
+ * @number_of_sets: total number of sets; a set is a collection of cache
+ * lines sharing the same index
+ * @ways_of_associativity: number of ways in which a particular memory
+ * block can be placed in the cache
+ * @physical_line_partition: number of physical cache lines sharing the
+ * same cachetag
+ * @size: Total size of the cache
+ * @shared_cpu_map: logical cpumask representing all the cpus sharing
+ * this cache node
+ * @attributes: bitfield representing various cache attributes
+ * @of_node: if devicetree is used, this represents either the cpu node in
+ * case there's no explicit cache node or the cache node itself in the
+ * device tree
+ * @disable_sysfs: indicates whether this node is visible to the user via
+ * sysfs or not
+ * @priv: pointer to any private data structure specific to particular
+ * cache design
+ *
+ * While @of_node, @disable_sysfs and @priv are used for internal
+ * bookkeeping, the remaining members form the core properties of the cache
+ */
+struct cacheinfo {
+ enum cache_type type;
+ unsigned int level;
+ unsigned int coherency_line_size;
+ unsigned int number_of_sets;
+ unsigned int ways_of_associativity;
+ unsigned int physical_line_partition;
+ unsigned int size;
+ cpumask_t shared_cpu_map;
+ unsigned int attributes;
+#define CACHE_WRITE_THROUGH BIT(0)
+#define CACHE_WRITE_BACK BIT(1)
+#define CACHE_WRITE_POLICY_MASK \
+ (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK)
+#define CACHE_READ_ALLOCATE BIT(2)
+#define CACHE_WRITE_ALLOCATE BIT(3)
+#define CACHE_ALLOCATE_POLICY_MASK \
+ (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+
+ struct device_node *of_node;
+ bool disable_sysfs;
+ void *priv;
+};
+
+struct cpu_cacheinfo {
+ struct cacheinfo *info_list;
+ unsigned int num_levels;
+ unsigned int num_leaves;
+};
+
+/*
+ * Helpers to make sure "func" is executed on the cpu whose cache
+ * attributes are being detected
+ */
+#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
+static inline void _##func(void *ret) \
+{ \
+ int cpu = smp_processor_id(); \
+ *(int *)ret = __##func(cpu); \
+} \
+ \
+int func(unsigned int cpu) \
+{ \
+ int ret; \
+ smp_call_function_single(cpu, _##func, &ret, true); \
+ return ret; \
+}
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int init_cache_level(unsigned int cpu);
+int populate_cache_leaves(unsigned int cpu);
+
+const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+
+#endif /* _LINUX_CACHEINFO_H */
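The DEFINE_SMP_CALL_CACHE_FUNCTION() helper expects an arch-private __<func>() counterpart; a sketch of how an architecture might wire up init_cache_level() is shown below (the level and leaf counts are made up).

static int __init_cache_level(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

        this_cpu_ci->num_levels = 2;    /* e.g. L1 + L2 */
        this_cpu_ci->num_leaves = 3;    /* L1I, L1D and L2 */
        return 0;
}

DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)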
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 6992afc6ba7f..c05ff0f9f9a5 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -99,6 +99,12 @@ inval_skb:
return 1;
}
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ /* the CAN specific type of skb is identified by its data length */
+ return skb->len == CANFD_MTU;
+}
+
/* get data length from can_dlc with sanitized can_dlc */
u8 can_dlc2len(u8 can_dlc);
@@ -121,6 +127,9 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state);
+
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
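A rough rx-path sketch showing where can_is_canfd_skb() fits; the stats accounting and function name are illustrative, not driver code from the tree.

static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
        if (can_is_canfd_skb(skb)) {
                struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

                dev->stats.rx_bytes += cfd->len;
        } else {
                struct can_frame *cf = (struct can_frame *)skb->data;

                dev->stats.rx_bytes += cf->can_dlc;
        }
        dev->stats.rx_packets++;
        netif_rx(skb);
}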
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 5f3386844134..260d78b587c4 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -13,6 +13,7 @@
struct ceph_auth_client;
struct ceph_authorizer;
+struct ceph_msg;
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
@@ -20,6 +21,10 @@ struct ceph_auth_handshake {
size_t authorizer_buf_len;
void *authorizer_reply_buf;
size_t authorizer_reply_buf_len;
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
};
struct ceph_auth_client_ops {
@@ -66,6 +71,11 @@ struct ceph_auth_client_ops {
void (*reset)(struct ceph_auth_client *ac);
void (*destroy)(struct ceph_auth_client *ac);
+
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
};
struct ceph_auth_client {
@@ -113,4 +123,20 @@ extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
+static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->sign_message)
+ return auth->sign_message(auth, msg);
+ return 0;
+}
+
+static inline
+int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->check_message_signature)
+ return auth->check_message_signature(auth, msg);
+ return 0;
+}
#endif
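Call-site sketch for the new signing hooks, assuming a transport that signs each outgoing message when the handshake provides sign_message; this is illustrative, not the messenger implementation.

static int example_send(struct ceph_auth_handshake *auth, struct ceph_msg *msg)
{
        int ret = ceph_auth_sign_message(auth, msg);

        if (ret)
                return ret;
        /* ... queue msg for transmission ... */
        return 0;
}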
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ad423cc37f..07ca15e76100 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -10,8 +10,7 @@
/*
* a simple reference counted buffer.
*
- * use kmalloc for small sizes (<= one page), vmalloc for larger
- * sizes.
+ * use kmalloc for smaller sizes, vmalloc for larger sizes.
*/
struct ceph_buffer {
struct kref kref;
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index d12659ce550d..71e05bbf8ceb 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -84,6 +84,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
+ CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
CEPH_FEATURE_OSDHASHPSPOOL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 3c97d5e9b951..c0dadaac26e3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -522,8 +522,11 @@ struct ceph_mds_reply_dirfrag {
__le32 dist[];
} __attribute__ ((packed));
-#define CEPH_LOCK_FCNTL 1
-#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL 1
+#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL_INTR 3
+#define CEPH_LOCK_FLOCK_INTR 4
+
#define CEPH_LOCK_SHARED 1
#define CEPH_LOCK_EXCL 2
@@ -549,6 +552,7 @@ struct ceph_filelock {
int ceph_flags_to_mode(int flags);
+#define CEPH_INLINE_NONE ((__u64)-1)
/* capability bits */
#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
@@ -613,6 +617,8 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_LINK_SHARED | \
CEPH_CAP_FILE_SHARED | \
CEPH_CAP_XATTR_SHARED)
+#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
+ CEPH_CAP_FILE_RD)
#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
CEPH_CAP_LINK_SHARED | \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 279b0afac1c1..8b11a79ca1cb 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -29,6 +29,7 @@
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
+#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require cephx message signatures */
#define CEPH_OPT_DEFAULT (0)
@@ -184,7 +185,6 @@ extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
extern void *ceph_kvmalloc(size_t size, gfp_t flags);
-extern void ceph_kvfree(const void *ptr);
extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
@@ -211,7 +211,6 @@ extern struct page **ceph_get_direct_page_vector(const void __user *data,
bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
-extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_copy_user_to_page_vector(struct page **pages,
const void __user *data,
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 40ae58e3e9db..d9d396c16503 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -42,6 +42,10 @@ struct ceph_connection_operations {
struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip);
+ int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
+
+ int (*check_message_signature) (struct ceph_connection *con,
+ struct ceph_msg *msg);
};
/* use format string %s%d */
@@ -142,7 +146,10 @@ struct ceph_msg_data_cursor {
*/
struct ceph_msg {
struct ceph_msg_header hdr; /* header */
- struct ceph_msg_footer footer; /* footer */
+ union {
+ struct ceph_msg_footer footer; /* footer */
+ struct ceph_msg_footer_old old_footer; /* old format footer */
+ };
struct kvec front; /* unaligned blobs of message */
struct ceph_buffer *middle;
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 3d94a73b5f30..1c1887206ffa 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -152,7 +152,8 @@ struct ceph_msg_header {
receiver: mask against ~PAGE_MASK */
struct ceph_entity_name src;
- __le32 reserved;
+ __le16 compat_version;
+ __le16 reserved;
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
@@ -164,13 +165,21 @@ struct ceph_msg_header {
/*
* follows data payload
*/
+struct ceph_msg_footer_old {
+ __le32 front_crc, middle_crc, data_crc;
+ __u8 flags;
+} __attribute__ ((packed));
+
struct ceph_msg_footer {
__le32 front_crc, middle_crc, data_crc;
+ // sig holds the 64 bits of the digital signature for the message PLR
+ __le64 sig;
__u8 flags;
} __attribute__ ((packed));
#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
+#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */
#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 03aeb27fcc69..5d86416d35f2 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,6 +87,13 @@ struct ceph_osd_req_op {
struct ceph_osd_data osd_data;
} extent;
struct {
+ __le32 name_len;
+ __le32 value_len;
+ __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
+ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
+ struct ceph_osd_data osd_data;
+ } xattr;
+ struct {
const char *class_name;
const char *method_name;
struct ceph_osd_data request_info;
@@ -295,6 +302,9 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
+extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode);
extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 cookie, u64 version, int flag);
@@ -318,7 +328,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
struct ceph_vino vino,
u64 offset, u64 *len,
- int num_ops, int opcode, int flags,
+ unsigned int which, int num_ops,
+ int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 9660d6b0a35d..13d71fe18b0c 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -1,7 +1,10 @@
#ifndef __FS_CEPH_PAGELIST_H
#define __FS_CEPH_PAGELIST_H
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
#include <linux/list.h>
+#include <linux/types.h>
struct ceph_pagelist {
struct list_head head;
@@ -10,6 +13,7 @@ struct ceph_pagelist {
size_t room;
struct list_head free_list;
size_t num_pages_free;
+ atomic_t refcnt;
};
struct ceph_pagelist_cursor {
@@ -26,9 +30,10 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
+ atomic_set(&pl->refcnt, 1);
}
-extern int ceph_pagelist_release(struct ceph_pagelist *pl);
+extern void ceph_pagelist_release(struct ceph_pagelist *pl);
extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index f20e0d8a2155..2f822dca1046 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -172,6 +172,7 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSD_OP_MODE_WR 0x2000
#define CEPH_OSD_OP_MODE_RMW 0x3000
#define CEPH_OSD_OP_MODE_SUB 0x4000
+#define CEPH_OSD_OP_MODE_CACHE 0x8000
#define CEPH_OSD_OP_TYPE 0x0f00
#define CEPH_OSD_OP_TYPE_LOCK 0x0100
@@ -181,103 +182,135 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSD_OP_TYPE_PG 0x0500
#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
+#define __CEPH_OSD_OP1(mode, nr) \
+ (CEPH_OSD_OP_MODE_##mode | (nr))
+
+#define __CEPH_OSD_OP(mode, type, nr) \
+ (CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr))
+
+#define __CEPH_FORALL_OSD_OPS(f) \
+ /** data **/ \
+ /* read */ \
+ f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \
+ f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \
+ f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \
+ \
+ /* fancy read */ \
+ f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \
+ f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \
+ \
+ f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \
+ f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \
+ \
+ /* versioning */ \
+ f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \
+ \
+ f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \
+ \
+ f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \
+ \
+ /* sync */ \
+ f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \
+ \
+ /* write */ \
+ f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \
+ f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \
+ f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \
+ f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \
+ f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \
+ \
+ /* fancy write */ \
+ f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
+ f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
+ f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
+ f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
+ \
+ f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \
+ f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \
+ f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \
+ \
+ f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \
+ f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \
+ \
+ f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \
+ \
+ /* omap */ \
+ f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \
+ f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \
+ f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \
+ f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \
+ f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \
+ f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \
+ f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \
+ f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \
+ f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \
+ \
+ /* tiering */ \
+ f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
+ f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
+ f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
+ f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
+ f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \
+ f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \
+ f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \
+ f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \
+ \
+ /* convert tmap to omap */ \
+ f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \
+ \
+ /* hints */ \
+ f(SETALLOCHINT, __CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \
+ \
+ /** multi **/ \
+ f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \
+ f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \
+ f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \
+ \
+ /** attrs **/ \
+ /* read */ \
+ f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \
+ f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \
+ f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \
+ \
+ /* write */ \
+ f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \
+ f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \
+ f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \
+ f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \
+ \
+ /** subop **/ \
+ f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \
+ f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \
+ f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \
+ f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \
+ f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \
+ f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \
+ f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \
+ f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \
+ f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \
+ \
+ /** lock **/ \
+ f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \
+ f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \
+ f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \
+ f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \
+ f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \
+ f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \
+ \
+ /** exec **/ \
+ /* note: the RD bit here is wrong; see special-case below in helper */ \
+ f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \
+ \
+ /** pg **/ \
+ f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \
+ f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \
+ f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \
+ f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get")
+
enum {
- /** data **/
- /* read */
- CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
- CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
- CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3,
-
- /* fancy read */
- CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
- CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5,
-
- CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6,
- CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7,
-
- /* versioning */
- CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8,
-
- /* write */
- CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
- CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
- CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
- CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
- CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,
-
- /* fancy write */
- CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
- CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
- CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
- CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,
-
- CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
- CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
- CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
-
- CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
- CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
-
- CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
-
- /* omap */
- CEPH_OSD_OP_OMAPGETKEYS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17,
- CEPH_OSD_OP_OMAPGETVALS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18,
- CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19,
- CEPH_OSD_OP_OMAPGETVALSBYKEYS =
- CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20,
- CEPH_OSD_OP_OMAPSETVALS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21,
- CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22,
- CEPH_OSD_OP_OMAPCLEAR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23,
- CEPH_OSD_OP_OMAPRMKEYS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24,
- CEPH_OSD_OP_OMAP_CMP = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25,
-
- /* hints */
- CEPH_OSD_OP_SETALLOCHINT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 35,
-
- /** multi **/
- CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1,
- CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2,
- CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3,
-
- /** attrs **/
- /* read */
- CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
- CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,
- CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3,
-
- /* write */
- CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
- CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
- CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
- CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
-
- /** subop **/
- CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
- CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
- CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
- CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
- CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
- CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
- CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
- CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
- CEPH_OSD_OP_SCRUB_MAP = CEPH_OSD_OP_MODE_SUB | 9,
-
- /** lock **/
- CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
- CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
- CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
- CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
- CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
- CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
-
- /** exec **/
- /* note: the RD bit here is wrong; see special-case below in helper */
- CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
-
- /** pg **/
- CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
- CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2,
+#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode),
+__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY)
+#undef GENERATE_ENUM_ENTRY
};
static inline int ceph_osd_op_type_lock(int op)
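One payoff of the __CEPH_FORALL_OSD_OPS() X-macro is that other expansions can reuse the same table; a name-lookup sketch (assumed, not necessarily the in-tree helper) would be:

#define GENERATE_CASE(name, opcode, str)        case CEPH_OSD_OP_##name: return (str);

static const char *example_osd_op_name(int op)
{
        switch (op) {
        __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
        default:
                return "???";
        }
}
#undef GENERATE_CASE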
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b5223c570eba..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,7 +27,6 @@
struct cgroup_root;
struct cgroup_subsys;
-struct inode;
struct cgroup;
extern int cgroup_init_early(void);
@@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p);
extern int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry);
-extern int proc_cgroup_show(struct seq_file *, void *);
+extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css)
}
/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+
+/**
* css_tryget - try to obtain a reference on the specified css
* @css: target css
*
@@ -159,13 +172,21 @@ static inline void css_put(struct cgroup_subsys_state *css)
percpu_ref_put(&css->refcnt);
}
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+
/* bits in struct cgroup flags field */
enum {
- /*
- * Control Group has previously had a child cgroup or a task,
- * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
- */
- CGRP_RELEASABLE,
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
/*
@@ -235,13 +256,6 @@ struct cgroup {
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
- * Linked list running through all cgroups that can
- * potentially be reaped by the release agent. Protected by
- * release_list_lock
- */
- struct list_head release_list;
-
- /*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
*/
@@ -250,6 +264,9 @@ struct cgroup {
/* used to wait for offlining of csses */
wait_queue_head_t offline_waitq;
+
+ /* used to schedule release agent */
+ struct work_struct release_agent_work;
};
#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -376,8 +393,8 @@ struct css_set {
* struct cftype: handler definitions for cgroup control files
*
* When reading/writing to a file:
- * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
- * - the 'cftype' of the file is file->f_dentry->d_fsdata
+ * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
+ * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
*/
/* cftype->flags */
@@ -536,13 +553,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp)
return !list_empty(&cgrp->cset_links);
}
-/* returns ino associated with a cgroup, 0 indicates unmounted root */
+/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- if (cgrp->kn)
- return cgrp->kn->ino;
- else
- return 0;
+ return cgrp->kn->ino;
}
/* cft/css accessors for cftype->write() operation */
@@ -624,8 +638,10 @@ struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
int (*css_online)(struct cgroup_subsys_state *css);
void (*css_offline)(struct cgroup_subsys_state *css);
+ void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_e_css_changed)(struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
@@ -920,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
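A small sketch of what the batched reference helpers above enable, e.g. moving many references between csses in one shot; the names and count parameter are assumptions.

static void example_move_refs(struct cgroup_subsys_state *from,
                              struct cgroup_subsys_state *to,
                              unsigned int nr_refs)
{
        css_get_many(to, nr_refs);
        css_put_many(from, nr_refs);
}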
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index efbf70b9fd84..0ca5f6046920 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -46,8 +46,10 @@ struct clk {
unsigned int enable_count;
unsigned int prepare_count;
unsigned long accuracy;
+ int phase;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node debug_node;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 411dd7eb2653..d936409520f8 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
#ifdef CONFIG_COMMON_CLK
@@ -129,6 +130,14 @@ struct dentry;
* set then clock accuracy will be initialized to parent accuracy
* or 0 (perfect clock) if clock has no parent.
*
+ * @get_phase: Queries the hardware to get the current phase of a clock.
+ * Returned values are 0-359 degrees on success, negative
+ * error codes on failure.
+ *
+ * @set_phase: Shift the phase of this clock signal by the number of
+ * degrees specified by the second argument. Valid values for degrees are
+ * 0-359. Return 0 on success, otherwise a negative error code.
+ *
* @init: Perform platform-specific initialization magic.
 * This is not used by any of the basic clock types.
* Please consider other ways of solving initialization problems
@@ -167,7 +176,7 @@ struct clk_ops {
unsigned long *parent_rate);
long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_clk);
+ struct clk_hw **best_parent_hw);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -177,6 +186,8 @@ struct clk_ops {
unsigned long parent_rate, u8 index);
unsigned long (*recalc_accuracy)(struct clk_hw *hw,
unsigned long parent_accuracy);
+ int (*get_phase)(struct clk_hw *hw);
+ int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
@@ -341,7 +352,6 @@ struct clk_divider {
#define CLK_DIVIDER_READ_ONLY BIT(5)
extern const struct clk_ops clk_divider_ops;
-extern const struct clk_ops clk_divider_ro_ops;
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -488,6 +498,28 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+/**
+ * struct clk_gpio - gpio gated clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @gpiod: gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock.
+ * Implements .enable, .disable and .is_enabled
+ */
+
+struct clk_gpio {
+ struct clk_hw hw;
+ struct gpio_desc *gpiod;
+};
+
+extern const struct clk_ops clk_gpio_gate_ops;
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+ const char *parent_name, struct gpio_desc *gpio,
+ unsigned long flags);
+
+void of_gpio_clk_gate_setup(struct device_node *node);
+
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
@@ -512,16 +544,14 @@ u8 __clk_get_num_parents(struct clk *clk);
struct clk *__clk_get_parent(struct clk *clk);
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
unsigned int __clk_get_enable_count(struct clk *clk);
-unsigned int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
-unsigned long __clk_get_accuracy(struct clk *clk);
unsigned long __clk_get_flags(struct clk *clk);
bool __clk_is_prepared(struct clk *clk);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_p);
+ struct clk_hw **best_parent_p);
/*
* FIXME clock api without lock protection
@@ -620,7 +650,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
#ifdef CONFIG_DEBUG_FS
-struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
void *data, const struct file_operations *fops);
#endif
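Provider-side sketch for the new phase callbacks; the my_* functions and the notion of a single phase register are assumptions about hypothetical hardware.

static int my_clk_get_phase(struct clk_hw *hw)
{
        return 0;                       /* read back degrees from hardware */
}

static int my_clk_set_phase(struct clk_hw *hw, int degrees)
{
        if (degrees < 0 || degrees > 359)
                return -EINVAL;
        /* program the phase register here */
        return 0;
}

static const struct clk_ops my_phase_clk_ops = {
        .get_phase      = my_clk_get_phase,
        .set_phase      = my_clk_set_phase,
};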
diff --git a/include/linux/clk.h b/include/linux/clk.h
index fb5e097d8f72..c7f258a81761 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -106,6 +106,25 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
*/
long clk_get_accuracy(struct clk *clk);
+/**
+ * clk_set_phase - adjust the phase shift of a clock signal
+ * @clk: clock signal source
+ * @degrees: number of degrees the signal is shifted
+ *
+ * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
+ * success, a negative error code otherwise.
+ */
+int clk_set_phase(struct clk *clk, int degrees);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * a negative error code.
+ */
+int clk_get_phase(struct clk *clk);
+
#else
static inline long clk_get_accuracy(struct clk *clk)
@@ -113,6 +132,16 @@ static inline long clk_get_accuracy(struct clk *clk)
return -ENOTSUPP;
}
+static inline int clk_set_phase(struct clk *clk, int phase)
+{
+ return -ENOTSUPP;
+}
+
+static inline int clk_get_phase(struct clk *clk)
+{
+ return -ENOTSUPP;
+}
+
#endif
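Consumer-side sketch for clk_set_phase()/clk_get_phase(); the clock name and the 90-degree quadrature value are illustrative.

static int example_quadrature_setup(struct device *dev)
{
        struct clk *clk = devm_clk_get(dev, "sample");
        int ret;

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_set_phase(clk, 90);
        if (ret)
                return ret;

        return clk_get_phase(clk) == 90 ? 0 : -EIO;
}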
/**
@@ -238,7 +267,7 @@ void clk_put(struct clk *clk);
/**
* devm_clk_put - "free" a managed clock source
- * @dev: device used to acuqire the clock
+ * @dev: device used to acquire the clock
* @clk: clock source acquired with devm_clk_get()
*
* Note: drivers must ensure that all clk_enable calls made on this
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index de4268d4987a..c8e3b3d1eded 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -125,6 +125,7 @@ extern void __iomem *at91_pmc_base;
#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */
#define AT91_PMC_PLLADIV2_OFF (0 << 12)
#define AT91_PMC_PLLADIV2_ON (1 << 12)
+#define AT91_PMC_H32MXDIV BIT(24)
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index e8d8a35034a5..55ef529a0dbf 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -254,13 +254,26 @@ extern const struct clk_ops ti_clk_mux_ops;
void omap2_init_clk_hw_omap_clocks(struct clk *clk);
int omap3_noncore_dpll_enable(struct clk_hw *hw);
void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index);
+long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_clk);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
unsigned long parent_rate);
long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
unsigned long target_rate,
unsigned long *parent_rate);
+long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_clk);
u8 omap2_init_dpll_parent(struct clk_hw *hw);
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
@@ -278,6 +291,8 @@ int omap2_clk_disable_autoidle_all(void);
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index);
int omap2_dflt_clk_enable(struct clk_hw *hw);
void omap2_dflt_clk_disable(struct clk_hw *hw);
int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
@@ -292,6 +307,7 @@ void omap2xxx_clkt_vps_init(void);
void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
void ti_dt_clocks_register(struct ti_dt_clk *oclks);
void ti_dt_clk_init_provider(struct device_node *np, int index);
+void ti_dt_clk_init_retry_clks(void);
void ti_dt_clockdomains_setup(void);
int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
ti_of_clk_init_cb_t func);
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
new file mode 100644
index 000000000000..4d1019d56f7f
--- /dev/null
+++ b/include/linux/clock_cooling.h
@@ -0,0 +1,65 @@
+/*
+ * linux/include/linux/clock_cooling.h
+ *
+ * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ * Largely based on cpu_cooling.c.
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __CPU_COOLING_H__
+#define __CPU_COOLING_H__
+
+#include <linux/of.h>
+#include <linux/thermal.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_CLOCK_THERMAL
+/**
+ * clock_cooling_register - function to create clock cooling device.
+ * @dev: struct device pointer to the device used as clock cooling device.
+ * @clock_name: string containing the clock used as cooling mechanism.
+ */
+struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name);
+
+/**
+ * clock_cooling_unregister - function to remove clock cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void clock_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq);
+#else /* !CONFIG_CLOCK_THERMAL */
+static inline struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name)
+{
+ return NULL;
+}
+static inline
+void clock_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+}
+static inline
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
+#endif /* CONFIG_CLOCK_THERMAL */
+
+#endif /* __CPU_COOLING_H__ */
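
As a rough illustration of the interface added above, a platform driver's probe path could register a clock cooling device as follows. This is a minimal sketch only: the "my_cooling_clk" clock name, the my_* function names and the error handling are hypothetical and not part of this patch.

#include <linux/clock_cooling.h>
#include <linux/err.h>

static struct thermal_cooling_device *my_cdev;

static int my_probe(struct device *dev)
{
	/* register the clock named "my_cooling_clk" as a cooling device */
	my_cdev = clock_cooling_register(dev, "my_cooling_clk");
	if (IS_ERR(my_cdev))	/* the stub returns NULL when CONFIG_CLOCK_THERMAL=n */
		return PTR_ERR(my_cdev);
	return 0;
}

static void my_remove(struct device *dev)
{
	clock_cooling_unregister(my_cdev);
}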
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 653f0e2b6ca9..abcafaa20b86 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
-extern struct clocksource * __init __weak clocksource_default_clock(void);
+extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
extern u64
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 371b93042520..9384ba66e975 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,13 +15,17 @@
struct cma;
+extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);
-extern int __init cma_declare_contiguous(phys_addr_t size,
- phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
+extern int cma_init_reserved_mem(phys_addr_t base,
+ phys_addr_t size, int order_per_bit,
+ struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
#endif
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
index 5dcfb944b6ce..85898995b234 100644
--- a/include/linux/com20020.h
+++ b/include/linux/com20020.h
@@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops;
#define BUS_ALIGN 1
#endif
+#define PLX_PCI_MAX_CARDS 2
+
+struct com20020_pci_channel_map {
+ u32 bar;
+ u32 offset;
+ u32 size; /* 0x00 - auto, e.g. length of entire bar */
+};
+
+struct com20020_pci_card_info {
+ const char *name;
+ int devcount;
+
+ struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
+
+ unsigned int flags;
+};
+
+struct com20020_priv {
+ struct com20020_pci_card_info *ci;
+ struct list_head list_dev;
+};
+
+struct com20020_dev {
+ struct list_head list;
+ struct net_device *dev;
+
+ struct com20020_priv *pci_priv;
+ int index;
+};
#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 01e3132820da..3238ffa33f68 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -2,14 +2,24 @@
#define _LINUX_COMPACTION_H
/* Return values for compact_zone() and try_to_compact_pages() */
+/* compaction didn't start as it was deferred due to past failures */
+#define COMPACT_DEFERRED 0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
-#define COMPACT_SKIPPED 0
+#define COMPACT_SKIPPED 1
/* compaction should continue to another pageblock */
-#define COMPACT_CONTINUE 1
+#define COMPACT_CONTINUE 2
/* direct compaction partially compacted a zone and there are suitable pages */
-#define COMPACT_PARTIAL 2
+#define COMPACT_PARTIAL 3
/* The full zone was compacted */
-#define COMPACT_COMPLETE 3
+#define COMPACT_COMPLETE 4
+
+/* Used to signal whether compaction detected need_resched() or lock contention */
+/* No contention detected */
+#define COMPACT_CONTENDED_NONE 0
+/* Either need_resched() was true or fatal signal pending */
+#define COMPACT_CONTENDED_SCHED 1
+/* Zone lock or lru_lock was contended in async compaction */
+#define COMPACT_CONTENDED_LOCK 2
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
@@ -22,10 +32,12 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
- enum migrate_mode mode, bool *contended);
+ enum migrate_mode mode, int *contended,
+ int alloc_flags, int classzone_idx);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern unsigned long compaction_suitable(struct zone *zone, int order);
+extern unsigned long compaction_suitable(struct zone *zone, int order,
+ int alloc_flags, int classzone_idx);
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
@@ -91,7 +103,8 @@ static inline bool compaction_restarting(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- enum migrate_mode mode, bool *contended)
+ enum migrate_mode mode, int *contended,
+ int alloc_flags, int classzone_idx)
{
return COMPACT_CONTINUE;
}
@@ -104,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline unsigned long compaction_suitable(struct zone *zone, int order)
+static inline unsigned long compaction_suitable(struct zone *zone, int order,
+ int alloc_flags, int classzone_idx)
{
return COMPACT_SKIPPED;
}
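
With COMPACT_DEFERRED added and *contended now carrying a COMPACT_CONTENDED_* value rather than a bool, a caller in the page allocator slow path would interpret the new contract roughly as below. This is only a sketch of the calling convention; the surrounding variables (zonelist, order, gfp_mask, nodemask, alloc_flags, classzone_idx) are assumed to exist in the caller and are not defined here.

	int contended = COMPACT_CONTENDED_NONE;
	unsigned long rc;

	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				  MIGRATE_ASYNC, &contended,
				  alloc_flags, classzone_idx);
	if (rc == COMPACT_DEFERRED) {
		/* compaction was skipped because of recent failures */
	} else if (contended == COMPACT_CONTENDED_SCHED) {
		/* need_resched() or a fatal signal interrupted compaction */
	} else if (contended == COMPACT_CONTENDED_LOCK) {
		/* zone lock or lru_lock contention during async compaction */
	}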
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e6494261eaff..7450ca2ac1fc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, int flags);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 2507fd2a1eb4..d1a558239b1a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -71,7 +71,6 @@
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 000000000000..c8c565952548
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,65 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d5ad7b1118fc..a1c81f80978e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
+#include <uapi/linux/types.h>
+
+static __always_inline void data_access_exceeds_word_size(void)
+#ifdef __compiletime_warning
+__compiletime_warning("data access exceeds word size and won't be atomic")
+#endif
+;
+
+static __always_inline void data_access_exceeds_word_size(void)
+{
+}
+
+static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+#ifdef CONFIG_64BIT
+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+#endif
+ default:
+ barrier();
+ __builtin_memcpy((void *)res, (const void *)p, size);
+ data_access_exceeds_word_size();
+ barrier();
+ }
+}
+
+static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+#ifdef CONFIG_64BIT
+ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+#endif
+ default:
+ barrier();
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ data_access_exceeds_word_size();
+ barrier();
+ }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering. One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
+ * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+ ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+
+#define ASSIGN_ONCE(val, x) \
+ ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
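
As a concrete example of the rules described in the comment above, a flag shared between process context and an interrupt handler can be accessed with the new macros so the compiler neither caches nor tears the accesses. The shared_flag variable and my_irq_handler() below are illustrative only.

#include <linux/compiler.h>
#include <linux/interrupt.h>

static int shared_flag;

static irqreturn_t my_irq_handler(int irq, void *data)
{
	ASSIGN_ONCE(1, shared_flag);		/* single, non-torn store */
	return IRQ_HANDLED;
}

static void wait_for_flag(void)
{
	while (!READ_ONCE(shared_flag))		/* forces a fresh load each iteration */
		cpu_relax();
}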
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 000000000000..5d3c54311f7a
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,263 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4 0xfd0
+#define CORESIGHT_PERIPHIDR5 0xfd4
+#define CORESIGHT_PERIPHIDR6 0xfd8
+#define CORESIGHT_PERIPHIDR7 0xfdC
+#define CORESIGHT_PERIPHIDR0 0xfe0
+#define CORESIGHT_PERIPHIDR1 0xfe4
+#define CORESIGHT_PERIPHIDR2 0xfe8
+#define CORESIGHT_PERIPHIDR3 0xfeC
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0 0xff0
+#define CORESIGHT_COMPIDR1 0xff4
+#define CORESIGHT_COMPIDR2 0xff8
+#define CORESIGHT_COMPIDR3 0xffC
+
+#define ETM_ARCH_V3_3 0x23
+#define ETM_ARCH_V3_5 0x25
+#define PFT_ARCH_V1_0 0x30
+#define PFT_ARCH_V1_1 0x31
+
+#define CORESIGHT_UNLOCK 0xc5acce55
+
+extern struct bus_type coresight_bustype;
+
+enum coresight_dev_type {
+ CORESIGHT_DEV_TYPE_NONE,
+ CORESIGHT_DEV_TYPE_SINK,
+ CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_LINKSINK,
+ CORESIGHT_DEV_TYPE_SOURCE,
+};
+
+enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+ CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+ CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+ CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+ CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+ CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+/**
+ * struct coresight_dev_subtype - further characterisation of a type
+ * @sink_subtype: type of sink this component is, as defined
+ by @coresight_dev_subtype_sink.
+ * @link_subtype: type of link this component is, as defined
+ by @coresight_dev_subtype_link.
+ * @source_subtype: type of source this component is, as defined
+ by @coresight_dev_subtype_source.
+ */
+struct coresight_dev_subtype {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ enum coresight_dev_subtype_source source_subtype;
+};
+
+/**
+ * struct coresight_platform_data - data harvested from the DT specification
+ * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
+ * @name: name of the component as shown under sysfs.
+ * @nr_inport: number of input ports for this component.
+ * @outports: list of remote endpoint port numbers.
+ * @child_names: names of all child components connected to this device.
+ * @child_ports: child component port numbers the current component is
+ connected to.
+ * @nr_outport: number of output ports for this component.
+ * @clk: The clock this component is associated to.
+ */
+struct coresight_platform_data {
+ int cpu;
+ const char *name;
+ int nr_inport;
+ int *outports;
+ const char **child_names;
+ int *child_ports;
+ int nr_outport;
+ struct clk *clk;
+};
+
+/**
+ * struct coresight_desc - description of a component required from drivers
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @pdata: platform data collected from DT.
+ * @dev: The device entity associated to this component.
+ * @groups: operations specific to this component. These will end up
+ in the component's sysfs sub-directory.
+ */
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct coresight_connection - representation of a single connection
+ * @outport: a connection's output port number.
+ * @child_name: remote component's name.
+ * @child_port: remote component's port number @output is connected to.
+ * @child_dev: a @coresight_device representation of the component
+ connected to @outport.
+ */
+struct coresight_connection {
+ int outport;
+ const char *child_name;
+ int child_port;
+ struct coresight_device *child_dev;
+};
+
+/**
+ * struct coresight_device - representation of a device as used by the framework
+ * @conns: array of coresight_connections associated to this component.
+ * @nr_inport: number of input ports associated to this component.
+ * @nr_outport: number of output ports associated to this component.
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @dev: The device entity associated to this component.
+ * @refcnt: keep track of what is in use.
+ * @path_link: link of current component into the path being enabled.
+ * @orphan: true if the component has connections that haven't been linked.
+ * @enable: 'true' if component is currently part of an active path.
+ * @activated: 'true' only if a _sink_ has been activated. A sink can be
+ activated but not yet enabled. Enabling for a _sink_
+ happens when a source has been selected for it.
+ */
+struct coresight_device {
+ struct coresight_connection *conns;
+ int nr_inport;
+ int nr_outport;
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct device dev;
+ atomic_t *refcnt;
+ struct list_head path_link;
+ bool orphan;
+ bool enable; /* true only if configured as part of a path */
+ bool activated; /* true only if a sink is part of a path */
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+#define source_ops(csdev) csdev->ops->source_ops
+#define sink_ops(csdev) csdev->ops->sink_ops
+#define link_ops(csdev) csdev->ops->link_ops
+
+#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \
+ __mode, __get, __set, __fmt) \
+DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \
+static const struct coresight_ops_entry __name ## _entry = { \
+ .name = __entry_name, \
+ .mode = __mode, \
+ .ops = &__name ## _ops \
+}
+
+/**
+ * struct coresight_ops_sink - basic operations for a sink
+ * Operations available for sinks
+ * @enable: enables the sink.
+ * @disable: disables the sink.
+ */
+struct coresight_ops_sink {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+/**
+ * struct coresight_ops_link - basic operations for a link
+ * Operations available for links.
+ * @enable: enables flow between iport and oport.
+ * @disable: disables flow between iport and oport.
+ */
+struct coresight_ops_link {
+ int (*enable)(struct coresight_device *csdev, int iport, int oport);
+ void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+/**
+ * struct coresight_ops_source - basic operations for a source
+ * Operations available for sources.
+ * @trace_id: returns the value of the component's trace ID as known
+ to the HW.
+ * @enable: enables tracing from a source.
+ * @disable: disables tracing for a source.
+ */
+struct coresight_ops_source {
+ int (*trace_id)(struct coresight_device *csdev);
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops {
+ const struct coresight_ops_sink *sink_ops;
+ const struct coresight_ops_link *link_ops;
+ const struct coresight_ops_source *source_ops;
+};
+
+#ifdef CONFIG_CORESIGHT
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
+extern int coresight_is_bit_set(u32 val, int position, int value);
+extern int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value);
+#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node);
+#endif
+#else
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
+static inline int coresight_is_bit_set(u32 val, int position, int value)
+ { return 0; }
+static inline int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value) { return 1; }
+#ifdef CONFIG_OF
+static inline struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node) { return NULL; }
+#endif
+#endif
+
+#endif
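
To show how the pieces of this header fit together, a hypothetical sink driver ("my_sink") would fill a coresight_desc and hand it to coresight_register() roughly as follows; the my_sink_* callbacks are assumed to be implemented elsewhere in the driver.

#include <linux/coresight.h>
#include <linux/err.h>

static int my_sink_enable(struct coresight_device *csdev);
static void my_sink_disable(struct coresight_device *csdev);

static const struct coresight_ops_sink my_sink_ops = {
	.enable	 = my_sink_enable,
	.disable = my_sink_disable,
};

static const struct coresight_ops my_cs_ops = {
	.sink_ops = &my_sink_ops,
};

static int my_sink_probe(struct device *dev, struct coresight_platform_data *pdata)
{
	struct coresight_desc desc = {
		.type			= CORESIGHT_DEV_TYPE_SINK,
		.subtype.sink_subtype	= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
		.ops			= &my_cs_ops,
		.pdata			= pdata,
		.dev			= dev,
	};
	struct coresight_device *csdev = coresight_register(&desc);

	return IS_ERR(csdev) ? PTR_ERR(csdev) : 0;
}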
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 95978ad7fcdd..4260e8594bd7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -19,6 +19,7 @@
struct device;
struct device_node;
+struct attribute_group;
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -39,6 +40,9 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+extern struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
@@ -213,6 +217,7 @@ extern struct bus_type cpu_subsys;
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
+extern bool try_get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -230,6 +235,7 @@ int cpu_down(unsigned int cpu);
static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
+#define try_get_online_cpus() true
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c303d383def1..bd955270d5aa 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
#endif
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
static inline struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
new file mode 100644
index 000000000000..0414009e2c30
--- /dev/null
+++ b/include/linux/cpufreq-dt.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2014 Marvell
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CPUFREQ_DT_H__
+#define __CPUFREQ_DT_H__
+
+struct cpufreq_dt_platform_data {
+ /*
+ * True when each CPU has its own clock to control its
+ * frequency, false when all CPUs are controlled by a single
+ * clock.
+ */
+ bool independent_clocks;
+};
+
+#endif /* __CPUFREQ_DT_H__ */
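
A platform where every CPU has its own clock would hand this structure to the cpufreq-dt platform device from its init code; a minimal sketch under that assumption (only the "cpufreq-dt" device name comes from the driver, the rest is illustrative):

#include <linux/cpufreq-dt.h>
#include <linux/platform_device.h>

static struct cpufreq_dt_platform_data my_cpufreq_pdata = {
	.independent_clocks = true,	/* one clock per CPU */
};

static void __init my_board_init_cpufreq(void)
{
	platform_device_register_data(NULL, "cpufreq-dt", -1,
				      &my_cpufreq_pdata,
				      sizeof(my_cpufreq_pdata));
}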
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7d1955afa62c..4d078cebafd2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -112,6 +112,9 @@ struct cpufreq_policy {
spinlock_t transition_lock;
wait_queue_head_t transition_wait;
struct task_struct *transition_task; /* Task which is doing the transition */
+
+ /* For cpufreq driver's internal use */
+ void *driver_data;
};
/* Only for ACPI */
@@ -214,25 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
- char name[CPUFREQ_NAME_LEN];
- u8 flags;
+ char name[CPUFREQ_NAME_LEN];
+ u8 flags;
+ void *driver_data;
/* needed by all drivers */
- int (*init) (struct cpufreq_policy *policy);
- int (*verify) (struct cpufreq_policy *policy);
+ int (*init)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy *policy);
/* define one out of two */
- int (*setpolicy) (struct cpufreq_policy *policy);
+ int (*setpolicy)(struct cpufreq_policy *policy);
/*
* On failure, should always restore frequency to policy->restore_freq
* (i.e. old freq).
*/
- int (*target) (struct cpufreq_policy *policy, /* Deprecated */
- unsigned int target_freq,
- unsigned int relation);
- int (*target_index) (struct cpufreq_policy *policy,
- unsigned int index);
+ int (*target)(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation); /* Deprecated */
+ int (*target_index)(struct cpufreq_policy *policy,
+ unsigned int index);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
@@ -248,27 +252,31 @@ struct cpufreq_driver {
* wish to switch to intermediate frequency for some target frequency.
* In that case core will directly call ->target_index().
*/
- unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
- unsigned int index);
- int (*target_intermediate)(struct cpufreq_policy *policy,
- unsigned int index);
+ unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*target_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
/* should be defined, if possible */
- unsigned int (*get) (unsigned int cpu);
+ unsigned int (*get)(unsigned int cpu);
/* optional */
- int (*bios_limit) (int cpu, unsigned int *limit);
+ int (*bios_limit)(int cpu, unsigned int *limit);
+
+ int (*exit)(struct cpufreq_policy *policy);
+ void (*stop_cpu)(struct cpufreq_policy *policy);
+ int (*suspend)(struct cpufreq_policy *policy);
+ int (*resume)(struct cpufreq_policy *policy);
+
+ /* Will be called after the driver is fully initialized */
+ void (*ready)(struct cpufreq_policy *policy);
- int (*exit) (struct cpufreq_policy *policy);
- void (*stop_cpu) (struct cpufreq_policy *policy);
- int (*suspend) (struct cpufreq_policy *policy);
- int (*resume) (struct cpufreq_policy *policy);
- struct freq_attr **attr;
+ struct freq_attr **attr;
/* platform specific boost support code */
- bool boost_supported;
- bool boost_enabled;
- int (*set_boost) (int state);
+ bool boost_supported;
+ bool boost_enabled;
+ int (*set_boost)(int state);
};
/* flags */
@@ -309,6 +317,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
const char *cpufreq_get_current_driver(void);
+void *cpufreq_get_driver_data(void);
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max)
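
The new driver_data members and cpufreq_get_driver_data() let a cpufreq driver carry a private pointer from registration into its callbacks; the round trip looks roughly like the sketch below (struct my_chip_data, my_init() and the "my-cpufreq" name are placeholders, not part of this patch).

#include <linux/cpufreq.h>

struct my_chip_data { int nr_boost_states; };	/* hypothetical private data */
static struct my_chip_data my_chip;

static int my_init(struct cpufreq_policy *policy)
{
	struct my_chip_data *d = cpufreq_get_driver_data();

	policy->driver_data = d;	/* per-policy pointer, also new in this patch */
	return 0;
}

static struct cpufreq_driver my_cpufreq_driver = {
	.name		= "my-cpufreq",
	.driver_data	= &my_chip,
	.init		= my_init,
	/* .verify, .target_index, ... omitted from this sketch */
};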
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 25e0df6155a4..ab70f3bc44ad 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,6 @@ struct cpuidle_state {
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
/**
* cpuidle_get_last_residency - retrieves the last state's residency time
* @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set
*/
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
{
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2997af6d2ccd..b950e9d6008b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -666,10 +666,19 @@ static inline size_t cpumask_size(void)
*
* This code makes NR_CPUS length memcopy and brings to a memory corruption.
* cpumask_copy() provide safe copy functionality.
+ *
+ * Note that there is another evil here: If you define a cpumask_var_t
+ * as a percpu variable then the way to obtain the address of the cpumask
+ * structure differs, and that determines which this_cpu_* operation must
+ * be used. Please use this_cpu_cpumask_var_ptr() in those cases. The
+ * direct use of this_cpu_ptr() or this_cpu_read() will lead to failures
+ * when the other type of cpumask_var_t implementation is configured.
*/
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;
+#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
@@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
#else
typedef struct cpumask cpumask_var_t[1];
+#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
+
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
@@ -792,6 +803,23 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
}
#endif /* NR_CPUS > BITS_PER_LONG */
+/**
+ * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
+ * as a comma-separated list of cpus or as hex values of the cpumask
+ * @list: indicates whether the cpumask should be printed as a list
+ * @mask: the cpumask to copy
+ * @buf: the buffer to copy into
+ *
+ * Returns the length of the (null-terminated) @buf string, zero if
+ * nothing is copied.
+ */
+static inline ssize_t
+cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
+{
+ return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
+ nr_cpumask_bits);
+}
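
The helper above is intended for sysfs show() callbacks that need to print a CPU mask; a sketch of such a callback, assuming a made-up my_dev_cpus mask:

static struct cpumask my_dev_cpus;

static ssize_t cpus_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* list form, e.g. "0-3,8"; pass false for a hex mask instead */
	return cpumap_print_to_pagebuf(true, buf, &my_dev_cpus);
}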
+
/*
*
* From here down, all obsolete. Use cpumask_ variants!
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 6e39c9bb0dae..1b357997cac5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
-extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
-static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
- return nr_cpusets() <= 1 ||
- __cpuset_node_allowed_softwall(node, gfp_mask);
+ return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}
-static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return nr_cpusets() <= 1 ||
- __cpuset_node_allowed_hardwall(node, gfp_mask);
-}
-
-static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
-{
- return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
-}
-
-static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
-{
- return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
+ return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -86,7 +73,8 @@ extern void __cpuset_memory_pressure_bump(void);
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
-extern int proc_cpuset_show(struct seq_file *, void *);
+extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);
@@ -178,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
-{
- return 1;
-}
-
-static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
-{
- return 1;
-}
-
-static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
return 1;
}
-static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return 1;
}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 72ab536ad3de..3849fce7ecfe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -14,14 +14,13 @@
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
- unsigned long long *size);
-extern void __weak elfcorehdr_free(unsigned long long addr);
-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
- unsigned long from, unsigned long pfn,
- unsigned long size, pgprot_t prot);
+extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
+extern void elfcorehdr_free(unsigned long long addr);
+extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..cf53d0773ce3 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,7 +6,8 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
-__u16 crc_t10dif(unsigned char const *, size_t);
+extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
+ size_t len);
+extern __u16 crc_t10dif(unsigned char const *, size_t);
#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b2d0820837c4..2fb2ca2127ed 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
+extern bool may_setgroups(void);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea..9c8776d0ada8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
#include <linux/uaccess.h>
/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
+/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
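
In a crypto module this macro sits next to the usual module metadata so that both the plain and the "crypto-" prefixed aliases exist for autoloading; for instance (using "sha256" purely as an example name):

#include <linux/crypto.h>
#include <linux/module.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example SHA-256 implementation");
MODULE_ALIAS_CRYPTO("sha256");		/* emits both "sha256" and "crypto-sha256" */
MODULE_ALIAS_CRYPTO("sha256-generic");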
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+/**
+ * DOC: Block Cipher Context Data Structures
+ *
+ * These data structures define the operating context for each block cipher
+ * type.
+ */
+
struct crypto_async_request {
struct list_head list;
crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
u32 flags;
};
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
+/**
+ * DOC: Block Cipher Algorithm Definitions
+ *
+ * These data structures define modular crypto algorithm implementations,
+ * managed via crypto_register_alg() and crypto_unregister_alg().
+ */
+
+/**
+ * struct ablkcipher_alg - asynchronous block cipher definition
+ * @min_keysize: Minimum key size supported by the transformation. This is the
+ * smallest key length supported by this transformation algorithm.
+ * This must be set to one of the pre-defined values as this is
+ * not hardware specific. Possible values for this field can be
+ * found via git grep "_MIN_KEY_SIZE" include/crypto/
+ * @max_keysize: Maximum key size supported by the transformation. This is the
+ * largest key length supported by this transformation algorithm.
+ * This must be set to one of the pre-defined values as this is
+ * not hardware specific. Possible values for this field can be
+ * found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
+ * the supplied scatterlist containing the blocks of data. The crypto
+ * API consumer is responsible for aligning the entries of the
+ * scatterlist properly and making sure the chunks are correctly
+ * sized. In case a software fallback was put in place in the
+ * @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes. In case the
+ * key was stored in transformation context, the key might need to be
+ * re-programmed into the hardware in this function. This function
+ * shall not modify the transformation context, as this function may
+ * be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
+ * and the conditions are exactly the same.
+ * @givencrypt: Update the IV for encryption. With this function, a cipher
+ * implementation may provide the function on how to update the IV
+ * for encryption.
+ * @givdecrypt: Update the IV for decryption. This is the reverse of
+ * @givencrypt .
+ * @geniv: The transformation implementation may use an "IV generator" provided
+ * by the kernel crypto API. Several use cases have a predefined
+ * approach how IVs are to be updated. For such use cases, the kernel
+ * crypto API provides ready-to-use implementations that can be
+ * referenced with this variable.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ *
+ * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
+ * mandatory and must be filled.
*/
struct ablkcipher_alg {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
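
Tying the kernel-doc above to a concrete shape, a hypothetical driver exposing an asynchronous AES-CBC implementation would fill these callbacks through the cra_u union of struct crypto_alg roughly as follows; the my_aes_* functions and the "my-accel" naming are placeholders.

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/module.h>

struct my_aes_ctx { u8 key[AES_MAX_KEY_SIZE]; unsigned int keylen; };

static int my_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			 unsigned int keylen);
static int my_aes_encrypt(struct ablkcipher_request *req);
static int my_aes_decrypt(struct ablkcipher_request *req);

static struct crypto_alg my_aes_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-my-accel",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct my_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= my_aes_setkey,
		.encrypt	= my_aes_encrypt,
		.decrypt	= my_aes_decrypt,
	},
};
/* the driver would register this with crypto_register_alg() */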
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ * transformation. A transformation may support smaller tag sizes.
+ * As the authentication tag is a message digest to ensure the
+ * integrity of the encrypted data, a consumer typically wants the
+ * largest authentication tag possible as defined by this
+ * variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ * function is used to specify the consumer requested size of the
+ * authentication tag to be either generated by the transformation
+ * during encryption or the size of the authentication tag to be
+ * supplied during the decryption operation. This function is also
+ * responsible for checking the authentication tag size for
+ * validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @givencrypt: see struct ablkcipher_alg
+ * @givdecrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
+ * mandatory and must be filled.
+ */
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
@@ -228,6 +328,18 @@ struct aead_alg {
unsigned int maxauthsize;
};
+/**
+ * struct blkcipher_alg - synchronous block cipher definition
+ * @min_keysize: see struct ablkcipher_alg
+ * @max_keysize: see struct ablkcipher_alg
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @geniv and @ivsize are mandatory and must be filled.
+ */
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
@@ -245,6 +357,53 @@ struct blkcipher_alg {
unsigned int ivsize;
};
+/**
+ * struct cipher_alg - single-block symmetric ciphers definition
+ * @cia_min_keysize: Minimum key size supported by the transformation. This is
+ * the smallest key length supported by this transformation
+ * algorithm. This must be set to one of the pre-defined
+ * values as this is not hardware specific. Possible values
+ * for this field can be found via git grep "_MIN_KEY_SIZE"
+ * include/crypto/
+ * @cia_max_keysize: Maximum key size supported by the transformation. This is
+ * the largest key length supported by this transformation
+ * algorithm. This must be set to one of the pre-defined values
+ * as this is not hardware specific. Possible values for this
+ * field can be found via git grep "_MAX_KEY_SIZE"
+ * include/crypto/
+ * @cia_setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function
+ * can be called multiple times during the existence of the
+ * transformation object, so one must make sure the key is properly
+ * reprogrammed into the hardware. This function is also
+ * responsible for checking the key length for validity.
+ * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
+ * single block of data, which must be @cra_blocksize big. This
+ * always operates on a full @cra_blocksize and it is not possible
+ * to encrypt a block of smaller size. The supplied buffers must
+ * therefore also be at least of @cra_blocksize size. Both the
+ * input and output buffers are always aligned to @cra_alignmask.
+ * In case either of the input or output buffer supplied by user
+ * of the crypto API is not aligned to @cra_alignmask, the crypto
+ * API will re-align the buffers. The re-alignment means that a
+ * new buffer will be allocated, the data will be copied into the
+ * new buffer, then the processing will happen on the new buffer,
+ * then the data will be copied back into the original buffer and
+ * finally the new buffer will be freed. In case a software
+ * fallback was put in place in the @cra_init call, this function
+ * might need to use the fallback if the algorithm doesn't support
+ * all of the key sizes. In case the key was stored in
+ * transformation context, the key might need to be re-programmed
+ * into the hardware in this function. This function shall not
+ * modify the transformation context, as this function may be
+ * called in parallel with the same transformation object.
+ * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
+ * @cia_encrypt, and the conditions are exactly the same.
+ *
+ * All fields are mandatory and must be filled.
+ */
struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
@@ -261,6 +420,25 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
+/**
+ * struct rng_alg - random number generator definition
+ * @rng_make_random: The function defined by this variable obtains a random
+ * number. The random number generator transform must generate
+ * the random number out of the context provided with this
+ * call.
+ * @rng_reset: Reset of the random number generator by clearing the entire state.
+ * With the invocation of this function call, the random number
+ * generator shall completely reinitialize its state. If the random
+ * number generator requires a seed for setting up a new state,
+ * the seed must be provided by the consumer while invoking this
+ * function. The required size of the seed is defined with
+ * @seedsize .
+ * @seedsize: The seed size required for a random number generator
+ * initialization defined with this variable. Some random number
+ * generators like the SP800-90A DRBG do not require a seed as the
+ * seeding is implemented internally without the need of support by
+ * the consumer. In this case, the seed size is set to zero.
+ */
struct rng_alg {
int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen);
@@ -277,6 +455,81 @@ struct rng_alg {
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
+/**
+ * struct crypto_alg - definition of a cryptograpic cipher algorithm
+ * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
+ * CRYPTO_ALG_* flags for the flags which go in here. Those are
+ * used for fine-tuning the description of the transformation
+ * algorithm.
+ * @cra_blocksize: Minimum block size of this transformation. The size in bytes
+ * of the smallest possible unit which can be transformed with
+ * this algorithm. The users must respect this value.
+ * In case of HASH transformation, it is possible for a smaller
+ * block than @cra_blocksize to be passed to the crypto API for
+ * transformation, in case of any other transformation type, an
+ * error will be returned upon any attempt to transform smaller
+ * than @cra_blocksize chunks.
+ * @cra_ctxsize: Size of the operational context of the transformation. This
+ * value informs the kernel crypto API about the memory size
+ * needed to be allocated for the transformation context.
+ * @cra_alignmask: Alignment mask for the input and output data buffer. The data
+ * buffer containing the input data for the algorithm must be
+ * aligned to this alignment mask. The data buffer for the
+ * output data must be aligned to this alignment mask. Note that
+ * the Crypto API will do the re-alignment in software, but
+ * only under special conditions and there is a performance hit.
+ * The re-alignment happens at these occasions for different
+ * @cra_u types: cipher -- For both input data and output data
+ * buffer; ahash -- For output hash destination buf; shash --
+ * For output hash destination buf.
+ * This is needed on hardware which is flawed by design and
+ * cannot pick data from arbitrary addresses.
+ * @cra_priority: Priority of this transformation implementation. In case
+ * multiple transformations with same @cra_name are available to
+ * the Crypto API, the kernel will use the one with highest
+ * @cra_priority.
+ * @cra_name: Generic name (usable by multiple implementations) of the
+ * transformation algorithm. This is the name of the transformation
+ * itself. This field is used by the kernel when looking up the
+ * providers of particular transformation.
+ * @cra_driver_name: Unique name of the transformation provider. This is the
+ * name of the provider of the transformation. This can be any
+ * arbitrary value, but in the usual case, this contains the
+ * name of the chip or provider and the name of the
+ * transformation algorithm.
+ * @cra_type: Type of the cryptographic transformation. This is a pointer to
+ * struct crypto_type, which implements callbacks common for all
+ * transformation types. There are multiple options:
+ * &crypto_blkcipher_type, &crypto_ablkcipher_type,
+ * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * This field might be empty. In that case, there are no common
+ * callbacks. This is the case for: cipher, compress, shash.
+ * @cra_u: Callbacks implementing the transformation. This is a union of
+ * multiple structures. Depending on the type of transformation selected
+ * by @cra_type and @cra_flags above, the associated structure must be
+ * filled with callbacks. This field might be empty. This is the case
+ * for ahash, shash.
+ * @cra_init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @cra_exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @cra_init, used to remove various changes set in
+ * @cra_init.
+ * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
+ * @cra_list: internally used
+ * @cra_users: internally used
+ * @cra_refcnt: internally used
+ * @cra_destroy: internally used
+ *
+ * The struct crypto_alg describes a generic Crypto API algorithm and is common
+ * for all of the transformations. Any variable not documented here shall not
+ * be used by a cipher implementation as it is internal to the Crypto API.
+ */
struct crypto_alg {
struct list_head cra_list;
struct list_head cra_users;
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask)
return mask;
}
+/**
+ * DOC: Asynchronous Block Cipher API
+ *
+ * Asynchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately before the completion of the operation.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, additional information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the ablkcipher_request data structure.
+ *
+ * For the asynchronous block cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if it issued multiple operations in parallel. This
+ * state information is unused by the kernel crypto API.
+ */
+
+/**
+ * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ablkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ablkcipher. The returned struct
+ * crypto_ablkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that ablkcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
u32 type, u32 mask);
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm(
return &tfm->base;
}
+/**
+ * crypto_free_ablkcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}
+/**
+ * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ablkcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the ablkcipher is known to the kernel crypto API; false
+ * otherwise
+ */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
u32 mask)
{
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}
+/**
+ * crypto_ablkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the ablkcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
static inline unsigned int crypto_ablkcipher_ivsize(
struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_crt(tfm)->ivsize;
}
+/**
+ * crypto_ablkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the ablkcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_ablkcipher_blocksize(
struct crypto_ablkcipher *tfm)
{
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}
+/**
+ * crypto_ablkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ablkcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
return crt->setkey(crt->base, key, keylen);
}
+/**
+ * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
+ * @req: ablkcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_ablkcipher handle that is registered with the given
+ * ablkcipher_request data structure.
+ *
+ * Return: crypto_ablkcipher handle
+ */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
struct ablkcipher_request *req)
{
return __crypto_ablkcipher_cast(req->base.tfm);
}
+/**
+ * crypto_ablkcipher_encrypt() - encrypt plaintext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
return crt->encrypt(req);
}
+/**
+ * crypto_ablkcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
return crt->decrypt(req);
}
+/**
+ * DOC: Asynchronous Cipher Request Handle
+ *
+ * The ablkcipher_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple ablkcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the ablkcipher_request_* API calls in a similar way as
+ * the ablkcipher handle does for the crypto_ablkcipher_* API calls.
+ */
+
+/**
+ * crypto_ablkcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
static inline unsigned int crypto_ablkcipher_reqsize(
struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_crt(tfm)->reqsize;
}
+/**
+ * ablkcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ablkcipher handle in the request
+ * data structure with a different one.
+ */
static inline void ablkcipher_request_set_tfm(
struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
return container_of(req, struct ablkcipher_request, base);
}
+/**
+ * ablkcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ablkcipher
+ * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of
+ * memory
+ */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
struct crypto_ablkcipher *tfm, gfp_t gfp)
{
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
return req;
}
+/**
+ * ablkcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
kzfree(req);
}
+/**
+ * ablkcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the ablkcipher_request handle and
+ * must comply with the following template:
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
static inline void ablkcipher_request_set_callback(
struct ablkcipher_request *req,
u32 flags, crypto_completion_t compl, void *data)
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback(
req->base.flags = flags;
}
+/**
+ * ablkcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @nbytes: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_ablkcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed: the source is the ciphertext and the destination is the plaintext.
+ */
static inline void ablkcipher_request_set_crypt(
struct ablkcipher_request *req,
struct scatterlist *src, struct scatterlist *dst,
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt(
req->info = iv;
}
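Tying these calls together, a sketch of one in-place encryption that waits for asynchronous completion (the "cbc(aes)" string, the 16-byte key and the example_ names are illustrative assumptions; len is assumed to be a multiple of the cipher block size):

	/* requires <linux/crypto.h>, <linux/scatterlist.h>,
	 * <linux/completion.h> and <linux/err.h> */
	struct example_result {
		struct completion completion;
		int err;
	};

	static void example_cb(struct crypto_async_request *req, int error)
	{
		struct example_result *res = req->data;

		if (error == -EINPROGRESS)
			return;		/* backlogged request has now been started */
		res->err = error;
		complete(&res->completion);
	}

	static int example_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		struct example_result res;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		init_completion(&res.completion);
		ablkcipher_request_set_callback(req,
				CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				example_cb, &res);

		ret = crypto_ablkcipher_setkey(tfm, key, 16);	/* AES-128 */
		if (ret)
			goto out_free_req;

		sg_init_one(&sg, buf, len);	/* in-place: same sg for src and dst */
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_ablkcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&res.completion);
			ret = res.err;
		}

	out_free_req:
		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}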
+/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples of this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ * authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
+ * for their *aead* counterparts discussed in the following. In addition, for
+ * the AEAD operation, the aead_request_set_assoc function must be used to set
+ * the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG returned
+ * by crypto_aead_decrypt. That error indicates an authentication error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
+
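As a setup sketch for this section (the "gcm(aes)" name, the AES-128 key length, the 16-byte tag size and the example_ helper are illustrative assumptions):

	/* requires <linux/crypto.h> and <linux/err.h> */
	static int example_aead_setup(struct crypto_aead **out, const u8 *key)
	{
		struct crypto_aead *tfm;
		int ret;

		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_aead_setkey(tfm, key, 16);		/* AES-128 */
		if (!ret)
			ret = crypto_aead_setauthsize(tfm, 16);	/* 16-byte tag */
		if (ret) {
			crypto_free_aead(tfm);
			return ret;
		}

		*out = tfm;
		return 0;
	}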
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return (struct crypto_aead *)tfm;
}
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
crypto_free_tfm(crypto_aead_tfm(tfm));
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
return &crypto_aead_tfm(tfm)->crt_aead;
}
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->ivsize;
}
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->authsize;
}
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
{
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
+ * providing a 16-byte key for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return crt->setkey(crt->base, key, keylen);
}
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the authentication size was successful; < 0 if
+ * an error occurred
+ */
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The encryption operation creates the authentication data /
+ * tag. That data is concatenated with the created ciphertext.
+ * The ciphertext memory size is therefore the given number of
+ * block cipher blocks + the size defined by the
+ * crypto_aead_setauthsize invocation. The caller must ensure
+ * that sufficient memory is available for the ciphertext and
+ * the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_aead_encrypt(struct aead_request *req)
{
return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
}
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
+ * authentication data / tag. That authentication data / tag
+ * must have the size defined by the crypto_aead_setauthsize
+ * invocation.
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ * cipher operation performs the authentication of the data during the
+ * decryption operation. Therefore, the function returns this error if
+ * the authentication of the ciphertext was unsuccessful (i.e. the
+ * integrity of the ciphertext or the associated data was violated);
+ * < 0 if an error occurred.
+ */
static inline int crypto_aead_decrypt(struct aead_request *req)
{
return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}
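A small sketch of the error handling this implies (the helper name is an assumption; for a request completing asynchronously, the same error code is delivered through the registered callback instead of the return value):

	/* requires <linux/crypto.h> and <linux/printk.h> */
	static int example_aead_decrypt_checked(struct aead_request *req)
	{
		int ret = crypto_aead_decrypt(req);

		/* -EBADMSG: tag verification failed, message integrity violated */
		if (ret == -EBADMSG)
			pr_warn("aead example: authentication failed\n");

		return ret;
	}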
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as the AEAD handle does for the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->reqsize;
}
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
static inline void aead_request_set_tfm(struct aead_request *req,
struct crypto_aead *tfm)
{
req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
}
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of
+ * memory
+ */
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
gfp_t gfp)
{
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
return req;
}
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
static inline void aead_request_free(struct aead_request *req)
{
kzfree(req);
}
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template:
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
static inline void aead_request_set_callback(struct aead_request *req,
u32 flags,
crypto_completion_t compl,
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req,
req->base.flags = flags;
}
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_aead_ivsize()
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed: the source is the ciphertext and the destination is the plaintext.
+ *
+ * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
+ * the caller must concatenate the ciphertext followed by the
+ * authentication tag and provide the entire data stream to the
+ * decryption operation (i.e. the data length used for the
+ * initialization of the scatterlist and the data length for the
+ * decryption operation is identical). For encryption, however,
+ * the authentication tag is created while encrypting the data.
+ * The destination buffer must hold sufficient space for the
+ * ciphertext and the authentication tag while the encryption
+ * invocation must only point to the plaintext data size. The
+ * following code snippet illustrates the memory usage:
+ * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
+ * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
static inline void aead_request_set_crypt(struct aead_request *req,
struct scatterlist *src,
struct scatterlist *dst,
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req,
req->iv = iv;
}
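Complementing the encryption snippet in the note above, a decryption-side sketch of the same layout (ctlen, authsize, ciphertext, tag, iv and req are assumed to be provided by the surrounding code; kmalloc needs <linux/slab.h>):

	u8 *buf;
	struct scatterlist sg;
	int ret;

	/* ciphertext immediately followed by the authentication tag */
	buf = kmalloc(ctlen + authsize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, ciphertext, ctlen);
	memcpy(buf + ctlen, tag, authsize);

	sg_init_one(&sg, buf, ctlen + authsize);
	/* cryptlen covers ciphertext plus tag for decryption */
	aead_request_set_crypt(req, &sg, &sg, ctlen + authsize, iv);
	ret = crypto_aead_decrypt(req);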
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * For encryption, the memory is filled with the associated data. For
+ * decryption, the memory must point to the associated data.
+ */
static inline void aead_request_set_assoc(struct aead_request *req,
struct scatterlist *assoc,
unsigned int assoclen)
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req,
req->assoclen = assoclen;
}
+/**
+ * DOC: Synchronous Block Cipher API
+ *
+ * The synchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
+ *
+ * Synchronous calls have a context in the tfm. But since a single tfm can be
+ * used in multiple calls and in parallel, this info should not be changeable
+ * (unless a lock is used). This applies, for example, to the symmetric key.
+ * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
+ * structure for the synchronous blkcipher API. Hence, the IV is the only state
+ * information that can be kept for synchronous calls without using a big lock
+ * across a tfm.
+ *
+ * The block cipher API allows the use of a complete cipher, i.e. a cipher
+ * consisting of a template (a block chaining mode) and a single block cipher
+ * primitive (e.g. AES).
+ *
+ * The plaintext data buffer and the ciphertext data buffer are pointed to
+ * by using scatter/gather lists. The cipher operation is performed
+ * on all segments of the provided scatter/gather lists.
+ *
+ * The kernel crypto API supports a cipher operation "in-place" which means that
+ * the caller may provide the same scatter/gather list for the plaintext and
+ * cipher text. After the completion of the cipher operation, the plaintext
+ * data is replaced with the ciphertext data in case of an encryption and vice
+ * versa for a decryption. The caller must ensure that the scatter/gather lists
+ * for the output data point to sufficiently large buffers, i.e. multiples of
+ * the block size of the cipher.
+ */
+
static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
struct crypto_tfm *tfm)
{
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
return __crypto_blkcipher_cast(tfm);
}
+/**
+ * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * blkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a block cipher. The returned struct
+ * crypto_blkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
const char *alg_name, u32 type, u32 mask)
{
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm(
return &tfm->base;
}
+/**
+ * crypto_free_blkcipher() - zeroize and free the block cipher handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}
+/**
+ * crypto_has_blkcipher() - Search for the availability of a block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the block cipher is known to the kernel crypto API; false
+ * otherwise
+ */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
+/**
+ * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
+ * @tfm: cipher handle
+ *
+ * Return: The character string holding the name of the cipher
+ */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg(
return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}
+/**
+ * crypto_blkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the block cipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
return crypto_blkcipher_alg(tfm)->ivsize;
}
+/**
+ * crypto_blkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the block cipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_blkcipher_blocksize(
struct crypto_blkcipher *tfm)
{
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}
+/**
+ * crypto_blkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the block cipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
+ * providing a 16-byte key for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
key, keylen);
}
+/**
+ * crypto_blkcipher_encrypt() - encrypt plaintext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
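A compact sketch of one synchronous, in-place CBC encryption using the desc convention described above (the "cbc(aes)" string and the requirement that len be a multiple of the block size are assumptions of this example):

	/* requires <linux/crypto.h>, <linux/scatterlist.h> and <linux/err.h> */
	static int example_blkcipher_encrypt(u8 *buf, unsigned int len,
					     const u8 *key, const u8 *iv)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int ret;

		/* mask out asynchronous implementations */
		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_blkcipher_setkey(tfm, key, 16);	/* AES-128 */
		if (ret)
			goto out;

		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

		desc.tfm = tfm;
		desc.flags = 0;

		sg_init_one(&sg, buf, len);	/* in-place encryption */
		ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
	out:
		crypto_free_blkcipher(tfm);
		return ret;
	}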
+/**
+ * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.info is filled with the IV to be used for
+ * the current operation; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_decrypt() - decrypt ciphertext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ *
+ */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt_iv call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_set_iv() - set IV for cipher
+ * @tfm: cipher handle
+ * @src: buffer holding the IV
+ * @len: length of the IV in bytes
+ *
+ * The caller provided IV is set for the block cipher referenced by the cipher
+ * handle.
+ */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
const u8 *src, unsigned int len)
{
memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}
+/**
+ * crypto_blkcipher_get_iv() - obtain IV from cipher
+ * @tfm: cipher handle
+ * @dst: buffer filled with the IV
+ * @len: length of the buffer dst
+ *
+ * The caller can obtain the IV set for the block cipher referenced by the
+ * cipher handle and store it into the user-provided buffer. If the buffer
+ * has insufficient space, the IV is truncated to fit the buffer.
+ */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
u8 *dst, unsigned int len)
{
memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
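A minimal single-block sketch (the "aes" name, the AES-128 key length and the 16-byte block size are the only assumptions):

	/* requires <linux/crypto.h> and <linux/err.h> */
	static int example_cipher_one_block(const u8 *key, const u8 in[16],
					    u8 out[16])
	{
		struct crypto_cipher *tfm;
		int ret;

		tfm = crypto_alloc_cipher("aes", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_cipher_setkey(tfm, key, 16);	/* AES-128 */
		if (!ret)
			crypto_cipher_encrypt_one(tfm, out, in);

		crypto_free_cipher(tfm);
		return ret;
	}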
static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
return (struct crypto_cipher *)tfm;
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
return __crypto_cipher_cast(tfm);
}
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
u32 type, u32 mask)
{
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
crypto_free_tfm(crypto_cipher_tfm(tfm));
}
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
return &crypto_cipher_tfm(tfm)->crt_cipher;
}
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
+ * providing a 16-byte key for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
key, keylen);
}
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
dst, src);
}
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
dst, src);
}
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
+ */
+
static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_hash *)tfm;
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
return __crypto_hash_cast(tfm);
}
+/**
+ * crypto_alloc_hash() - allocate synchronous message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned struct
+ * crypto_hash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
u32 type, u32 mask)
{
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_hash() - zeroize and free message digest handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_hash(struct crypto_hash *tfm)
{
crypto_free_tfm(crypto_hash_tfm(tfm));
}
+/**
+ * crypto_has_hash() - Search for the availability of a message digest
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the message digest cipher is known to the kernel crypto
+ * API; false otherwise
+ */
static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
return &crypto_hash_tfm(tfm)->crt_hash;
}
+/**
+ * crypto_hash_blocksize() - obtain block size for message digest
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
}
+/**
+ * crypto_hash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: message digest size
+ */
static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
{
return crypto_hash_crt(tfm)->digestsize;
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
}
+/**
+ * crypto_hash_init() - (re)initialize message digest handle
+ * @desc: cipher request handle that is to be filled by the caller --
+ * desc.tfm is filled with the hash cipher handle;
+ * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * The call (re-)initializes the message digest referenced by the hash cipher
+ * request handle. Any potentially existing state created by previous
+ * operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
static inline int crypto_hash_init(struct hash_desc *desc)
{
return crypto_hash_crt(desc->tfm)->init(desc);
}
+/**
+ * crypto_hash_update() - add data to message digest for processing
+ * @desc: cipher request handle
+ * @sg: scatter / gather list pointing to the data to be added to the message
+ * digest
+ * @nbytes: number of bytes to be processed from @sg
+ *
+ * Updates the message digest state of the cipher handle pointed to by the
+ * hash cipher request handle with the input data pointed to by the
+ * scatter/gather list.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_hash_update(struct hash_desc *desc,
struct scatterlist *sg,
unsigned int nbytes)
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc,
return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
}
+/**
+ * crypto_hash_final() - calculate message digest
+ * @desc: cipher request handle
+ * @out: message digest output buffer -- The caller must ensure that the out
+ * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
+ * function).
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
{
return crypto_hash_crt(desc->tfm)->final(desc, out);
}
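Taken together, a sketch of the init/update/final sequence over two scatterlists (the "sha256" name and the helper signature are illustrative assumptions; @out must hold crypto_hash_digestsize() bytes):

	/* requires <linux/crypto.h>, <linux/scatterlist.h> and <linux/err.h> */
	static int example_hash_steps(struct scatterlist *sg1, unsigned int len1,
				      struct scatterlist *sg2, unsigned int len2,
				      u8 *out)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		int ret;

		/* mask out asynchronous implementations */
		tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;

		ret = crypto_hash_init(&desc);
		if (!ret)
			ret = crypto_hash_update(&desc, sg1, len1);
		if (!ret)
			ret = crypto_hash_update(&desc, sg2, len2);
		if (!ret)
			ret = crypto_hash_final(&desc, out);

		crypto_free_hash(tfm);
		return ret;
	}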
+/**
+ * crypto_hash_digest() - calculate message digest for a buffer
+ * @desc: see crypto_hash_final()
+ * @sg: see crypto_hash_update()
+ * @nbytes: see crypto_hash_update()
+ * @out: see crypto_hash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_hash_init,
+ * crypto_hash_update and crypto_hash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_hash_digest(struct hash_desc *desc,
struct scatterlist *sg,
unsigned int nbytes, u8 *out)
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc,
return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
}
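The one-shot equivalent of the previous sketch, using the shorthand just described (same assumptions):

	static int example_hash_digest(const void *data, unsigned int len, u8 *out)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;
		sg_init_one(&sg, data, len);

		ret = crypto_hash_digest(&desc, &sg, len, out);

		crypto_free_hash(tfm);
		return ret;
	}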
+/**
+ * crypto_hash_setkey() - set key for message digest
+ * @hash: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the message digest cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_hash_setkey(struct crypto_hash *hash,
const u8 *key, unsigned int keylen)
{
diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h
deleted file mode 100644
index 362bf19d6cf1..000000000000
--- a/include/linux/cycx_x25.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef _CYCX_X25_H
-#define _CYCX_X25_H
-/*
-* cycx_x25.h Cyclom X.25 firmware API definitions.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* 2000/04/02 acme dprintk and cycx_debug
-* 1999/01/03 acme judicious use of data types
-* 1999/01/02 acme #define X25_ACK_N3 0x4411
-* 1998/12/28 acme cleanup: lot'o'things removed
-* commands listed,
-* TX25Cmd & TX25Config structs
-* typedef'ed
-*/
-#ifndef PACKED
-#define PACKED __attribute__((packed))
-#endif
-
-/* X.25 shared memory layout. */
-#define X25_MBOX_OFFS 0x300 /* general mailbox block */
-#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
-
-/* Debug */
-#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a)
-
-extern unsigned int cycx_debug;
-
-/* Data Structures */
-/* X.25 Command Block. */
-struct cycx_x25_cmd {
- u16 command;
- u16 link; /* values: 0 or 1 */
- u16 len; /* values: 0 thru 0x205 (517) */
- u32 buf;
-} PACKED;
-
-/* Defines for the 'command' field. */
-#define X25_CONNECT_REQUEST 0x4401
-#define X25_CONNECT_RESPONSE 0x4402
-#define X25_DISCONNECT_REQUEST 0x4403
-#define X25_DISCONNECT_RESPONSE 0x4404
-#define X25_DATA_REQUEST 0x4405
-#define X25_ACK_TO_VC 0x4406
-#define X25_INTERRUPT_RESPONSE 0x4407
-#define X25_CONFIG 0x4408
-#define X25_CONNECT_INDICATION 0x4409
-#define X25_CONNECT_CONFIRM 0x440A
-#define X25_DISCONNECT_INDICATION 0x440B
-#define X25_DISCONNECT_CONFIRM 0x440C
-#define X25_DATA_INDICATION 0x440E
-#define X25_INTERRUPT_INDICATION 0x440F
-#define X25_ACK_FROM_VC 0x4410
-#define X25_ACK_N3 0x4411
-#define X25_CONNECT_COLLISION 0x4413
-#define X25_N3WIN 0x4414
-#define X25_LINE_ON 0x4415
-#define X25_LINE_OFF 0x4416
-#define X25_RESET_REQUEST 0x4417
-#define X25_LOG 0x4500
-#define X25_STATISTIC 0x4600
-#define X25_TRACE 0x4700
-#define X25_N2TRACEXC 0x4702
-#define X25_N3TRACEXC 0x4703
-
-/**
- * struct cycx_x25_config - cyclom2x x25 firmware configuration
- * @link - link number
- * @speed - line speed
- * @clock - internal/external
- * @n2 - # of level 2 retransm.(values: 1 thru FF)
- * @n2win - level 2 window (values: 1 thru 7)
- * @n3win - level 3 window (values: 1 thru 7)
- * @nvc - # of logical channels (values: 1 thru 64)
- * @pktlen - level 3 packet length - log base 2 of size
- * @locaddr - my address
- * @remaddr - remote address
- * @t1 - time, in seconds
- * @t2 - time, in seconds
- * @t21 - time, in seconds
- * @npvc - # of permanent virt. circuits (1 thru nvc)
- * @t23 - time, in seconds
- * @flags - see dosx25.doc, in portuguese, for details
- */
-struct cycx_x25_config {
- u8 link;
- u8 speed;
- u8 clock;
- u8 n2;
- u8 n2win;
- u8 n3win;
- u8 nvc;
- u8 pktlen;
- u8 locaddr;
- u8 remaddr;
- u16 t1;
- u16 t2;
- u8 t21;
- u8 npvc;
- u8 t23;
- u8 flags;
-} PACKED;
-
-struct cycx_x25_stats {
- u16 rx_crc_errors;
- u16 rx_over_errors;
- u16 n2_tx_frames;
- u16 n2_rx_frames;
- u16 tx_timeouts;
- u16 rx_timeouts;
- u16 n3_tx_packets;
- u16 n3_rx_packets;
- u16 tx_aborts;
- u16 rx_aborts;
-} PACKED;
-#endif /* _CYCX_X25_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 75a227cc7ce2..5a813988e6d4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -11,7 +11,6 @@
#include <linux/rcupdate.h>
#include <linux/lockref.h>
-struct nameidata;
struct path;
struct vfsmount;
@@ -125,15 +124,15 @@ struct dentry {
void *d_fsdata; /* fs-specific data */
struct list_head d_lru; /* LRU list */
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
/*
- * d_child and d_rcu can share memory
+ * d_alias and d_rcu can share memory
*/
union {
- struct list_head d_child; /* child of parent list */
+ struct hlist_node d_alias; /* inode alias list */
struct rcu_head d_rcu;
} d_u;
- struct list_head d_subdirs; /* our children */
- struct hlist_node d_alias; /* inode alias list */
};
/*
@@ -226,17 +225,11 @@ struct dentry_operations {
extern seqlock_t rename_lock;
-static inline int dname_external(const struct dentry *dentry)
-{
- return dentry->d_name.name != dentry->d_iname;
-}
-
/*
* These are the low-level FS interfaces to the dcache..
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
@@ -254,7 +247,7 @@ extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
-extern int d_invalidate(struct dentry *);
+extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
@@ -269,7 +262,6 @@ extern void d_prune_aliases(struct inode *);
/* test whether we have any submounts in a subdir tree */
extern int have_submounts(struct dentry *);
-extern int check_submounts_and_drop(struct dentry *);
/*
* This adds the entry to the hash queues.
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 4d0b4d1aa132..da4c4983adbe 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
+struct device;
struct file_operations;
struct debugfs_blob_wrapper {
@@ -92,20 +93,25 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_regset32 *regset);
-int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
- int nregs, void __iomem *base, char *prefix);
+void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix);
struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
struct dentry *parent,
u32 *array, u32 elements);
+struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data));
+
bool debugfs_initialized(void);
#else
#include <linux/err.h>
-/*
+/*
* We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled
* so users have a chance to detect if there was a real error or not. We don't
* want to duplicate the design decision mistakes of procfs and devfs again.
@@ -233,10 +239,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name,
return ERR_PTR(-ENODEV);
}
-static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix)
{
- return 0;
}
static inline bool debugfs_initialized(void)
@@ -251,6 +256,15 @@ static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif
#endif
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
new file mode 100644
index 000000000000..c0a360e99f64
--- /dev/null
+++ b/include/linux/devcoredump.h
@@ -0,0 +1,35 @@
+#ifndef __DEVCOREDUMP_H
+#define __DEVCOREDUMP_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_DEV_COREDUMP
+void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
+ gfp_t gfp);
+
+void dev_coredumpm(struct device *dev, struct module *owner,
+ const void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen),
+ void (*free)(const void *data));
+#else
+static inline void dev_coredumpv(struct device *dev, const void *data,
+ size_t datalen, gfp_t gfp)
+{
+ vfree(data);
+}
+
+static inline void
+dev_coredumpm(struct device *dev, struct module *owner,
+ const void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen),
+ void (*free)(const void *data))
+{
+ free(data);
+}
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* __DEVCOREDUMP_H */
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index f1863dcd83ea..ce447f0f1bad 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -188,7 +188,7 @@ extern struct devfreq *devm_devfreq_add_device(struct device *dev,
extern void devm_devfreq_remove_device(struct device *dev,
struct devfreq *devfreq);
-/* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */
+/* Supposed to be called by PM callbacks */
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e1707de043ae..ca6d2acc5eb7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -64,6 +64,7 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti,
union map_info *map_context);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
+typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
@@ -145,6 +146,7 @@ struct target_type {
dm_endio_fn end_io;
dm_request_endio_fn rq_end_io;
dm_presuspend_fn presuspend;
+ dm_presuspend_undo_fn presuspend_undo;
dm_postsuspend_fn postsuspend;
dm_preresume_fn preresume;
dm_resume_fn resume;
diff --git a/include/linux/device.h b/include/linux/device.h
index 43d183aeb25b..fb506738f7b7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus,
* with the device lock held in the core, so be careful.
*/
#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be
+#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
+#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
+#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be
+#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
from the device */
extern struct kset *bus_get_kset(struct bus_type *bus);
@@ -607,8 +608,8 @@ extern int devres_release_group(struct device *dev, void *id);
extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
va_list ap);
-extern char *devm_kasprintf(struct device *dev, gfp_t gfp,
- const char *fmt, ...);
+extern __printf(3, 4)
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -910,6 +911,11 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+static inline void device_lock_assert(struct device *dev)
+{
+ lockdep_assert_held(&dev->mutex);
+}
+
void driver_init(void);
/*
@@ -1117,6 +1123,41 @@ do { \
})
#endif
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...) \
+ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...) \
+ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...) \
+ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...) \
+ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...) \
+ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...) \
+ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...) \
+ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...) \
+	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
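
The dev_<level>_once() helpers mirror printk_once(): the static __print_once flag lives at the expansion site, so each call site emits at most one message for the lifetime of the kernel (or module). A small usage sketch, hypothetical driver code rather than part of this patch:

#include <linux/device.h>

static void example_check_rev(struct device *dev, u32 rev)
{
	if (rev < 2)
		dev_warn_once(dev, "old silicon rev %u, applying workaround\n",
			      rev);
}

Two distinct dev_warn_once() statements each get their own flag, so each prints once.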
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 931b70986272..c3007cb4bfa6 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
extern u64 dma_get_required_mask(struct device *dev);
-#ifndef set_arch_dma_coherent_ops
-static inline int set_arch_dma_coherent_ops(struct device *dev)
-{
- return 0;
-}
+#ifndef arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, struct iommu_ops *iommu,
+ bool coherent) { }
+#endif
+
+#ifndef arch_teardown_dma_ops
+static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -263,6 +266,32 @@ struct dma_attrs;
#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
dma_unmap_sg(dev, sgl, nents, dir)
+#else
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
#endif /* CONFIG_HAVE_DMA_ATTRS */
#ifdef CONFIG_NEED_DMA_MAP_STATE
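
With CONFIG_HAVE_DMA_ATTRS the write-combine helpers become inline wrappers that tack DMA_ATTR_WRITE_COMBINE onto the generic *_attrs calls instead of being per-architecture functions. A hedged allocation/free sketch (device, length and naming are made up):

#include <linux/dma-mapping.h>

static void *example_alloc_wc(struct device *dev, size_t len, dma_addr_t *dma)
{
	/* Write-combined memory suits buffers the CPU mostly writes and the
	 * device reads (framebuffers, descriptor rings): CPU writes may be
	 * buffered and merged, so ordering is still the caller's job. */
	return dma_alloc_writecombine(dev, len, dma, GFP_KERNEL);
}

static void example_free_wc(struct device *dev, size_t len,
			    void *cpu_addr, dma_addr_t dma)
{
	dma_free_writecombine(dev, len, cpu_addr, dma);
}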
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
new file mode 100644
index 000000000000..71456442ebe3
--- /dev/null
+++ b/include/linux/dma/dw.h
@@ -0,0 +1,64 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _DMA_DW_H
+#define _DMA_DW_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+#include <linux/platform_data/dma-dw.h>
+
+struct dw_dma;
+
+/**
+ * struct dw_dma_chip - representation of DesignWare DMA controller hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @clk: hclk clock
+ * @dw: struct dw_dma that is filled in by dw_dma_probe()
+ */
+struct dw_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ struct dw_dma *dw;
+};
+
+/* Export to the platform drivers */
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_remove(struct dw_dma_chip *chip);
+
+/* DMA API extensions */
+struct dw_desc;
+
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
+#endif /* _DMA_DW_H */
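
The new <linux/dma/dw.h> header is what bus glue (PCI or platform wrappers) includes: it fills a struct dw_dma_chip and hands it to dw_dma_probe(). A sketch of platform glue under those assumptions (error paths trimmed, clock handling omitted, names illustrative):

#include <linux/dma/dw.h>
#include <linux/platform_device.h>

static int example_dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	/* pdata may be NULL when the controller autoconfigures itself */
	return dw_dma_probe(chip, dev_get_platdata(&pdev->dev));
}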
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1f9e642c66ad..40cd75e21ea2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -199,15 +199,12 @@ enum dma_ctrl_flags {
* configuration data in statically from the platform). An additional
* argument of struct dma_slave_config must be passed in with this
* command.
- * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
- * into external start mode.
*/
enum dma_ctrl_cmd {
DMA_TERMINATE_ALL,
DMA_PAUSE,
DMA_RESUME,
DMA_SLAVE_CONFIG,
- FSLDMA_EXTERNAL_START,
};
/**
@@ -307,7 +304,9 @@ enum dma_slave_buswidth {
* struct dma_slave_config - dma slave channel runtime config
* @direction: whether the data shall go in or out on this slave
* channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
- * legal values.
+ * legal values. DEPRECATED, drivers should use the direction argument
+ * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
+ * the dir field in the dma_interleaved_template structure.
* @src_addr: this is the physical address where DMA slave data
* should be read (RX), if the source is memory this argument is
* ignored.
@@ -448,7 +447,8 @@ struct dmaengine_unmap_data {
* communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
- * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+ * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
* ---async_tx api specific fields---
@@ -755,6 +755,16 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents, flags);
+}
+
static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
if (!chan || !caps)
@@ -900,18 +910,6 @@ static inline void dmaengine_put(void)
}
#endif
-#ifdef CONFIG_NET_DMA
-#define net_dmaengine_get() dmaengine_get()
-#define net_dmaengine_put() dmaengine_put()
-#else
-static inline void net_dmaengine_get(void)
-{
-}
-static inline void net_dmaengine_put(void)
-{
-}
-#endif
-
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put()
@@ -933,16 +931,8 @@ async_dma_find_channel(enum dma_transaction_type type)
return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
-
-dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
- void *dest, void *src, size_t len);
-dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
- struct page *page, unsigned int offset, void *kdata, size_t len);
-dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
- struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
- unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
- struct dma_chan *chan);
+ struct dma_chan *chan);
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
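
dmaengine_prep_dma_sg() follows the established dmaengine_prep_*() pattern: a thin inline that forwards to the channel's device_prep_dma_sg callback. A hedged scatter-gather memcpy sketch, assuming the channel's driver actually implements that callback:

#include <linux/dmaengine.h>

static int example_sg_copy(struct dma_chan *chan,
			   struct scatterlist *dst, unsigned int dst_nents,
			   struct scatterlist *src, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_sg(chan, dst, dst_nents, src, src_nents,
				   DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* assigns the ordered cookie */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* actually pushes work to hardware */
	return 0;
}

This also illustrates the reworded tx_submit documentation above: submission only queues the descriptor, and issue_pending() is what starts it.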
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 1deece46a0ca..30624954dec5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@
struct acpi_dmar_header;
+#ifdef CONFIG_X86
+# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
+#else
+# define DMAR_UNITS_SUPPORTED 64
+#endif
+
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
@@ -56,13 +62,19 @@ struct dmar_drhd_unit {
struct intel_iommu *iommu;
};
+struct dmar_pci_path {
+ u8 bus;
+ u8 device;
+ u8 function;
+};
+
struct dmar_pci_notify_info {
struct pci_dev *dev;
unsigned long event;
int bus;
u16 seg;
u16 level;
- struct acpi_dmar_pci_path path[];
+ struct dmar_pci_path path[];
} __attribute__((packed));
extern struct rw_semaphore dmar_global_lock;
@@ -114,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
+extern int dmar_device_add(acpi_handle handle);
+extern int dmar_device_remove(acpi_handle handle);
+
+static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
+{
+ return 0;
+}
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
-extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
-extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
-static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+
+#define dmar_parse_one_rmrr dmar_res_noop
+#define dmar_parse_one_atsr dmar_res_noop
+#define dmar_check_one_atsr dmar_res_noop
+#define dmar_release_one_atsr dmar_res_noop
+
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;
}
-static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
+
+static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
return 0;
}
-static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
+#endif /* CONFIG_INTEL_IOMMU */
+
+#ifdef CONFIG_IRQ_REMAP
+extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
+#else /* CONFIG_IRQ_REMAP */
+static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{ return 0; }
+#endif /* CONFIG_IRQ_REMAP */
+
+#else /* CONFIG_DMAR_TABLE */
+
+static inline int dmar_device_add(void *handle)
+{
+ return 0;
+}
+
+static inline int dmar_device_remove(void *handle)
{
return 0;
}
-#endif /* CONFIG_INTEL_IOMMU */
#endif /* CONFIG_DMAR_TABLE */
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index debb70d40547..8723f2a99e15 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -172,7 +172,7 @@ enum drbd_ret_code {
ERR_RES_NOT_KNOWN = 158,
ERR_RES_IN_USE = 159,
ERR_MINOR_CONFIGURED = 160,
- ERR_MINOR_EXISTS = 161,
+ ERR_MINOR_OR_VOLUME_EXISTS = 161,
ERR_INVALID_REQUEST = 162,
ERR_NEED_APV_100 = 163,
ERR_NEED_ALLOW_TWO_PRI = 164,
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
deleted file mode 100644
index 68b4024184de..000000000000
--- a/include/linux/dw_dmac.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Driver for the Synopsys DesignWare DMA Controller
- *
- * Copyright (C) 2007 Atmel Corporation
- * Copyright (C) 2010-2011 ST Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef DW_DMAC_H
-#define DW_DMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct dw_dma_slave - Controller-specific information about a slave
- *
- * @dma_dev: required DMA master device. Depricated.
- * @bus_id: name of this device channel, not just a device name since
- * devices may have more than one channel e.g. "foo_tx"
- * @cfg_hi: Platform-specific initializer for the CFG_HI register
- * @cfg_lo: Platform-specific initializer for the CFG_LO register
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
- */
-struct dw_dma_slave {
- struct device *dma_dev;
- u32 cfg_hi;
- u32 cfg_lo;
- u8 src_master;
- u8 dst_master;
-};
-
-/**
- * struct dw_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @is_private: The device channels should be marked as private and not for
- * by the general purpose DMA channel allocator.
- * @chan_allocation_order: Allocate channels starting from 0 or 7
- * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
- * @block_size: Maximum block size supported by the controller
- * @nr_masters: Number of AHB masters supported by the controller
- * @data_width: Maximum data width supported by hardware per AHB master
- * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
- */
-struct dw_dma_platform_data {
- unsigned int nr_channels;
- bool is_private;
-#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
-#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
-#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
-#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
- unsigned short block_size;
- unsigned char nr_masters;
- unsigned char data_width[4];
-};
-
-/* bursts size */
-enum dw_dma_msize {
- DW_DMA_MSIZE_1,
- DW_DMA_MSIZE_4,
- DW_DMA_MSIZE_8,
- DW_DMA_MSIZE_16,
- DW_DMA_MSIZE_32,
- DW_DMA_MSIZE_64,
- DW_DMA_MSIZE_128,
- DW_DMA_MSIZE_256,
-};
-
-/* Platform-configurable bits in CFG_HI */
-#define DWC_CFGH_FCMODE (1 << 0)
-#define DWC_CFGH_FIFO_MODE (1 << 1)
-#define DWC_CFGH_PROTCTL(x) ((x) << 2)
-#define DWC_CFGH_SRC_PER(x) ((x) << 7)
-#define DWC_CFGH_DST_PER(x) ((x) << 11)
-
-/* Platform-configurable bits in CFG_LO */
-#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
-#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
-#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
-#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
-#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
-#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
-#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
-#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
-#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
-#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
-
-/* DMA API extensions */
-struct dw_cyclic_desc {
- struct dw_desc **desc;
- unsigned long periods;
- void (*period_callback)(void *param);
- void *period_callback_param;
-};
-
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction);
-void dw_dma_cyclic_free(struct dma_chan *chan);
-int dw_dma_cyclic_start(struct dma_chan *chan);
-void dw_dma_cyclic_stop(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-
-#endif /* DW_DMAC_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 2fe93b26b42f..4f1bbc68cd1b 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -42,7 +42,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
#if defined(CONFIG_DYNAMIC_DEBUG)
extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
-int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname);
@@ -50,15 +50,15 @@ extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
struct device;
extern __printf(3, 4)
-int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
- const char *fmt, ...);
+void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
+ const char *fmt, ...);
struct net_device;
extern __printf(3, 4)
-int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev,
- const char *fmt, ...);
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev,
+ const char *fmt, ...);
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
static struct _ddebug __aligned(8) \
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 5621547d631b..a4be70398ce1 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
{
BUG_ON(count > DQL_MAX_OBJECT);
- dql->num_queued += count;
dql->last_obj_cnt = count;
+
+	/* We want to force a write first, so that the CPU does not attempt
+	 * to get the cache line containing last_obj_cnt, num_queued, adj_limit
+	 * in Shared state, but instead directly issues a Request For Ownership.
+	 * It is only a hint; we use barrier() only.
+	 */
+ barrier();
+
+ dql->num_queued += count;
}
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return dql->adj_limit - dql->num_queued;
+ return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
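
The barrier() makes the store to last_obj_cnt visible before num_queued is updated, nudging a completing CPU to fetch the cache line with a Request For Ownership instead of first pulling it Shared; the ACCESS_ONCE() pair in dql_avail() makes the lockless reads explicit. A hedged producer-side sketch (drivers normally reach this through the netdev_tx_sent_queue() wrappers; names here are made up):

#include <linux/dynamic_queue_limits.h>

/* Called on the transmit path after posting 'bytes' worth of descriptors. */
static bool example_tx_account(struct dql *dql, unsigned int bytes)
{
	dql_queued(dql, bytes);

	/* Over the dynamic limit: the caller should stop the queue; the
	 * completion path calls dql_completed() and wakes it again. */
	return dql_avail(dql) < 0;
}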
diff --git a/include/linux/edac.h b/include/linux/edac.h
index e1e68da6f35c..da3b72e95db3 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -194,7 +194,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_DDR3: DDR3 RAM
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
- * @MEM_DDR4: DDR4 RAM
+ * @MEM_LRDDR3: Load-Reduced DDR3 memory.
+ * @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
*/
@@ -216,6 +217,7 @@ enum mem_type {
MEM_XDR,
MEM_DDR3,
MEM_RDDR3,
+ MEM_LRDDR3,
MEM_DDR4,
MEM_RDDR4,
};
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index e50f98b0297a..eb0b1988050a 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
const u8 word, u16 *data);
extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
const u8 word, __le16 *data, const u16 words);
+extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom,
+ const u8 byte, u8 *data);
+extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom,
+ const u8 byte, u8 *data, const u16 bytes);
extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45cb4ffdea62..0238d612750e 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -92,6 +92,7 @@ typedef struct {
#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
+#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
@@ -502,6 +503,10 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data);
+typedef efi_status_t
+efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
+
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
typedef void efi_reset_system_t (int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data);
@@ -542,6 +547,9 @@ void efi_native_runtime_setup(void);
#define SMBIOS_TABLE_GUID \
EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+#define SMBIOS3_TABLE_GUID \
+ EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
+
#define SAL_SYSTEM_TABLE_GUID \
EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
@@ -805,7 +813,8 @@ extern struct efi {
unsigned long mps; /* MPS table */
unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
unsigned long acpi20; /* ACPI table (ACPI 2.0) */
- unsigned long smbios; /* SM BIOS table */
+ unsigned long smbios; /* SMBIOS table (32 bit entry point) */
+ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
unsigned long sal_systab; /* SAL system table */
unsigned long boot_info; /* boot info table */
unsigned long hcdp; /* HCDP table */
@@ -821,6 +830,7 @@ extern struct efi {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
+ efi_set_variable_nonblocking_t *set_variable_nonblocking;
efi_query_variable_info_t *query_variable_info;
efi_update_capsule_t *update_capsule;
efi_query_capsule_caps_t *query_capsule_caps;
@@ -886,6 +896,13 @@ extern bool efi_poweroff_required(void);
(md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
(md) = (void *)(md) + (m)->desc_size)
+/*
+ * Format an EFI memory descriptor's type and attributes to a user-provided
+ * character buffer, as per snprintf(), and return the buffer.
+ */
+char * __init efi_md_typeattr_format(char *buf, size_t size,
+ const efi_memory_desc_t *md);
+
/**
* efi_range_is_wc - check the WC bit on an address range
* @start: starting kvirt address
@@ -1034,6 +1051,7 @@ struct efivar_operations {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
+ efi_set_variable_nonblocking_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
};
@@ -1227,4 +1245,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
unsigned long *load_addr,
unsigned long *load_size);
+efi_status_t efi_parse_options(char *cmdline);
+
+bool efi_runtime_disabled(void);
#endif /* _LINUX_EFI_H */
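
efi_md_typeattr_format() centralizes the "type=... attr=..." formatting that architectures previously open-coded when dumping the EFI memory map. A hedged __init sketch; it assumes an arch-provided struct efi_memory_map named memmap and an arbitrary buffer size:

#include <linux/efi.h>

static void __init example_dump_memmap(void)
{
	efi_memory_desc_t *md;
	char buf[64];

	for_each_efi_memory_desc(&memmap, md)
		pr_info("mem: [0x%llx-0x%llx) %s\n",
			(unsigned long long)md->phys_addr,
			(unsigned long long)(md->phys_addr +
					     (md->num_pages << EFI_PAGE_SHIFT)),
			efi_md_typeattr_format(buf, sizeof(buf), md));
}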
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 67a5fa7830c4..20fa8d8ae313 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -15,6 +15,11 @@
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif
+#ifndef SET_PERSONALITY2
+#define SET_PERSONALITY2(ex, state) \
+ SET_PERSONALITY(ex)
+#endif
+
#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9c5529dc6d07..41c891d05f04 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -29,6 +29,7 @@
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
+u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -391,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
#endif
}
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
+ * @skb: Buffer to pad
+ *
+ * An Ethernet frame should have a minimum size of 60 bytes. This function
+ * takes short frames and pads them with zeros up to the 60 byte limit.
+ */
+static inline int eth_skb_pad(struct sk_buff *skb)
+{
+ return skb_put_padto(skb, ETH_ZLEN);
+}
+
#endif /* _LINUX_ETHERDEVICE_H */
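
eth_skb_pad() wraps skb_put_padto() with the Ethernet minimum (ETH_ZLEN, 60 bytes excluding the FCS); on failure the skb has already been consumed, so the caller just bails out. A hedged xmit-path sketch for hardware that cannot pad short frames itself:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* On error the skb is already freed, so report OK and return. */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	/* ... map and post the now >= 60 byte frame to the NIC ... */
	return NETDEV_TX_OK;
}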
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e658229fee39..653dc9c4ebac 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -59,6 +59,26 @@ enum ethtool_phys_id_state {
ETHTOOL_ID_OFF
};
+enum {
+ ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
+ ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+
+ /*
+ * Add your fresh new hash function bits above and remember to update
+ * rss_hash_func_strings[] in ethtool.c
+ */
+ ETH_RSS_HASH_FUNCS_COUNT
+};
+
+#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
+#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
+
+#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
+#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+
+#define ETH_RSS_HASH_UNKNOWN 0
+#define ETH_RSS_HASH_NO_CHANGE 0
+
struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
@@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* Returns zero if not supported for this specific device.
* @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
* Returns zero if not supported for this specific device.
- * @get_rxfh: Get the contents of the RX flow hash indirection table and hash
- * key.
- * Will only be called if one or both of @get_rxfh_indir_size and
- * @get_rxfh_key_size are implemented and return non-zero.
- * Returns a negative error code or zero.
- * @set_rxfh: Set the contents of the RX flow hash indirection table and/or
- * hash key. In case only the indirection table or hash key is to be
- * changed, the other argument will be %NULL.
- * Will only be called if one or both of @get_rxfh_indir_size and
- * @get_rxfh_key_size are implemented and return non-zero.
+ * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key
+ * and/or hash function.
* Returns a negative error code or zero.
+ * @set_rxfh: Set the contents of the RX flow hash indirection table, hash
+ * key, and/or hash function. Arguments which are set to %NULL or zero
+ * will remain unchanged.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -241,9 +258,10 @@ struct ethtool_ops {
int (*reset)(struct net_device *, u32 *);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key);
+ int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
+ u8 *hfunc);
int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key);
+ const u8 *key, const u8 hfunc);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -257,6 +275,10 @@ struct ethtool_ops {
struct ethtool_eeprom *, u8 *);
int (*get_eee)(struct net_device *, struct ethtool_eee *);
int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
};
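
get_rxfh()/set_rxfh() now carry the RSS hash function alongside the indirection table and key; NULL (or ETH_RSS_HASH_NO_CHANGE) arguments mean "not requested" or "leave unchanged". A hedged sketch of a driver reporting a Toeplitz hash; the private struct and sizes are illustrative:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

struct example_priv {
	u32 rss_indir[128];
	u8  rss_key[40];
};

static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct example_priv *priv = netdev_priv(dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */
	if (indir)
		memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
	if (key)
		memcpy(key, priv->rss_key, sizeof(priv->rss_key));
	return 0;
}

A matching set_rxfh() should reject (e.g. with -EOPNOTSUPP) any hfunc it does not support, per the updated kernel-doc above.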
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 8900fdf511c6..0b17ad43fbfc 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -34,8 +34,10 @@
* @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
* @state_on: print_state is overriden with state_on if attached.
* If NULL, default method of extcon class is used.
- * @state_off: print_state is overriden with state_on if detached.
+ * @state_off: print_state is overridden with state_off if detached.
* If NUll, default method of extcon class is used.
+ * @check_on_resume: Boolean describing whether to check the state of gpio
+ * while resuming from sleep.
*
* Note that in order for state_on or state_off to be valid, both state_on
* and state_off should be not NULL. If at least one of them is NULL,
diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h
deleted file mode 100644
index 030526bf8d79..000000000000
--- a/include/linux/extcon/sm5502.h
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * sm5502.h
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_EXTCON_SM5502_H
-#define __LINUX_EXTCON_SM5502_H
-
-enum sm5502_types {
- TYPE_SM5502,
-};
-
-/* SM5502 registers */
-enum sm5502_reg {
- SM5502_REG_DEVICE_ID = 0x01,
- SM5502_REG_CONTROL,
- SM5502_REG_INT1,
- SM5502_REG_INT2,
- SM5502_REG_INTMASK1,
- SM5502_REG_INTMASK2,
- SM5502_REG_ADC,
- SM5502_REG_TIMING_SET1,
- SM5502_REG_TIMING_SET2,
- SM5502_REG_DEV_TYPE1,
- SM5502_REG_DEV_TYPE2,
- SM5502_REG_BUTTON1,
- SM5502_REG_BUTTON2,
- SM5502_REG_CAR_KIT_STATUS,
- SM5502_REG_RSVD1,
- SM5502_REG_RSVD2,
- SM5502_REG_RSVD3,
- SM5502_REG_RSVD4,
- SM5502_REG_MANUAL_SW1,
- SM5502_REG_MANUAL_SW2,
- SM5502_REG_DEV_TYPE3,
- SM5502_REG_RSVD5,
- SM5502_REG_RSVD6,
- SM5502_REG_RSVD7,
- SM5502_REG_RSVD8,
- SM5502_REG_RSVD9,
- SM5502_REG_RESET,
- SM5502_REG_RSVD10,
- SM5502_REG_RESERVED_ID1,
- SM5502_REG_RSVD11,
- SM5502_REG_RSVD12,
- SM5502_REG_RESERVED_ID2,
- SM5502_REG_RSVD13,
- SM5502_REG_OCP,
- SM5502_REG_RSVD14,
- SM5502_REG_RSVD15,
- SM5502_REG_RSVD16,
- SM5502_REG_RSVD17,
- SM5502_REG_RSVD18,
- SM5502_REG_RSVD19,
- SM5502_REG_RSVD20,
- SM5502_REG_RSVD21,
- SM5502_REG_RSVD22,
- SM5502_REG_RSVD23,
- SM5502_REG_RSVD24,
- SM5502_REG_RSVD25,
- SM5502_REG_RSVD26,
- SM5502_REG_RSVD27,
- SM5502_REG_RSVD28,
- SM5502_REG_RSVD29,
- SM5502_REG_RSVD30,
- SM5502_REG_RSVD31,
- SM5502_REG_RSVD32,
- SM5502_REG_RSVD33,
- SM5502_REG_RSVD34,
- SM5502_REG_RSVD35,
- SM5502_REG_RSVD36,
- SM5502_REG_RESERVED_ID3,
-
- SM5502_REG_END,
-};
-
-/* Define SM5502 MASK/SHIFT constant */
-#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0
-#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3
-#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
-#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
-
-#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0
-#define SM5502_REG_CONTROL_WAIT_SHIFT 1
-#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2
-#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3
-#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4
-#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
-#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
-#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
-#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
-#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
-
-#define SM5502_REG_INTM1_ATTACH_SHIFT 0
-#define SM5502_REG_INTM1_DETACH_SHIFT 1
-#define SM5502_REG_INTM1_KP_SHIFT 2
-#define SM5502_REG_INTM1_LKP_SHIFT 3
-#define SM5502_REG_INTM1_LKR_SHIFT 4
-#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5
-#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6
-#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7
-#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
-#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
-#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT)
-#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
-#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
-#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
-#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
-#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
-
-#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0
-#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1
-#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2
-#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3
-#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4
-#define SM5502_REG_INTM2_MHL_SHIFT 5
-#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
-#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
-#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
-#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
-#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
-#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
-
-#define SM5502_REG_ADC_SHIFT 0
-#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
-
-#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
-#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
-#define TIMING_KEY_PRESS_100MS 0x0
-#define TIMING_KEY_PRESS_200MS 0x1
-#define TIMING_KEY_PRESS_300MS 0x2
-#define TIMING_KEY_PRESS_400MS 0x3
-#define TIMING_KEY_PRESS_500MS 0x4
-#define TIMING_KEY_PRESS_600MS 0x5
-#define TIMING_KEY_PRESS_700MS 0x6
-#define TIMING_KEY_PRESS_800MS 0x7
-#define TIMING_KEY_PRESS_900MS 0x8
-#define TIMING_KEY_PRESS_1000MS 0x9
-#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0
-#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
-#define TIMING_ADC_DET_50MS 0x0
-#define TIMING_ADC_DET_100MS 0x1
-#define TIMING_ADC_DET_150MS 0x2
-#define TIMING_ADC_DET_200MS 0x3
-#define TIMING_ADC_DET_300MS 0x4
-#define TIMING_ADC_DET_400MS 0x5
-#define TIMING_ADC_DET_500MS 0x6
-#define TIMING_ADC_DET_600MS 0x7
-#define TIMING_ADC_DET_700MS 0x8
-#define TIMING_ADC_DET_800MS 0x9
-#define TIMING_ADC_DET_900MS 0xA
-#define TIMING_ADC_DET_1000MS 0xB
-
-#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4
-#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
-#define TIMING_SW_WAIT_10MS 0x0
-#define TIMING_SW_WAIT_30MS 0x1
-#define TIMING_SW_WAIT_50MS 0x2
-#define TIMING_SW_WAIT_70MS 0x3
-#define TIMING_SW_WAIT_90MS 0x4
-#define TIMING_SW_WAIT_110MS 0x5
-#define TIMING_SW_WAIT_130MS 0x6
-#define TIMING_SW_WAIT_150MS 0x7
-#define TIMING_SW_WAIT_170MS 0x8
-#define TIMING_SW_WAIT_190MS 0x9
-#define TIMING_SW_WAIT_210MS 0xA
-#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0
-#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
-#define TIMING_LONG_KEY_300MS 0x0
-#define TIMING_LONG_KEY_400MS 0x1
-#define TIMING_LONG_KEY_500MS 0x2
-#define TIMING_LONG_KEY_600MS 0x3
-#define TIMING_LONG_KEY_700MS 0x4
-#define TIMING_LONG_KEY_800MS 0x5
-#define TIMING_LONG_KEY_900MS 0x6
-#define TIMING_LONG_KEY_1000MS 0x7
-#define TIMING_LONG_KEY_1100MS 0x8
-#define TIMING_LONG_KEY_1200MS 0x9
-#define TIMING_LONG_KEY_1300MS 0xA
-#define TIMING_LONG_KEY_1400MS 0xB
-#define TIMING_LONG_KEY_1500MS 0xC
-
-#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0
-#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1
-#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2
-#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3
-#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4
-#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5
-#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6
-#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7
-#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
-#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
-#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
-#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
-#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
-#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
-#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
-#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
-
-#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
-#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
-#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
-#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3
-#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4
-#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5
-#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6
-#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
-#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
-#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
-#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
-#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
-#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
-#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
-
-#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0
-#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2
-#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5
-#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
-#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
-#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
-#define VBUSIN_SWITCH_OPEN 0x0
-#define VBUSIN_SWITCH_VBUSOUT 0x1
-#define VBUSIN_SWITCH_MIC 0x2
-#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3
-#define DM_DP_CON_SWITCH_OPEN 0x0
-#define DM_DP_CON_SWITCH_USB 0x1
-#define DM_DP_CON_SWITCH_AUDIO 0x2
-#define DM_DP_CON_SWITCH_UART 0x3
-#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
- | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
-#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
- | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
-#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
- | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
-#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
- | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
-
-/* SM5502 Interrupts */
-enum sm5502_irq {
- /* INT1 */
- SM5502_IRQ_INT1_ATTACH,
- SM5502_IRQ_INT1_DETACH,
- SM5502_IRQ_INT1_KP,
- SM5502_IRQ_INT1_LKP,
- SM5502_IRQ_INT1_LKR,
- SM5502_IRQ_INT1_OVP_EVENT,
- SM5502_IRQ_INT1_OCP_EVENT,
- SM5502_IRQ_INT1_OVP_OCP_DIS,
-
- /* INT2 */
- SM5502_IRQ_INT2_VBUS_DET,
- SM5502_IRQ_INT2_REV_ACCE,
- SM5502_IRQ_INT2_ADC_CHG,
- SM5502_IRQ_INT2_STUCK_KEY,
- SM5502_IRQ_INT2_STUCK_KEY_RCV,
- SM5502_IRQ_INT2_MHL,
-
- SM5502_IRQ_NUM,
-};
-
-#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0)
-#define SM5502_IRQ_INT1_DETACH_MASK BIT(1)
-#define SM5502_IRQ_INT1_KP_MASK BIT(2)
-#define SM5502_IRQ_INT1_LKP_MASK BIT(3)
-#define SM5502_IRQ_INT1_LKR_MASK BIT(4)
-#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5)
-#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6)
-#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7)
-#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0)
-#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1)
-#define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2)
-#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3)
-#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
-#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
-
-#endif /* __LINUX_EXTCON_SM5502_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 08ed2b0a96e6..87f14e90e984 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -15,8 +15,9 @@
#include <linux/types.h>
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
-#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */
+#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
+#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
+#define F2FS_LOG_SECTORS_PER_BLOCK	3	/* log2(sectors per block) */
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
@@ -32,7 +33,8 @@
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
/* This flag is used by node and meta inodes, and by recovery */
-#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
+#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
+#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
/*
* For further optimization on multi-head logs, on-disk layout supports maximum
@@ -85,6 +87,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
#define CP_COMPACT_SUM_FLAG 0x00000004
#define CP_ORPHAN_PRESENT_FLAG 0x00000002
@@ -168,14 +171,12 @@ struct f2fs_extent {
#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
+#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
+#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))
-#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
- sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
- DEF_NIDS_PER_INODE - 1))
-
struct f2fs_inode {
__le16 i_mode; /* file mode */
__u8 i_advise; /* file hints */
@@ -433,6 +434,24 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
+/* for inline dir */
+#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+ BITS_PER_BYTE + 1))
+#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
+ BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+ NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
+
+/* inline directory entry structure */
+struct f2fs_inline_dentry {
+ __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
+ __u8 reserved[INLINE_RESERVED_SIZE];
+ struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
+ __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
+} __packed;
+
/* file types used in inode_info->flags */
enum {
F2FS_FT_UNKNOWN,
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c6f996f2abb6..798fad9e420d 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
#include <linux/atomic.h>
/*
@@ -25,14 +26,18 @@ struct fault_attr {
unsigned long reject_end;
unsigned long count;
+ struct ratelimit_state ratelimit_state;
+ struct dentry *dname;
};
-#define FAULT_ATTR_INITIALIZER { \
- .interval = 1, \
- .times = ATOMIC_INIT(1), \
- .require_end = ULONG_MAX, \
- .stacktrace_depth = 32, \
- .verbose = 2, \
+#define FAULT_ATTR_INITIALIZER { \
+ .interval = 1, \
+ .times = ATOMIC_INIT(1), \
+ .require_end = ULONG_MAX, \
+ .stacktrace_depth = 32, \
+ .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
+ .verbose = 2, \
+ .dname = NULL, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
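
Folding a ratelimit_state into struct fault_attr changes the initializer but not the usage pattern; DECLARE_FAULT_ATTR() still yields a ready-to-use attribute. A short sketch of the usual declare-and-check idiom (the subsystem hook is hypothetical):

#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(example_fail_alloc);

static bool example_should_fail(size_t size)
{
	/* should_fail() applies probability/interval/times and, with this
	 * change, can rate-limit its own "injecting failure" messages. */
	return should_fail(&example_fail_alloc, size);
}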
diff --git a/include/linux/fence.h b/include/linux/fence.h
index d174585b874b..39efee130d2b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -128,8 +128,8 @@ struct fence_cb {
* from irq context, so normal spinlocks can be used.
*
* A return value of false indicates the fence already passed,
- * or some failure occured that made it impossible to enable
- * signaling. True indicates succesful enabling.
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
*
* fence->status may be set in enable_signaling, but only when false is
* returned.
diff --git a/include/linux/file.h b/include/linux/file.h
index 4d69123377a2..f87d30882a24 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
extern void put_filp(struct file *);
extern int get_unused_fd_flags(unsigned flags);
-#define get_unused_fd() get_unused_fd_flags(0)
extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a5227ab8ccb1..caac2087a4d5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -4,58 +4,24 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
+#include <stdarg.h>
+
#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
+#include <linux/linkage.h>
+#include <linux/printk.h>
#include <linux/workqueue.h>
-#include <uapi/linux/filter.h>
-/* Internally used and optimized filter representation with extended
- * instruction set based on top of classic BPF.
- */
+#include <asm/cacheflush.h>
-/* instruction classes */
-#define BPF_ALU64 0x07 /* alu mode in double word width */
-
-/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
-#define BPF_XADD 0xc0 /* exclusive add */
-
-/* alu/jmp fields */
-#define BPF_MOV 0xb0 /* mov reg to reg */
-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
-
-/* change endianness of a register */
-#define BPF_END 0xd0 /* flags for endianness conversion: */
-#define BPF_TO_LE 0x00 /* convert to little-endian */
-#define BPF_TO_BE 0x08 /* convert to big-endian */
-#define BPF_FROM_LE BPF_TO_LE
-#define BPF_FROM_BE BPF_TO_BE
-
-#define BPF_JNE 0x50 /* jump != */
-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
-#define BPF_CALL 0x80 /* function call */
-#define BPF_EXIT 0x90 /* function return */
-
-/* Register numbers */
-enum {
- BPF_REG_0 = 0,
- BPF_REG_1,
- BPF_REG_2,
- BPF_REG_3,
- BPF_REG_4,
- BPF_REG_5,
- BPF_REG_6,
- BPF_REG_7,
- BPF_REG_8,
- BPF_REG_9,
- BPF_REG_10,
- __MAX_BPF_REG,
-};
+#include <uapi/linux/filter.h>
+#include <uapi/linux/bpf.h>
-/* BPF has 10 general purpose 64-bit registers and stack frame. */
-#define MAX_BPF_REG __MAX_BPF_REG
+struct sk_buff;
+struct sock;
+struct seccomp_data;
+struct bpf_prog_aux;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -161,6 +127,30 @@ enum {
.off = 0, \
.imm = IMM })
+/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
+#define BPF_LD_IMM64(DST, IMM) \
+ BPF_LD_IMM64_RAW(DST, 0, IMM)
+
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_DW | BPF_IMM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = (__u32) (IMM) }), \
+ ((struct bpf_insn) { \
+ .code = 0, /* zero is reserved opcode */ \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = ((__u64) (IMM)) >> 32 })
+
+#define BPF_PSEUDO_MAP_FD 1
+
+/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
+#define BPF_LD_MAP_FD(DST, MAP_FD) \
+ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
@@ -299,14 +289,6 @@ enum {
#define SK_RUN_FILTER(filter, ctx) \
(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
-struct bpf_insn {
- __u8 code; /* opcode */
- __u8 dst_reg:4; /* dest register */
- __u8 src_reg:4; /* source register */
- __s16 off; /* signed offset */
- __s32 imm; /* signed immediate constant */
-};
-
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
@@ -320,20 +302,23 @@ struct sock_fprog_kern {
struct sock_filter *filter;
};
-struct sk_buff;
-struct sock;
-struct seccomp_data;
+struct bpf_binary_header {
+ unsigned int pages;
+ u8 image[];
+};
struct bpf_prog {
- u32 jited:1, /* Is our filter JIT'ed? */
- len:31; /* Number of filter blocks */
+ u16 pages; /* Number of allocated pages */
+ bool jited; /* Is our filter JIT'ed? */
+ u32 len; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
+ /* Instructions for interpreter */
union {
struct sock_filter insns[0];
struct bpf_insn insnsi[0];
- struct work_struct work;
};
};
@@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+{
+ set_memory_ro((unsigned long)fp, fp->pages);
+}
+
+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+{
+ set_memory_rw((unsigned long)fp, fp->pages);
+}
+#else
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+{
+}
+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
+
int sk_filter(struct sock *sk, struct sk_buff *skb);
void bpf_prog_select_runtime(struct bpf_prog *fp);
@@ -361,10 +366,22 @@ void bpf_prog_free(struct bpf_prog *fp);
int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_insn *new_prog, int *new_len);
+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
+ gfp_t gfp_extra_flags);
+void __bpf_prog_free(struct bpf_prog *fp);
+
+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
+{
+ bpf_prog_unlock_ro(fp);
+ __bpf_prog_free(fp);
+}
+
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
@@ -377,6 +394,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+#ifdef CONFIG_BPF_JIT
+typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+
+struct bpf_binary_header *
+bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
+ unsigned int alignment,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_jit_binary_free(struct bpf_binary_header *hdr);
+
+void bpf_jit_compile(struct bpf_prog *fp);
+void bpf_jit_free(struct bpf_prog *fp);
+
+static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
+ u32 pass, void *image)
+{
+ pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
+ flen, proglen, pass, image);
+ if (image)
+ print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
+ 16, 1, image, proglen, false);
+}
+#else
+static inline void bpf_jit_compile(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_jit_free(struct bpf_prog *fp)
+{
+ bpf_prog_unlock_free(fp);
+}
+#endif /* CONFIG_BPF_JIT */
+
#define BPF_ANC BIT(15)
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
@@ -424,36 +473,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
-#ifdef CONFIG_BPF_JIT
-#include <stdarg.h>
-#include <linux/linkage.h>
-#include <linux/printk.h>
-
-void bpf_jit_compile(struct bpf_prog *fp);
-void bpf_jit_free(struct bpf_prog *fp);
-
-static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
- u32 pass, void *image)
-{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
- if (image)
- print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
- 16, 1, image, proglen, false);
-}
-#else
-#include <linux/slab.h>
-
-static inline void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_free(struct bpf_prog *fp)
-{
- kfree(fp);
-}
-#endif /* CONFIG_BPF_JIT */
-
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
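
BPF_LD_IMM64() expands to two struct bpf_insn slots (the 64-bit immediate is split across them, with the second slot using the reserved zero opcode), and BPF_LD_MAP_FD() is the same encoding with BPF_PSEUDO_MAP_FD in src_reg so the loader knows the immediate is a process-local map file descriptor. A hedged sketch of assembling instructions with these macros, in the style of the in-kernel BPF test code (the fd value is arbitrary):

#include <linux/filter.h>

/* Each BPF_LD_MAP_FD() contributes two array entries. */
static const struct bpf_insn example_prog[] = {
	BPF_LD_MAP_FD(BPF_REG_1, 42),	/* r1 = pointer to the map behind fd 42 */
	BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 (program return value) */
	BPF_EXIT_INSN(),
};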
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
+#include <linux/gfp.h>
/*
* When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
seqcount_t sequence;
};
-int fprop_global_init(struct fprop_global *p);
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
raw_spinlock_t lock; /* Protect period and numerator */
};
-int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
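
fprop_global_init() and fprop_local_init_percpu() now take an explicit gfp mask, so callers initializing from atomic or reclaim-sensitive contexts can say so instead of implicitly getting GFP_KERNEL. A minimal init sketch:

#include <linux/flex_proportions.h>

static struct fprop_global example_prop;
static struct fprop_local_percpu example_local;

static int __init example_init(void)
{
	int err;

	err = fprop_global_init(&example_prop, GFP_KERNEL);
	if (err)
		return err;

	err = fprop_local_init_percpu(&example_local, GFP_KERNEL);
	if (err)
		fprop_global_destroy(&example_prop);
	return err;
}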
diff --git a/include/linux/font.h b/include/linux/font.h
index 40a24ab41b36..d6821769dd1e 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -31,6 +31,7 @@ struct font_desc {
#define SUN12x22_IDX 7
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
+#define FONT6x10_IDX 10
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -41,7 +42,8 @@ extern const struct font_desc font_vga_8x8,
font_sun_8x16,
font_sun_12x22,
font_acorn_8x8,
- font_mini_4x6;
+ font_mini_4x6,
+ font_6x10;
/* Find a font with a specific name */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 7fd81b8c4897..6b7fd9cf5ea2 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
* defined in <linux/wait.h>
*/
-#define wait_event_freezekillable(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count(); \
- __retval; \
-})
-
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition) \
({ \
@@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
__retval; \
})
-#define wait_event_freezable(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_interruptible(wq, (condition)); \
- freezer_count(); \
- __retval; \
-})
-
-#define wait_event_freezable_timeout(wq, condition, timeout) \
-({ \
- long __retval = timeout; \
- freezer_do_not_count(); \
- __retval = wait_event_interruptible_timeout(wq, (condition), \
- __retval); \
- freezer_count(); \
- __retval; \
-})
-
-#define wait_event_freezable_exclusive(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_interruptible_exclusive(wq, condition); \
- freezer_count(); \
- __retval; \
-})
-
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -331,18 +293,6 @@ static inline void set_freezable(void) {}
#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
schedule_hrtimeout_range(expires, delta, mode)
-#define wait_event_freezable(wq, condition) \
- wait_event_interruptible(wq, condition)
-
-#define wait_event_freezable_timeout(wq, condition, timeout) \
- wait_event_interruptible_timeout(wq, condition, timeout)
-
-#define wait_event_freezable_exclusive(wq, condition) \
- wait_event_interruptible_exclusive(wq, condition)
-
-#define wait_event_freezekillable(wq, condition) \
- wait_event_killable(wq, condition)
-
#define wait_event_freezekillable_unsafe(wq, condition) \
wait_event_killable(wq, condition)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94187721ad41..f90c0282c114 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -18,6 +18,7 @@
#include <linux/pid.h>
#include <linux/bug.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fiemap.h>
@@ -192,8 +193,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define READ 0
#define WRITE RW_MASK
#define READA RWA_MASK
-#define KERNEL_READ (READ|REQ_KERNEL)
-#define KERNEL_WRITE (WRITE|REQ_KERNEL)
#define READ_SYNC (READ | REQ_SYNC)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -225,6 +224,13 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_TIMES_SET (1 << 16)
/*
+ * Whiteout is represented by a char device. The following constants define the
+ * mode and device number to use.
+ */
+#define WHITEOUT_MODE 0
+#define WHITEOUT_DEV 0
+
+/*
* This is the Inode Attributes structure, used for notify_change(). It
* uses the above definitions as flags, to know which values have changed.
* Also, in this manner, a Filesystem can look at only the values it cares
@@ -256,6 +262,12 @@ struct iattr {
*/
#include <linux/quota.h>
+/*
+ * Maximum number of layers of fs stack. Needs to be limited to
+ * prevent kernel stack overflow
+ */
+#define FILESYSTEM_MAX_STACK_DEPTH 2
+
/**
* enum positive_aop_returns - aop return codes with specific semantics
*
@@ -390,7 +402,7 @@ struct address_space {
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
- struct mutex i_mmap_mutex; /* protect tree, count, list */
+ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
unsigned long nrshadows; /* number of shadow entries */
@@ -456,6 +468,26 @@ struct block_device {
int mapping_tagged(struct address_space *mapping, int tag);
+static inline void i_mmap_lock_write(struct address_space *mapping)
+{
+ down_write(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_write(struct address_space *mapping)
+{
+ up_write(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_lock_read(struct address_space *mapping)
+{
+ down_read(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_read(struct address_space *mapping)
+{
+ up_read(&mapping->i_mmap_rwsem);
+}
+
/*
* Might pages of this file be mapped into userspace?
*/
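
i_mmap_mutex becomes an rw_semaphore here, and the four new inline helpers are thin wrappers around down_write()/up_write() and down_read()/up_read() on mapping->i_mmap_rwsem. A minimal conversion sketch for a caller that only reads the interval tree; the function name and walk body are illustrative:

    /* Hypothetical caller: read-side walk of the file's mapping tree. */
    static void walk_file_mappings(struct address_space *mapping)
    {
            /* was: mutex_lock(&mapping->i_mmap_mutex); */
            i_mmap_lock_read(mapping);      /* readers may now run concurrently */
            /* ... walk mapping->i_mmap ... */
            i_mmap_unlock_read(mapping);
            /* was: mutex_unlock(&mapping->i_mmap_mutex); */
    }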
@@ -595,9 +627,6 @@ struct inode {
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
struct file_lock *i_flock;
struct address_space i_data;
-#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
-#endif
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
@@ -628,11 +657,13 @@ static inline int inode_unhashed(struct inode *inode)
* 2: child/target
* 3: xattr
* 4: second non-directory
- * The last is for certain operations (such as rename) which lock two
+ * 5: second parent (when locking independent directories in rename)
+ *
+ * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
* non-directories at once.
*
* The locking order between these classes is
- * parent -> child -> normal -> xattr -> second non-directory
+ * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
*/
enum inode_i_mutex_lock_class
{
@@ -640,7 +671,8 @@ enum inode_i_mutex_lock_class
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
- I_MUTEX_NONDIR2
+ I_MUTEX_NONDIR2,
+ I_MUTEX_PARENT2,
};
void lock_two_nondirectories(struct inode *, struct inode*);
@@ -775,7 +807,6 @@ struct file {
struct rcu_head fu_rcuhead;
} f_u;
struct path f_path;
-#define f_dentry f_path.dentry
struct inode *f_inode; /* cached value */
const struct file_operations *f_op;
@@ -851,13 +882,7 @@ static inline struct file *get_file(struct file *f)
*/
#define FILE_LOCK_DEFERRED 1
-/*
- * The POSIX file lock owner is determined by
- * the "struct files_struct" in the thread group
- * (or NULL for no owner - BSD locks).
- *
- * Lockd stuffs a "host" pointer into this.
- */
+/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock_operations {
@@ -868,10 +893,13 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
+ void (*lm_get_owner)(struct file_lock *, struct file_lock *);
+ void (*lm_put_owner)(struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, struct file_lock *, int);
- void (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock **, int);
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock **, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
};
struct lock_manager {
@@ -966,7 +994,7 @@ void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
+extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
@@ -980,11 +1008,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int generic_setlease(struct file *, long, struct file_lock **);
-extern int vfs_setlease(struct file *, long, struct file_lock **);
-extern int lease_modify(struct file_lock **, int);
-extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
-extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
+extern int lease_modify(struct file_lock **, int, struct list_head *);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1013,12 +1039,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
#endif
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
- return 0;
+ return -EINVAL;
}
static inline int fcntl_getlease(struct file *filp)
{
- return 0;
+ return F_UNLCK;
}
static inline void locks_init_lock(struct file_lock *fl)
@@ -1026,7 +1052,7 @@ static inline void locks_init_lock(struct file_lock *fl)
return;
}
-static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
return;
}
@@ -1100,33 +1126,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
}
static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp)
+ struct file_lock **flp, void **priv)
{
return -EINVAL;
}
static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease)
+ struct file_lock **lease, void **priv)
{
return -EINVAL;
}
-static inline int lease_modify(struct file_lock **before, int arg)
+static inline int lease_modify(struct file_lock **before, int arg,
+ struct list_head *dispose)
{
return -EINVAL;
}
-
-static inline int lock_may_read(struct inode *inode, loff_t start,
- unsigned long len)
-{
- return 1;
-}
-
-static inline int lock_may_write(struct inode *inode, loff_t start,
- unsigned long len)
-{
- return 1;
-}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1151,8 +1166,8 @@ extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
-extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+extern void f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1226,6 +1241,7 @@ struct super_block {
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
+ unsigned int s_quota_types; /* Bitmask of supported quota types */
struct quota_info s_dquot; /* Diskquota specific options */
struct sb_writers s_writers;
@@ -1284,6 +1300,11 @@ struct super_block {
struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
+
+ /*
+ * Indicates how deep in a filesystem stack this SB is
+ */
+ int s_stack_depth;
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1416,6 +1437,7 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
+extern int vfs_whiteout(struct inode *, struct dentry *);
/*
* VFS dentry helper functions.
@@ -1463,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
*/
-typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
+struct dir_context;
+typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+ unsigned);
+
struct dir_context {
const filldir_t actor;
loff_t pos;
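
filldir_t callbacks now receive the struct dir_context itself instead of an opaque void pointer, so implementations typically embed the context in a larger structure and recover it with container_of(). A sketch of a callback using the new prototype; the surrounding structure and field names are illustrative:

    /* Hypothetical ->iterate() actor using the new filldir_t signature. */
    struct getdents_ctx {
            struct dir_context ctx;         /* embedded context passed to the actor */
            int count;
    };

    static int fill_one(struct dir_context *ctx, const char *name, int namlen,
                        loff_t offset, u64 ino, unsigned int d_type)
    {
            struct getdents_ctx *buf = container_of(ctx, struct getdents_ctx, ctx);

            buf->count++;
            return 0;       /* zero: keep iterating; non-zero: stop */
    }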
@@ -1493,6 +1518,7 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
+ void (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -1506,10 +1532,10 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **);
+ int (*setlease)(struct file *, long, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
- int (*show_fdinfo)(struct seq_file *m, struct file *f);
+ void (*show_fdinfo)(struct seq_file *m, struct file *f);
};
struct inode_operations {
@@ -1546,6 +1572,9 @@ struct inode_operations {
umode_t create_mode, int *opened);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
+
+ /* WARNING: probably going away soon, do not use! */
+ int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
} ____cacheline_aligned;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1553,6 +1582,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec *fast_pointer,
struct iovec **ret_pointer);
+extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -1570,7 +1600,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
+ int (*freeze_super) (struct super_block *);
int (*freeze_fs) (struct super_block *);
+ int (*thaw_super) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -1583,6 +1615,7 @@ struct super_operations {
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+ struct dquot **(*get_dquots)(struct inode *);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
long (*nr_cached_objects)(struct super_block *, int);
@@ -1643,6 +1676,9 @@ struct super_operations {
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
+#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
+ (inode)->i_rdev == WHITEOUT_DEV)
+
/*
* Inode state bits. Protected by inode->i_lock
*
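
WHITEOUT_MODE/WHITEOUT_DEV above and IS_WHITEOUT() here encode a whiteout as a character device inode with device number 0. A minimal sketch of how a stacking filesystem might test for one; the helper is hypothetical and only illustrates the macro:

    /* Hypothetical helper: a whiteout inode means "this name is deleted
     * in the upper layer", so lookups should treat it as absent.
     */
    static bool entry_is_whited_out(struct inode *inode)
    {
            return inode && IS_WHITEOUT(inode);
    }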
@@ -1855,7 +1891,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char *, const char *, unsigned long, void *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
@@ -1874,7 +1911,7 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-static inline struct inode *file_inode(struct file *f)
+static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}
@@ -2049,7 +2086,7 @@ struct filename {
extern long vfs_truncate(struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
-extern int do_fallocate(struct file *file, int mode, loff_t offset,
+extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
@@ -2057,9 +2094,11 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
+extern int vfs_open(const struct path *, struct file *, const struct cred *);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
+extern struct filename *getname_flags(const char __user *, int, int *);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
@@ -2137,7 +2176,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb)
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-extern const struct file_operations bad_sock_fops;
#ifdef CONFIG_BLOCK
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
@@ -2270,7 +2308,9 @@ extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
+extern int __inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
+extern int __check_sticky(struct inode *dir, struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
@@ -2455,6 +2495,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
/* fs/block_dev.c */
+extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
@@ -2469,6 +2510,9 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, loff_t *, size_t len, unsigned int flags);
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2611,6 +2655,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata);
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
+extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
@@ -2753,12 +2798,25 @@ static inline int is_sxid(umode_t mode)
return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}
+static inline int check_sticky(struct inode *dir, struct inode *inode)
+{
+ if (!(dir->i_mode & S_ISVTX))
+ return 0;
+
+ return __check_sticky(dir, inode);
+}
+
static inline void inode_has_no_xattr(struct inode *inode)
{
if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
inode->i_flags |= S_NOSEC;
}
+static inline bool is_root_inode(struct inode *inode)
+{
+ return inode == inode->i_sb->s_root->d_inode;
+}
+
static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
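
check_sticky() above keeps the cheap S_ISVTX test inline and only calls the out-of-line __check_sticky() when the directory really is sticky. A sketch of the kind of unlink-time permission check that uses it; the function name and simplified error handling are illustrative:

    /* Hypothetical, simplified sticky-bit gate for removing "victim" from "dir". */
    static int may_remove_entry(struct inode *dir, struct dentry *victim)
    {
            if (check_sticky(dir, victim->d_inode))
                    return -EPERM;  /* sticky dir: only owner/privileged may remove */
            return 0;
    }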
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index efb05961bdd8..77d783f71527 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -139,7 +139,6 @@ struct fs_platform_info {
int rx_ring, tx_ring; /* number of buffers on rx */
__u8 macaddr[ETH_ALEN]; /* mac address */
int rx_copybreak; /* limit we copy small frames */
- int use_napi; /* use NAPI */
int napi_weight; /* NAPI weight */
int use_rmii; /* use RMII mode */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index f49ddb1b2273..bf0321eabbda 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -29,7 +29,16 @@
#include <linux/of_platform.h>
#include <linux/interrupt.h>
-#define FSL_IFC_BANK_COUNT 4
+/*
+ * The actual number of banks implemented depends on the IFC version
+ * - IFC version 1.0 implements 4 banks.
+ * - IFC version 1.1 onward implements 8 banks.
+ */
+#define FSL_IFC_BANK_COUNT 8
+
+#define FSL_IFC_VERSION_MASK 0x0F0F0000
+#define FSL_IFC_VERSION_1_0_0 0x01000000
+#define FSL_IFC_VERSION_1_1_0 0x01010000
/*
* CSPR - Chip Select Property Register
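
With FSL_IFC_BANK_COUNT raised to 8, the new version constants let the driver pick the real bank count at probe time and store it in the version/banks fields added to struct fsl_ifc_ctrl further down. A sketch of that probe-time logic; reading the revision through a ctrl->regs->ifc_rev register is an assumption about the driver, not something defined in this header:

    /* Illustrative probe-time detection, assuming an ifc_rev revision register. */
    static void fsl_ifc_detect_banks(struct fsl_ifc_ctrl *ctrl)
    {
            u32 ver = ioread32be(&ctrl->regs->ifc_rev) & FSL_IFC_VERSION_MASK;

            ctrl->version = ver;
            ctrl->banks = (ver == FSL_IFC_VERSION_1_0_0) ? 4 : FSL_IFC_BANK_COUNT;
    }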
@@ -776,23 +785,23 @@ struct fsl_ifc_regs {
__be32 cspr;
u32 res2;
} cspr_cs[FSL_IFC_BANK_COUNT];
- u32 res3[0x19];
+ u32 res3[0xd];
struct {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
- u32 res5[0x17];
+ u32 res5[0xc];
struct {
- __be32 csor_ext;
__be32 csor;
+ __be32 csor_ext;
u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
- u32 res7[0x19];
+ u32 res7[0xc];
struct {
__be32 ftim[4];
u32 res8[0x8];
} ftim_cs[FSL_IFC_BANK_COUNT];
- u32 res9[0x60];
+ u32 res9[0x30];
__be32 rb_stat;
u32 res10[0x2];
__be32 ifc_gcr;
@@ -827,6 +836,8 @@ struct fsl_ifc_ctrl {
int nand_irq;
spinlock_t lock;
void *nand;
+ int version;
+ int banks;
u32 nand_stat;
wait_queue_head_t nand_wait;
diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h
new file mode 100644
index 000000000000..b213c02963c9
--- /dev/null
+++ b/include/linux/fsldma.h
@@ -0,0 +1,13 @@
+/*
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_DMA_H
+#define FSL_DMA_H
+/* fsl dma API for external start */
+int fsl_dma_external_start(struct dma_chan *dchan, int enable);
+
+#endif
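
The new fsldma.h exposes a single call for enabling "external start" on a Freescale DMA channel. A minimal client-side sketch; how the dma_chan was obtained and how descriptors are submitted afterwards is outside this header and purely illustrative:

    #include <linux/dmaengine.h>
    #include <linux/fsldma.h>

    /* Hypothetical helper: ask the channel to wait for the external
     * start signal before running its descriptors.
     */
    static int enable_external_start(struct dma_chan *chan)
    {
            int ret = fsl_dma_external_start(chan, 1);      /* 1 = enable */

            if (ret)
                    pr_warn("channel does not support external start\n");
            return ret;
    }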
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index ca060d7c4fa6..0f313f93c586 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -197,24 +197,6 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
- * Inode specific fields in an fsnotify_mark
- */
-struct fsnotify_inode_mark {
- struct inode *inode; /* inode this mark is associated with */
- struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
- struct list_head free_i_list; /* tmp list used when freeing this mark */
-};
-
-/*
- * Mount point specific fields in an fsnotify_mark
- */
-struct fsnotify_vfsmount_mark {
- struct vfsmount *mnt; /* vfsmount this mark is associated with */
- struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */
- struct list_head free_m_list; /* tmp list used when freeing this mark */
-};
-
-/*
* a mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
@@ -230,11 +212,17 @@ struct fsnotify_mark {
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
struct fsnotify_group *group; /* group this mark is for */
- struct list_head g_list; /* list of marks by group->i_fsnotify_marks */
+ struct list_head g_list; /* list of marks by group->i_fsnotify_marks
+ * Also reused for queueing mark into
+ * destroy_list when it's waiting for
+ * the end of SRCU period before it can
+ * be freed */
spinlock_t lock; /* protect group and inode */
+ struct hlist_node obj_list; /* list of marks for inode / vfsmount */
+ struct list_head free_list; /* tmp list used when freeing this mark */
union {
- struct fsnotify_inode_mark i;
- struct fsnotify_vfsmount_mark m;
+ struct inode *inode; /* inode this mark is associated with */
+ struct vfsmount *mnt; /* vfsmount this mark is associated with */
};
__u32 ignored_mask; /* events types to ignore */
#define FSNOTIFY_MARK_FLAG_INODE 0x01
@@ -243,7 +231,6 @@ struct fsnotify_mark {
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
unsigned int flags; /* vfsmount or inode mark? */
- struct list_head destroy_list;
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f0b0edbf55a9..1da602982cf9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -39,6 +39,12 @@
# define FTRACE_FORCE_LIST_FUNC 0
#endif
+/* Main tracing buffer and events set up */
+#ifdef CONFIG_TRACING
+void trace_init(void);
+#else
+static inline void trace_init(void) { }
+#endif
struct module;
struct ftrace_hash;
@@ -56,9 +62,16 @@ struct ftrace_ops;
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * IPMODIFY are a kind of attribute flags which can be set only before
+ * registering the ftrace_ops, and can not be modified while registered.
+ * Changing those attribute flags after registering ftrace_ops will
+ * cause unexpected results.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
@@ -89,6 +102,20 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* INITIALIZED - The ftrace_ops has already been initialized (first use time
* register_ftrace_function() is called, it will initialized the ops)
* DELETED - The ops are being deleted, do not let them be registered again.
+ * ADDING - The ops is in the process of being added.
+ * REMOVING - The ops is in the process of being removed.
+ * MODIFYING - The ops is in the process of changing its filter functions.
+ * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
+ * The arch specific code sets this flag when it allocated a
+ * trampoline. This lets the arch know that it can update the
+ * trampoline in case the callback function changes.
+ * The ftrace_ops trampoline can be set by the ftrace users, and
+ * in such cases the arch must not modify it. Only the arch ftrace
+ * core code should set this flag.
+ * IPMODIFY - The ops can modify the IP register. This can only be set with
+ * SAVE_REGS. If another ops with this flag set is already registered
+ * for any of the functions that this ops will be registered for, then
+ * this ops will fail to register or set_filter_ip.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +127,11 @@ enum {
FTRACE_OPS_FL_STUB = 1 << 6,
FTRACE_OPS_FL_INITIALIZED = 1 << 7,
FTRACE_OPS_FL_DELETED = 1 << 8,
+ FTRACE_OPS_FL_ADDING = 1 << 9,
+ FTRACE_OPS_FL_REMOVING = 1 << 10,
+ FTRACE_OPS_FL_MODIFYING = 1 << 11,
+ FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
+ FTRACE_OPS_FL_IPMODIFY = 1 << 13,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -132,8 +164,9 @@ struct ftrace_ops {
int nr_trampolines;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
- struct ftrace_hash *tramp_hash;
+ struct ftrace_ops_hash old_hash;
unsigned long trampoline;
+ unsigned long trampoline_size;
#endif
};
@@ -247,7 +280,9 @@ struct ftrace_func_command {
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);
-void ftrace_bug(int err, unsigned long ip);
+struct dyn_ftrace;
+
+void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;
@@ -279,6 +314,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
+bool is_ftrace_trampoline(unsigned long addr);
+
/*
* The dyn_ftrace record's flags field is split into two parts.
* the first part which is '0-FTRACE_REF_MAX' is a counter of
@@ -289,6 +326,7 @@ extern int ftrace_nr_registered_ops(void);
* ENABLED - the function is being traced
* REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs.
+ * IPMODIFY - the record allows for the IP address to be changed.
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
@@ -302,10 +340,11 @@ enum {
FTRACE_FL_REGS_EN = (1UL << 29),
FTRACE_FL_TRAMP = (1UL << 28),
FTRACE_FL_TRAMP_EN = (1UL << 27),
+ FTRACE_FL_IPMODIFY = (1UL << 26),
};
-#define FTRACE_REF_MAX_SHIFT 27
-#define FTRACE_FL_BITS 5
+#define FTRACE_REF_MAX_SHIFT 26
+#define FTRACE_FL_BITS 6
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
@@ -578,6 +617,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user
size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
+
+static inline bool is_ftrace_trampoline(unsigned long addr)
+{
+ return false;
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
@@ -835,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
+extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 28672e87e910..0bebb5c348b8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -138,6 +138,17 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
+/*
+ * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
+ * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
+ * simplifies those functions and keeps them in sync.
+ */
+static inline enum print_line_t trace_handle_return(struct trace_seq *s)
+{
+ return trace_seq_has_overflowed(s) ?
+ TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
+}
+
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
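
trace_handle_return() folds the recurring "did the trace_seq overflow?" check into one helper, so event output callbacks can end in a single return. A sketch of a print handler using it; the event name and printed fields are illustrative:

    /* Hypothetical trace_event output callback. */
    static enum print_line_t my_event_print(struct trace_iterator *iter,
                                            int flags, struct trace_event *event)
    {
            struct trace_seq *s = &iter->seq;

            trace_seq_printf(s, "my_event: value=%d\n", 42);
            return trace_handle_return(s);  /* PARTIAL_LINE only on overflow */
    }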
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1c2fdaa2ffc3..1ccaab44abcc 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
+extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start, unsigned int nr,
+ void *data);
+
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
@@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid);
extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+ size_t size);
+
#ifdef CONFIG_OF
extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
const char *propname, int index);
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index c0894dd8827b..667c31101b8b 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -178,12 +178,12 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \
nla = ntb[attr_nr]; \
if (nla) { \
- if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \
+ if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
pr_info("<< must not change invariant attr: %s\n", #name); \
return -EEXIST; \
} \
assignment; \
- } else if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \
+ } else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
/* attribute missing from payload, */ \
/* which was expected */ \
} else if ((attr_flag) & DRBD_F_REQUIRED) { \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5e7219dc0fae..b840e3b2770d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -110,11 +110,8 @@ struct vm_area_struct;
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
- __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
- __GFP_HARDWALL | __GFP_HIGHMEM | \
- __GFP_MOVABLE)
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
@@ -156,7 +153,7 @@ struct vm_area_struct;
#define GFP_DMA32 __GFP_DMA32
/* Convert GFP flags to their corresponding migrate type */
-static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
@@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order);
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
-void drain_all_pages(void);
-void drain_local_pages(void *dummy);
+void drain_all_pages(struct zone *zone);
+void drain_local_pages(struct zone *zone);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 85aa5d0b9357..ab81339a8590 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -216,14 +216,15 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
WARN_ON(1);
return -EINVAL;
}
-static inline void gpio_unlock_as_irq(struct gpio_chip *chip,
- unsigned int offset)
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
WARN_ON(1);
}
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 12f146fa6604..fd85cb120ee0 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -66,7 +66,7 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
-int gpiod_get_direction(const struct gpio_desc *desc);
+int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
@@ -74,14 +74,24 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
+void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
+void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
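
gpiod_set_array() and its _raw/_cansleep variants update a whole group of descriptors with one call instead of looping over gpiod_set_value(). A small consumer sketch; how the four descriptors were requested (e.g. via devm_gpiod_get_index()) is not shown, and the bus layout is illustrative:

    /* Hypothetical: drive a 4-bit parallel bus from one byte's low nibble. */
    static void write_nibble(struct gpio_desc **bus, u8 nibble)
    {
            int values[4];
            int i;

            for (i = 0; i < 4; i++)
                    values[i] = (nibble >> i) & 1;

            gpiod_set_array(4, bus, values);
    }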
@@ -94,6 +104,13 @@ int gpiod_to_irq(const struct gpio_desc *desc);
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
+/* Child properties interface */
+struct fwnode_handle;
+
+struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
+ const char *propname);
+struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
+ struct fwnode_handle *child);
#else /* CONFIG_GPIOLIB */
static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
@@ -210,6 +227,13 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -221,6 +245,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
@@ -233,6 +264,13 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -245,6 +283,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index e78a2373e374..c497c62889d1 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -32,6 +32,7 @@ struct seq_file;
* @get: returns value for signal "offset"; for output signals this
* returns either the value actually sensed, or zero
* @set: assigns output value for signal "offset"
+ * @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_debounce: optional hook for setting debounce time for specified gpio in
* interrupt triggered gpio chips
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
@@ -56,6 +57,8 @@ struct seq_file;
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
* @exported: flags if the gpiochip is exported for use from sysfs. Private.
+ * @irq_not_threaded: flag must be set if @can_sleep is set but the
+ * IRQs don't need to be threaded
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -87,6 +90,9 @@ struct gpio_chip {
unsigned offset);
void (*set)(struct gpio_chip *chip,
unsigned offset, int value);
+ void (*set_multiple)(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits);
int (*set_debounce)(struct gpio_chip *chip,
unsigned offset,
unsigned debounce);
@@ -101,11 +107,12 @@ struct gpio_chip {
struct gpio_desc *desc;
const char *const *names;
bool can_sleep;
+ bool irq_not_threaded;
bool exported;
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
- * With CONFIG_GPIO_IRQCHIP we get an irqchip inside the gpiolib
+ * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
* to handle IRQs for most practical cases.
*/
struct irq_chip *irqchip;
@@ -141,13 +148,13 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
/* add/remove chips */
extern int gpiochip_add(struct gpio_chip *chip);
-extern int gpiochip_remove(struct gpio_chip *chip);
+extern void gpiochip_remove(struct gpio_chip *chip);
extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip, void *data));
/* lock/unlock as IRQ */
-int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
@@ -164,9 +171,10 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type);
-#endif /* CONFIG_GPIO_IRQCHIP */
+#endif /* CONFIG_GPIOLIB_IRQCHIP */
-int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label);
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+ const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB */
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 8b622468952c..ee2d8c6f9130 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,6 +2,7 @@
#define _GPIO_KEYS_H
struct device;
+struct gpio_desc;
/**
* struct gpio_keys_button - configuration parameters
@@ -17,6 +18,7 @@ struct device;
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
+ * @gpiod: GPIO descriptor
*/
struct gpio_keys_button {
unsigned int code;
@@ -29,6 +31,7 @@ struct gpio_keys_button {
bool can_disable;
int value;
unsigned int irq;
+ struct gpio_desc *gpiod;
};
/**
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..1afde47e1528 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,7 +15,6 @@
*/
#include <asm/types.h>
-#include <asm/hash.h>
#include <linux/compiler.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr)
return (u32)val;
}
-struct fast_hash_ops {
- u32 (*hash)(const void *data, u32 len, u32 seed);
- u32 (*hash2)(const u32 *data, u32 len, u32 seed);
-};
-
-/**
- * arch_fast_hash - Caclulates a hash over a given buffer that can have
- * arbitrary size. This function will eventually use an
- * architecture-optimized hashing implementation if
- * available, and trades off distribution for speed.
- *
- * @data: buffer to hash
- * @len: length of buffer in bytes
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
-
-/**
- * arch_fast_hash2 - Caclulates a hash over a given buffer that has a
- * size that is of a multiple of 32bit words. This
- * function will eventually use an architecture-
- * optimized hashing implementation if available,
- * and trades off distribution for speed.
- *
- * @data: buffer to hash (must be 32bit padded)
- * @len: number of 32bit words
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
-
#endif /* _LINUX_HASH_H */
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 11c0182a153b..cbb5790a35cd 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -1,9 +1,24 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef __LINUX_HDMI_H_
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f53c4a9cca1d..06c4607744f6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -234,6 +234,33 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+
+#define HID_CP_CONSUMERCONTROL 0x000c0001
+#define HID_CP_NUMERICKEYPAD 0x000c0002
+#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003
+#define HID_CP_MICROPHONE 0x000c0004
+#define HID_CP_HEADPHONE 0x000c0005
+#define HID_CP_GRAPHICEQUALIZER 0x000c0006
+#define HID_CP_FUNCTIONBUTTONS 0x000c0036
+#define HID_CP_SELECTION 0x000c0080
+#define HID_CP_MEDIASELECTION 0x000c0087
+#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_PLAYBACKSPEED 0x000c00f1
+#define HID_CP_PROXIMITY 0x000c0109
+#define HID_CP_SPEAKERSYSTEM 0x000c0160
+#define HID_CP_CHANNELLEFT 0x000c0161
+#define HID_CP_CHANNELRIGHT 0x000c0162
+#define HID_CP_CHANNELCENTER 0x000c0163
+#define HID_CP_CHANNELFRONT 0x000c0164
+#define HID_CP_CHANNELCENTERFRONT 0x000c0165
+#define HID_CP_CHANNELSIDE 0x000c0166
+#define HID_CP_CHANNELSURROUND 0x000c0167
+#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168
+#define HID_CP_CHANNELTOP 0x000c0169
+#define HID_CP_CHANNELUNKNOWN 0x000c016a
+#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
+#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
+
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -265,6 +292,7 @@ struct hid_item {
#define HID_CONNECT_HIDDEV 0x08
#define HID_CONNECT_HIDDEV_FORCE 0x10
#define HID_CONNECT_FF 0x20
+#define HID_CONNECT_DRIVER 0x40
#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
HID_CONNECT_HIDDEV|HID_CONNECT_FF)
@@ -287,6 +315,7 @@ struct hid_item {
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
+#define HID_QUIRK_ALWAYS_POLL 0x00000400
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
@@ -310,11 +339,8 @@ struct hid_item {
* Vendor specific HID device groups
*/
#define HID_GROUP_RMI 0x0100
-
-/*
- * Vendor specific HID device groups
- */
#define HID_GROUP_WACOM 0x0101
+#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
/*
* This is the global environment of the parser. This information is
@@ -440,6 +466,7 @@ struct hid_output_fifo {
#define HID_CLAIMED_INPUT 1
#define HID_CLAIMED_HIDDEV 2
#define HID_CLAIMED_HIDRAW 4
+#define HID_CLAIMED_DRIVER 8
#define HID_STAT_ADDED 1
#define HID_STAT_PARSED 2
@@ -1060,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev)
hdev->ll_driver->wait(hdev);
}
+/**
+ * hid_report_len - calculate the report length
+ *
+ * @report: the report whose length we want to know
+ */
+static inline int hid_report_len(struct hid_report *report)
+{
+ /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+}
+
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt);
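
hid_report_len() converts report->size from bits to bytes, rounding up, and adds one byte for the report ID when one is present. A one-line usage sketch; the wrapper itself is illustrative:

    /* Hypothetical: allocate a buffer big enough for one raw report. */
    static u8 *alloc_report_buf(struct hid_report *report, gfp_t flags)
    {
            return kzalloc(hid_report_len(report), flags);
    }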
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 63579cb8d3dc..ad9051bab267 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
spinlock_t **ptl)
{
- VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+ VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pmd_trans_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma, ptl);
else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6e6d338641fe..431b7fc605c9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
}
#endif /* !CONFIG_HUGETLB_PAGE */
+/*
+ * hugepages at page global directory. If the arch supports
+ * hugepages at the pgd level, it needs to define this.
+ */
+#ifndef pgd_huge
+#define pgd_huge(x) 0
+#endif
+
+#ifndef pgd_write
+static inline int pgd_write(pgd_t pgd)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+#ifndef is_hugepd
+/*
+ * Some architectures require a hugepage directory format in order to
+ * support multiple hugepage sizes. For example,
+ * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+ * introduced the same on powerpc. This allows for a more flexible hugepage
+ * pagetable layout.
+ */
+typedef struct { unsigned long pd; } hugepd_t;
+#define is_hugepd(hugepd) (0)
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ return 0;
+}
+#else
+extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr);
+#endif
#define HUGETLB_ANON_FILE "anon_hugepage"
@@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
{
if (!page_size_log)
return &default_hstate;
- return size_to_hstate(1 << page_size_log);
+
+ return size_to_hstate(1UL << page_size_log);
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 0129f89cf98d..bcc853eccc85 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -16,7 +16,6 @@
#define _LINUX_HUGETLB_CGROUP_H
#include <linux/mmdebug.h>
-#include <linux/res_counter.h>
struct hugetlb_cgroup;
/*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 08cfaff8a072..476c685ca6f9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -650,6 +650,8 @@ struct vmbus_channel {
u8 monitor_grp;
u8 monitor_bit;
+ bool rescind; /* got rescind msg */
+
u32 ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b556e0ab946f..e3a1721c8354 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -46,6 +46,8 @@ struct i2c_client;
struct i2c_driver;
union i2c_smbus_data;
struct i2c_board_info;
+enum i2c_slave_event;
+typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
struct module;
@@ -209,6 +211,8 @@ struct i2c_driver {
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
* userspace_devices list
+ * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
+ * calls it to pass on slave events to the slave driver.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -224,6 +228,7 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
+ i2c_slave_cb_t slave_cb; /* callback for slave mode */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -246,6 +251,25 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
dev_set_drvdata(&dev->dev, data);
}
+/* I2C slave support */
+
+enum i2c_slave_event {
+ I2C_SLAVE_REQ_READ_START,
+ I2C_SLAVE_REQ_READ_END,
+ I2C_SLAVE_REQ_WRITE_START,
+ I2C_SLAVE_REQ_WRITE_END,
+ I2C_SLAVE_STOP,
+};
+
+extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+extern int i2c_slave_unregister(struct i2c_client *client);
+
+static inline int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ return client->slave_cb(client, event, val);
+}
+
/**
* struct i2c_board_info - template for device creation
* @type: chip type, to initialize i2c_client.name
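
The slave interface added above routes bus events from an adapter to a client driver through one callback: the client registers it with i2c_slave_register(), and the adapter reports each enum i2c_slave_event via i2c_slave_event(). A sketch of a minimal callback exposing a single byte to the bus master; the private structure and its use of clientdata are illustrative:

    /* Hypothetical slave handler: one byte readable and writable by the master. */
    struct my_slave {
            u8 reg;
    };

    static int my_slave_cb(struct i2c_client *client,
                           enum i2c_slave_event event, u8 *val)
    {
            struct my_slave *priv = i2c_get_clientdata(client);

            switch (event) {
            case I2C_SLAVE_REQ_WRITE_END:   /* master wrote a byte to us */
                    priv->reg = *val;
                    break;
            case I2C_SLAVE_REQ_READ_START:  /* master starts reading; supply a byte */
                    *val = priv->reg;
                    break;
            default:
                    break;
            }
            return 0;
    }

    /* Registered from probe() with i2c_slave_register(client, my_slave_cb). */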
@@ -352,6 +376,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* into I2C transfers instead.
* @functionality: Return the flags that this algorithm/adapter pair supports
* from the I2C_FUNC_* flags.
+ * @reg_slave: Register given client to I2C slave mode of this adapter
+ * @unreg_slave: Unregister given client from I2C slave mode of this adapter
*
* The following structs are for those who like to implement new bus drivers:
* i2c_algorithm is the interface to a class of hardware solutions which can
@@ -359,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* to name two of the most common.
*
* The return codes from the @master_xfer field should indicate the type of
- * error code that occured during the transfer, as documented in the kernel
+ * error code that occurred during the transfer, as documented in the kernel
* Documentation file Documentation/i2c/fault-codes.
*/
struct i2c_algorithm {
@@ -377,6 +403,9 @@ struct i2c_algorithm {
/* To determine what the adapter supports */
u32 (*functionality) (struct i2c_adapter *);
+
+ int (*reg_slave)(struct i2c_client *client);
+ int (*unreg_slave)(struct i2c_client *client);
};
/**
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h
index 69280db02c41..ee3c2aba2a8e 100644
--- a/include/linux/i2c/pmbus.h
+++ b/include/linux/i2c/pmbus.h
@@ -40,6 +40,10 @@
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
+
+ /* regulator support */
+ int num_regulators;
+ struct regulator_init_data *reg_init_data;
};
#endif /* _PMBUS_H_ */
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 8cfb50f38529..0bc03f100d04 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -26,7 +26,6 @@
#define __TWL_H_
#include <linux/types.h>
-#include <linux/phy/phy.h>
#include <linux/input/matrix_keypad.h>
/*
@@ -634,7 +633,6 @@ enum twl4030_usb_mode {
struct twl4030_usb_data {
enum twl4030_usb_mode usb_mode;
unsigned long features;
- struct phy_init_data *init_data;
int (*phy_init)(struct device *dev);
int (*phy_exit)(struct device *dev);
diff --git a/include/linux/i82593.h b/include/linux/i82593.h
deleted file mode 100644
index afac5c7a323d..000000000000
--- a/include/linux/i82593.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Definitions for Intel 82593 CSMA/CD Core LAN Controller
- * The definitions are taken from the 1992 users manual with Intel
- * order number 297125-001.
- *
- * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
- *
- * Copyright 1994, Anders Klemets <klemets@it.kth.se>
- *
- * HISTORY
- * i82593.h,v
- * Revision 1.4 2005/11/4 09:15:00 baroniunas
- * Modified copyright with permission of author as follows:
- *
- * "If I82539.H is the only file with my copyright statement
- * that is included in the Source Forge project, then you have
- * my approval to change the copyright statement to be a GPL
- * license, in the way you proposed on October 10."
- *
- * Revision 1.1 1996/07/17 15:23:12 root
- * Initial revision
- *
- * Revision 1.3 1995/04/05 15:13:58 adj
- * Initial alpha release
- *
- * Revision 1.2 1994/06/16 23:57:31 klemets
- * Mirrored all the fields in the configuration block.
- *
- * Revision 1.1 1994/06/02 20:25:34 klemets
- * Initial revision
- *
- *
- */
-#ifndef _I82593_H
-#define _I82593_H
-
-/* Intel 82593 CSMA/CD Core LAN Controller */
-
-/* Port 0 Command Register definitions */
-
-/* Execution operations */
-#define OP0_NOP 0 /* CHNL = 0 */
-#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
-#define OP0_IA_SETUP 1
-#define OP0_CONFIGURE 2
-#define OP0_MC_SETUP 3
-#define OP0_TRANSMIT 4
-#define OP0_TDR 5
-#define OP0_DUMP 6
-#define OP0_DIAGNOSE 7
-#define OP0_TRANSMIT_NO_CRC 9
-#define OP0_RETRANSMIT 12
-#define OP0_ABORT 13
-/* Reception operations */
-#define OP0_RCV_ENABLE 8
-#define OP0_RCV_DISABLE 10
-#define OP0_STOP_RCV 11
-/* Status pointer control operations */
-#define OP0_FIX_PTR 15 /* CHNL = 1 */
-#define OP0_RLS_PTR 15 /* CHNL = 0 */
-#define OP0_RESET 14
-
-#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
-#define CR0_STATUS_0 0x00
-#define CR0_STATUS_1 0x20
-#define CR0_STATUS_2 0x40
-#define CR0_STATUS_3 0x60
-#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
-
-/* Port 0 Status Register definitions */
-
-#define SR0_NO_RESULT 0 /* dummy */
-#define SR0_EVENT_MASK 0x0f
-#define SR0_IA_SETUP_DONE 1
-#define SR0_CONFIGURE_DONE 2
-#define SR0_MC_SETUP_DONE 3
-#define SR0_TRANSMIT_DONE 4
-#define SR0_TDR_DONE 5
-#define SR0_DUMP_DONE 6
-#define SR0_DIAGNOSE_PASSED 7
-#define SR0_TRANSMIT_NO_CRC_DONE 9
-#define SR0_RETRANSMIT_DONE 12
-#define SR0_EXECUTION_ABORTED 13
-#define SR0_END_OF_FRAME 8
-#define SR0_RECEPTION_ABORTED 10
-#define SR0_DIAGNOSE_FAILED 15
-#define SR0_STOP_REG_HIT 11
-
-#define SR0_CHNL (1 << 4)
-#define SR0_EXECUTION (1 << 5)
-#define SR0_RECEPTION (1 << 6)
-#define SR0_INTERRUPT (1 << 7)
-#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
-
-#define SR3_EXEC_STATE_MASK 0x03
-#define SR3_EXEC_IDLE 0
-#define SR3_TX_ABORT_IN_PROGRESS 1
-#define SR3_EXEC_ACTIVE 2
-#define SR3_ABORT_IN_PROGRESS 3
-#define SR3_EXEC_CHNL (1 << 2)
-#define SR3_STP_ON_NO_RSRC (1 << 3)
-#define SR3_RCVING_NO_RSRC (1 << 4)
-#define SR3_RCV_STATE_MASK 0x60
-#define SR3_RCV_IDLE 0x00
-#define SR3_RCV_READY 0x20
-#define SR3_RCV_ACTIVE 0x40
-#define SR3_RCV_STOP_IN_PROG 0x60
-#define SR3_RCV_CHNL (1 << 7)
-
-/* Port 1 Command Register definitions */
-
-#define OP1_NOP 0
-#define OP1_SWIT_TO_PORT_0 1
-#define OP1_INT_DISABLE 2
-#define OP1_INT_ENABLE 3
-#define OP1_SET_TS 5
-#define OP1_RST_TS 7
-#define OP1_POWER_DOWN 8
-#define OP1_RESET_RING_MNGMT 11
-#define OP1_RESET 14
-#define OP1_SEL_RST 15
-
-#define CR1_STATUS_4 0x00
-#define CR1_STATUS_5 0x20
-#define CR1_STATUS_6 0x40
-#define CR1_STOP_REG_UPDATE (1 << 7)
-
-/* Receive frame status bits */
-
-#define RX_RCLD (1 << 0)
-#define RX_IA_MATCH (1 << 1)
-#define RX_NO_AD_MATCH (1 << 2)
-#define RX_NO_SFD (1 << 3)
-#define RX_SRT_FRM (1 << 7)
-#define RX_OVRRUN (1 << 8)
-#define RX_ALG_ERR (1 << 10)
-#define RX_CRC_ERR (1 << 11)
-#define RX_LEN_ERR (1 << 12)
-#define RX_RCV_OK (1 << 13)
-#define RX_TYP_LEN (1 << 15)
-
-/* Transmit status bits */
-
-#define TX_NCOL_MASK 0x0f
-#define TX_FRTL (1 << 4)
-#define TX_MAX_COL (1 << 5)
-#define TX_HRT_BEAT (1 << 6)
-#define TX_DEFER (1 << 7)
-#define TX_UND_RUN (1 << 8)
-#define TX_LOST_CTS (1 << 9)
-#define TX_LOST_CRS (1 << 10)
-#define TX_LTCOL (1 << 11)
-#define TX_OK (1 << 13)
-#define TX_COLL (1 << 15)
-
-struct i82593_conf_block {
- u_char fifo_limit : 4,
- forgnesi : 1,
- fifo_32 : 1,
- d6mod : 1,
- throttle_enb : 1;
- u_char throttle : 6,
- cntrxint : 1,
- contin : 1;
- u_char addr_len : 3,
- acloc : 1,
- preamb_len : 2,
- loopback : 2;
- u_char lin_prio : 3,
- tbofstop : 1,
- exp_prio : 3,
- bof_met : 1;
- u_char : 4,
- ifrm_spc : 4;
- u_char : 5,
- slottim_low : 3;
- u_char slottim_hi : 3,
- : 1,
- max_retr : 4;
- u_char prmisc : 1,
- bc_dis : 1,
- : 1,
- crs_1 : 1,
- nocrc_ins : 1,
- crc_1632 : 1,
- : 1,
- crs_cdt : 1;
- u_char cs_filter : 3,
- crs_src : 1,
- cd_filter : 3,
- : 1;
- u_char : 2,
- min_fr_len : 6;
- u_char lng_typ : 1,
- lng_fld : 1,
- rxcrc_xf : 1,
- artx : 1,
- sarec : 1,
- tx_jabber : 1, /* why is this called max_len in the manual? */
- hash_1 : 1,
- lbpkpol : 1;
- u_char : 6,
- fdx : 1,
- : 1;
- u_char dummy_6 : 6, /* supposed to be ones */
- mult_ia : 1,
- dis_bof : 1;
- u_char dummy_1 : 1, /* supposed to be one */
- tx_ifs_retrig : 2,
- mc_all : 1,
- rcv_mon : 2,
- frag_acpt : 1,
- tstrttrs : 1;
- u_char fretx : 1,
- runt_eop : 1,
- hw_sw_pin : 1,
- big_endn : 1,
- syncrqs : 1,
- sttlen : 1,
- tx_eop : 1,
- rx_eop : 1;
- u_char rbuf_size : 5,
- rcvstop : 1,
- : 2;
-};
-
-#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
-
-#endif /* _I82593_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 63ab3873c5ed..4f4eea8a6288 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -6,6 +6,7 @@
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright (c) 2005, Devicescape Software, Inc.
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
/*
* DS bit usage
@@ -165,8 +167,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_MESH_ID_LEN 32
+#define IEEE80211_FIRST_TSPEC_TSID 8
#define IEEE80211_NUM_TIDS 16
+/* number of user priorities 802.11 uses */
+#define IEEE80211_NUM_UPS 8
+
#define IEEE80211_QOS_CTL_LEN 2
/* 1d tag mask */
#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
@@ -838,6 +844,16 @@ enum ieee80211_vht_opmode_bits {
#define WLAN_SA_QUERY_TR_ID_LEN 2
+/**
+ * struct ieee80211_tpc_report_ie
+ *
+ * This structure refers to "TPC Report element"
+ */
+struct ieee80211_tpc_report_ie {
+ u8 tx_power;
+ u8 link_margin;
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -973,6 +989,13 @@ struct ieee80211_mgmt {
u8 action_code;
u8 operating_mode;
} __packed vht_opmode_notif;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 tpc_elem_id;
+ u8 tpc_elem_length;
+ struct ieee80211_tpc_report_ie tpc;
+ } __packed tpc_report;
} u;
} __packed action;
} u;
@@ -1044,6 +1067,12 @@ struct ieee80211_pspoll {
/* TDLS */
+/* Channel switch timing */
+struct ieee80211_ch_switch_timing {
+ __le16 switch_time;
+ __le16 switch_timeout;
+} __packed;
+
/* Link-id information element */
struct ieee80211_tdls_lnkie {
u8 ie_type; /* Link Identifier IE */
@@ -1085,6 +1114,15 @@ struct ieee80211_tdls_data {
u8 dialog_token;
u8 variable[0];
} __packed discover_req;
+ struct {
+ u8 target_channel;
+ u8 oper_class;
+ u8 variable[0];
+ } __packed chan_switch_req;
+ struct {
+ __le16 status_code;
+ u8 variable[0];
+ } __packed chan_switch_resp;
} u;
} __packed;
@@ -1252,7 +1290,7 @@ struct ieee80211_ht_cap {
#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
/*
- * Maximum length of AMPDU that the STA can receive.
+ * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
* Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
*/
enum ieee80211_max_ampdu_length_exp {
@@ -1262,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp {
IEEE80211_HT_MAX_AMPDU_64K = 3
};
+/*
+ * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_vht_max_ampdu_length_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7
+};
+
#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
/* Minimum MPDU start spacing */
@@ -1806,7 +1859,8 @@ enum ieee80211_eid {
WLAN_EID_DMG_TSPEC = 146,
WLAN_EID_DMG_AT = 147,
WLAN_EID_DMG_CAP = 148,
- /* 149-150 reserved for Cisco */
+ /* 149 reserved for Cisco */
+ WLAN_EID_CISCO_VENDOR_SPECIFIC = 150,
WLAN_EID_DMG_OPERATION = 151,
WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
WLAN_EID_DMG_BEAM_REFINEMENT = 153,
@@ -1865,6 +1919,7 @@ enum ieee80211_category {
WLAN_CATEGORY_DLS = 2,
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
+ WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -1974,6 +2029,16 @@ enum ieee80211_tdls_actioncode {
WLAN_TDLS_DISCOVERY_REQUEST = 10,
};
+/* Extended Channel Switching capability to be set in the 1st byte of
+ * the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
+#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
+#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
+
/* Interworking capabilities are set in 7th bit of 4th byte of the
* @WLAN_EID_EXT_CAPABILITY information element
*/
@@ -1985,6 +2050,7 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
+#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
@@ -1992,6 +2058,9 @@ enum ieee80211_tdls_actioncode {
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
+/* BSS Coex IE information field bits */
+#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
+
/**
* enum - mesh synchronization method identifier
*
@@ -2374,8 +2443,79 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
return !!(tim->virtual_map[index] & mask);
}
+/**
+ * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * @skb: the skb containing the frame, length will not be checked
+ * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
+ *
+ * This function assumes the frame is a data frame, and that the network header
+ * is in the correct place.
+ */
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+{
+ if (!skb_is_nonlinear(skb) &&
+ skb->len > (skb_network_offset(skb) + 2)) {
+ /* Point to where the indication of TDLS should start */
+ const u8 *tdls_data = skb_network_header(skb) - 2;
+
+ if (get_unaligned_be16(tdls_data) == ETH_P_TDLS &&
+ tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE &&
+ tdls_data[3] == WLAN_CATEGORY_TDLS)
+ return tdls_data[4];
+ }
+
+ return -1;
+}
+
/* convert time units */
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+/**
+ * ieee80211_action_contains_tpc - checks if the frame contains a TPC element
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * This function checks whether the frame is either a TPC Report action frame
+ * or a Link Measurement Report action frame, as defined in IEEE Std.
+ * 802.11-2012 8.5.2.5 and 8.5.7.5 respectively.
+ */
+static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.tpc_report))
+ return false;
+
+ /*
+ * TPC report - check that:
+ * category = 0 (Spectrum Management) or 5 (Radio Measurement)
+ * spectrum management action = 3 (TPC/Link Measurement report)
+ * TPC report EID = 35
+ * TPC report element length = 2
+ *
+ * The spectrum management tpc_report struct is used here both for
+ * parsing the TPC report and the radio measurement link measurement
+ * report frame, since the relevant part is identical in both frames.
+ */
+ if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT &&
+ mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT)
+ return false;
+
+ /* both spectrum mgmt and link measurement have same action code */
+ if (mgmt->u.action.u.tpc_report.action_code !=
+ WLAN_ACTION_SPCT_TPC_RPRT)
+ return false;
+
+ if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT ||
+ mgmt->u.action.u.tpc_report.tpc_elem_length !=
+ sizeof(struct ieee80211_tpc_report_ie))
+ return false;
+
+ return true;
+}
+
#endif /* LINUX_IEEE80211_H */
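
The two inline helpers added above are meant to be called from a driver's rx path. A minimal caller-side sketch, assuming a driver that handles data and management frames separately (the function names and the hdr_size parameter are illustrative, not part of this patch):

	#include <linux/skbuff.h>
	#include <linux/printk.h>
	#include <linux/ieee80211.h>

	/* Sketch only: hdr_size is the length of the 802.11 header at skb->data. */
	static void example_rx_data(struct sk_buff *skb, u32 hdr_size)
	{
		int action = ieee80211_get_tdls_action(skb, hdr_size);

		if (action >= 0) {
			/* frame is a TDLS data frame; dispatch on the action
			 * code, e.g. a TDLS channel switch request */
		}
	}

	static void example_rx_mgmt(struct sk_buff *skb)
	{
		if (ieee80211_action_contains_tpc(skb)) {
			struct ieee80211_mgmt *mgmt = (void *)skb->data;

			/* TPC/Link Measurement reports carry per-frame tx power */
			pr_debug("TPC report: tx power %u dBm\n",
				 mgmt->u.action.u.tpc_report.tpc.tx_power);
		}
	}
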
diff --git a/include/net/ieee802154.h b/include/linux/ieee802154.h
index 0aa7122e8f15..6e82d888287c 100644
--- a/include/net/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -24,10 +20,27 @@
* Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
-#ifndef NET_IEEE802154_H
-#define NET_IEEE802154_H
+#ifndef LINUX_IEEE802154_H
+#define LINUX_IEEE802154_H
+
+#include <linux/types.h>
+#include <linux/random.h>
+#include <asm/byteorder.h>
#define IEEE802154_MTU 127
+#define IEEE802154_MIN_PSDU_LEN 5
+
+#define IEEE802154_PAN_ID_BROADCAST 0xffff
+#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
+#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
+
+#define IEEE802154_EXTENDED_ADDR_LEN 8
+
+#define IEEE802154_LIFS_PERIOD 40
+#define IEEE802154_SIFS_PERIOD 12
+
+#define IEEE802154_MAX_CHANNEL 26
+#define IEEE802154_MAX_PAGE 31
#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
@@ -189,7 +202,41 @@ enum {
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
};
+/**
+ * ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * @len: psdu len with (MHR + payload + MFR)
+ */
+static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+{
+ return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
+}
+
+/**
+ * ieee802154_is_valid_extended_addr - check if extended addr is valid
+ * @addr: extended addr to check
+ */
+static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+{
+ /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
+ * is used internally as extended to short address broadcast mapping.
+ * This is currently a workaround because neighbor discovery can't
+ * deal with short address types right now.
+ */
+ return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
+ (addr != cpu_to_le64(0xffffffffffffffffULL)));
+}
-#endif
+/**
+ * ieee802154_random_extended_addr - generates a random extended address
+ * @addr: extended addr pointer to place the random address
+ */
+static inline void ieee802154_random_extended_addr(__le64 *addr)
+{
+ get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
+ /* toggle some bit if we hit an invalid extended addr */
+ if (!ieee802154_is_valid_extended_addr(*addr))
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+}
+#endif /* LINUX_IEEE802154_H */
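
A minimal sketch of how an 802.15.4 driver might use the new helpers: take the permanent EUI-64 from hardware if it passes the validity check, otherwise fall back to a random one, and reject frames with an out-of-range PSDU length. The function names and the hw_addr parameter are hypothetical:

	#include <linux/ieee802154.h>

	static void example_setup_extended_addr(__le64 hw_addr, __le64 *dev_addr)
	{
		if (ieee802154_is_valid_extended_addr(hw_addr))
			*dev_addr = hw_addr;
		else
			ieee802154_random_extended_addr(dev_addr);
	}

	/* rx path: drop frames whose PSDU length is outside MHR+FCS .. MTU */
	static bool example_rx_len_ok(u8 psdu_len)
	{
		return ieee802154_is_valid_psdu_len(psdu_len);
	}
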
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 808dcb8cc04f..0a8ce762a47f 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <uapi/linux/if_bridge.h>
+#include <linux/bitops.h>
struct br_ip {
union {
@@ -32,11 +33,41 @@ struct br_ip_list {
struct br_ip addr;
};
+#define BR_HAIRPIN_MODE BIT(0)
+#define BR_BPDU_GUARD BIT(1)
+#define BR_ROOT_BLOCK BIT(2)
+#define BR_MULTICAST_FAST_LEAVE BIT(3)
+#define BR_ADMIN_COST BIT(4)
+#define BR_LEARNING BIT(5)
+#define BR_FLOOD BIT(6)
+#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC BIT(7)
+#define BR_PROXYARP BIT(8)
+#define BR_LEARNING_SYNC BIT(9)
+
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;
+#if IS_ENABLED(CONFIG_BRIDGE)
+int br_fdb_external_learn_add(struct net_device *dev,
+ const unsigned char *addr, u16 vid);
+int br_fdb_external_learn_del(struct net_device *dev,
+ const unsigned char *addr, u16 vid);
+#else
+static inline int br_fdb_external_learn_add(struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ return 0;
+}
+static inline int br_fdb_external_learn_del(struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ return 0;
+}
+#endif
+
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6b2c7cf352a5..6f6929ea8a0c 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@ struct macvlan_dev {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ unsigned int macaddr_count;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d69f0577a319..515a35e2a48a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
- *
- * Following the skb_unshare() example, in case of error, the calling function
- * doesn't have to worry about freeing the original skb.
+ * Returns an error if skb_cow_head() fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
- if (skb_cow_head(skb, VLAN_HLEN) < 0) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
+ if (skb_cow_head(skb, VLAN_HLEN) < 0)
+ return -ENOMEM;
+
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
@@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
+ return 0;
+}
+
+/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ int err;
+
+ err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
return skb;
}
/**
- * __vlan_put_tag - regular VLAN tag inserting
+ * vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
@@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci)
{
skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb)
@@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
return skb;
}
-/**
- * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
+/*
+ * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
- * @vlan_proto: VLAN encapsulation protocol
- * @vlan_tci: VLAN TCI to insert
*
- * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
+ * Pushes the VLAN tag from @skb->vlan_tci into the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
*/
-static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
- __be16 vlan_proto,
- u16 vlan_tci)
+static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
- skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+ vlan_tx_tag_get(skb));
+ if (likely(skb))
+ skb->vlan_tci = 0;
+ return skb;
+}
+/*
+ * vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Checks if a tag is present in @skb->vlan_tci and, if it is, pushes the
+ * VLAN tag from @skb->vlan_tci into the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
/**
- * vlan_put_tag - inserts VLAN tag according to device features
+ * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
- * Assumes skb->dev is the target that will xmit this frame.
- * Returns a VLAN tagged skb.
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
-static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
{
- if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
- return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
- } else {
- return __vlan_put_tag(skb, vlan_proto, vlan_tci);
- }
+ skb->vlan_proto = vlan_proto;
+ skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
}
/**
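
A caller-side sketch of the reworked tagging helpers, assuming a driver xmit path without hardware VLAN insertion (example_xmit is an illustrative name). vlan_hwaccel_push_inside() frees the skb on allocation failure, so the caller only needs to check for NULL:

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* software-insert the accelerated tag, if one is pending */
		skb = vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NETDEV_TX_OK;	/* original skb already freed */

		/* ... queue the now fully tagged frame to the hardware ... */
		return NETDEV_TX_OK;
	}
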
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f47550d75f85..2c677afeea47 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,6 +39,7 @@ static inline struct igmpv3_query *
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
+extern int sysctl_igmp_qrv;
struct ip_sf_socklist {
unsigned int sl_max;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d8257ab60bac..2c476acb87d9 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -164,7 +164,7 @@ struct st_sensor_transfer_function {
};
/**
- * struct st_sensors - ST sensors list
+ * struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
@@ -177,7 +177,7 @@ struct st_sensor_transfer_function {
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
-struct st_sensors {
+struct st_sensor_settings {
u8 wai;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
@@ -196,7 +196,7 @@ struct st_sensors {
* struct st_sensor_data - ST sensor device status
* @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
- * @sensor: Pointer to the current sensor struct in use.
+ * @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
* @vdd: Pointer to sensor's Vdd power supply
* @vdd_io: Pointer to sensor's Vdd-IO power supply
@@ -213,7 +213,7 @@ struct st_sensors {
struct st_sensor_data {
struct device *dev;
struct iio_trigger *trig;
- struct st_sensors *sensor;
+ struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
struct regulator *vdd;
struct regulator *vdd_io;
@@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *ch, int *val);
int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensors *sensors);
+ int num_sensors_list, const struct st_sensor_settings *sensor_settings);
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf);
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8bbd7bc1043d..03fa332ad2a8 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -72,7 +72,7 @@ struct iio_event_data {
#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 15dc6bc2bdd2..3642ce7ef512 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/iio/types.h>
+#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
@@ -326,6 +327,11 @@ struct iio_dev;
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
+ * @of_xlate: function pointer to obtain channel specifier index.
+ * When #iio-cells is greater than '0', the driver could
+ * provide a custom of_xlate function that reads the
+ * *args* and returns the appropriate index in the
+ * registered IIO channels array.
**/
struct iio_info {
struct module *driver_module;
@@ -385,6 +391,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
+ int (*of_xlate)(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec);
};
/**
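
A sketch of what a driver-provided of_xlate callback could look like, assuming a one-cell specifier that directly encodes the channel index; the bounds check and the function name are illustrative only:

	#include <linux/iio/iio.h>
	#include <linux/of.h>

	static int example_of_xlate(struct iio_dev *indio_dev,
				    const struct of_phandle_args *iiospec)
	{
		/* first cell of the specifier indexes indio_dev->channels */
		if (iiospec->args[0] >= indio_dev->num_channels)
			return -EINVAL;

		return iiospec->args[0];
	}
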
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 7cf5e9b32550..120ccc53fcb7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,7 +15,7 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask);
+extern int ima_file_check(struct file *file, int mask, int opened);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_module_check(struct file *file);
@@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_file_check(struct file *file, int mask)
+static inline int ima_file_check(struct file *file, int mask, int opened)
{
return 0;
}
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0068708161ff..0a21fbefdfbe 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
static __inline__ __be32 inet_make_mask(int logmask)
{
if (logmask)
- return htonl(~((1<<(32-logmask))-1));
+ return htonl(~((1U<<(32-logmask))-1));
return 0;
}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2bb4c4f3531a..3037fc085e8e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,7 +102,7 @@ extern struct group_info init_groups;
#define INIT_IDS
#endif
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_TREE_PREEMPT() \
.rcu_blocked_node = NULL,
#else
@@ -111,12 +111,21 @@ extern struct group_info init_groups;
#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special = 0, \
+ .rcu_read_unlock_special.s = 0, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
INIT_TASK_RCU_TREE_PREEMPT()
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
+#ifdef CONFIG_TASKS_RCU
+#define INIT_TASK_RCU_TASKS(tsk) \
+ .rcu_tasks_holdout = false, \
+ .rcu_tasks_holdout_list = \
+ LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
+ .rcu_tasks_idle_cpu = -1,
+#else
+#define INIT_TASK_RCU_TASKS(tsk)
+#endif
extern struct cred init_cred;
@@ -157,6 +166,15 @@ extern struct task_group root_task_group;
# define INIT_RT_MUTEXES(tsk)
#endif
+#ifdef CONFIG_NUMA_BALANCING
+# define INIT_NUMA_BALANCING(tsk) \
+ .numa_preferred_nid = -1, \
+ .numa_group = NULL, \
+ .numa_faults = NULL,
+#else
+# define INIT_NUMA_BALANCING(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -224,9 +242,11 @@ extern struct task_group root_task_group;
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
+ INIT_NUMA_BALANCING(tsk) \
}
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 83222cebd47b..c2d6082a1a4c 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -24,6 +24,7 @@ enum integrity_status {
#ifdef CONFIG_INTEGRITY
extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
extern void integrity_inode_free(struct inode *inode);
+extern void __init integrity_load_keys(void);
#else
static inline struct integrity_iint_cache *
@@ -36,5 +37,10 @@ static inline void integrity_inode_free(struct inode *inode)
{
return;
}
+
+static inline void integrity_load_keys(void)
+{
+}
#endif /* CONFIG_INTEGRITY */
+
#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..d9b05b5bf8c7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
-#ifdef CONFIG_PM_SLEEP
-extern int check_wakeup_irqs(void);
-#else
-static inline int check_wakeup_irqs(void) { return 0; }
-#endif
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
@@ -561,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
atomic_dec(&t->count);
}
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- atomic_dec(&t->count);
-}
-
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
diff --git a/include/linux/io.h b/include/linux/io.h
index d5fc9b8d8b03..fa02e55e5a2e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -61,9 +61,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
- unsigned long size);
+ resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
- unsigned long size);
+ resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 20f9a527922a..38daa453f2e5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,13 +21,15 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/types.h>
+#include <linux/scatterlist.h>
#include <trace/events/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
-#define IOMMU_EXEC (1 << 3)
+#define IOMMU_NOEXEC (1 << 3)
struct iommu_ops;
struct iommu_group;
@@ -57,8 +59,12 @@ struct iommu_domain {
struct iommu_domain_geometry geometry;
};
-#define IOMMU_CAP_CACHE_COHERENCY 0x1
-#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
+enum iommu_cap {
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
+ transactions */
+ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
+};
/*
* Following constraints are specific to FSL_PAMUV1:
@@ -80,6 +86,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_STASH,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
+ DOMAIN_ATTR_NESTING, /* two stages of translation */
DOMAIN_ATTR_MAX,
};
@@ -93,15 +100,19 @@ enum iommu_attr {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ * to an iommu domain
* @iova_to_phys: translate iova to physical address
- * @domain_has_cap: domain capabilities query
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
+ * @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
+ * @priv: per-instance data private to the iommu driver
*/
struct iommu_ops {
+ bool (*capable)(enum iommu_cap);
int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain);
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
@@ -110,9 +121,9 @@ struct iommu_ops {
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size);
+ size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*domain_has_cap)(struct iommu_domain *domain,
- unsigned long cap);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
int (*device_group)(struct device *dev, unsigned int *groupid);
@@ -130,7 +141,12 @@ struct iommu_ops {
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
+#ifdef CONFIG_OF_IOMMU
+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#endif
+
unsigned long pgsize_bitmap;
+ void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
@@ -142,6 +158,7 @@ struct iommu_ops {
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
+extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
@@ -153,9 +170,10 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
+extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
-extern int iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -240,6 +258,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
return ret;
}
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return domain->ops->map_sg(domain, iova, sg, nents, prot);
+}
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -250,6 +275,11 @@ static inline bool iommu_present(struct bus_type *bus)
return false;
}
+static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+{
+ return false;
+}
+
static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
return NULL;
@@ -287,6 +317,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return -ENODEV;
+}
+
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)
@@ -304,12 +341,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad
return 0;
}
-static inline int iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- return 0;
-}
-
static inline void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token)
{
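
A caller-side sketch of the reworked capability and scatter-gather APIs: capability queries move from the domain to the bus, and a whole scatterlist can now be mapped in one call. The surrounding variables and the chosen IOVA are assumptions for illustration, not part of the patch:

	#include <linux/iommu.h>
	#include <linux/scatterlist.h>

	static int example_setup(struct bus_type *bus, struct iommu_domain *domain,
				 struct device *dev, struct scatterlist *sg,
				 unsigned int nents)
	{
		/* was: iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY) */
		if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
			return -ENODEV;

		if (iommu_attach_device(domain, dev))
			return -ENODEV;

		/* map the whole scatterlist at IOVA 0x100000; returns bytes mapped */
		if (!iommu_map_sg(domain, 0x100000, sg, nents,
				  IOMMU_READ | IOMMU_WRITE))
			return -ENOMEM;

		return 0;
	}
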
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec544167c..2c5250222278 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,
/* Wrappers for managed devices */
struct device;
+
+extern int devm_request_resource(struct device *dev, struct resource *root,
+ struct resource *new);
+extern void devm_release_resource(struct device *dev, struct resource *new);
+
#define devm_request_region(dev,start,n,name) \
__devm_request_region(dev, &ioport_resource, (start), (n), (name))
#define devm_request_mem_region(dev,start,n,name) \
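
A probe-path sketch for the new managed resource helpers; the address range and names below are made up for illustration. The resource is released automatically on driver detach, and devm_release_resource() can drop it earlier if needed:

	#include <linux/device.h>
	#include <linux/ioport.h>

	static struct resource example_res = {
		.start	= 0xfe000000,
		.end	= 0xfe000fff,
		.flags	= IORESOURCE_MEM,
		.name	= "example-regs",
	};

	static int example_probe(struct device *dev)
	{
		int ret;

		ret = devm_request_resource(dev, &iomem_resource, &example_res);
		if (ret)
			return ret;

		/* ... ioremap and use the region ... */
		return 0;
	}
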
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 1888e06ddf64..8bddc3fbdddf 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -172,6 +172,7 @@ struct ipack_bus_ops {
* @ops: bus operations for the mezzanine drivers
*/
struct ipack_bus_device {
+ struct module *owner;
struct device *parent;
int slots;
int bus_nr;
@@ -189,7 +190,8 @@ struct ipack_bus_device {
* available bus device in ipack.
*/
struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
- const struct ipack_bus_ops *ops);
+ const struct ipack_bus_ops *ops,
+ struct module *owner);
/**
* ipack_bus_unregister -- unregister an ipack bus
@@ -265,3 +267,23 @@ void ipack_put_device(struct ipack_device *dev);
.format = (_format), \
.vendor = (vend), \
.device = (dev)
+
+/**
+ * ipack_get_carrier - increase the reference counter of the
+ * carrier module
+ * @dev: mezzanine device which wants to get the carrier
+ */
+static inline int ipack_get_carrier(struct ipack_device *dev)
+{
+ return try_module_get(dev->bus->owner);
+}
+
+/**
+ * ipack_put_carrier - decrease the reference counter of the
+ * carrier module
+ * @dev: mezzanine device which wants to put the carrier
+ */
+static inline void ipack_put_carrier(struct ipack_device *dev)
+{
+ module_put(dev->bus->owner);
+}
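
A sketch of the intended usage in a mezzanine driver: pin the carrier module while the device is in use, and drop the reference on release. The open/release function names are illustrative:

	#include <linux/ipack.h>

	static int example_open(struct ipack_device *dev)
	{
		/* prevent the carrier module from being unloaded while in use */
		if (!ipack_get_carrier(dev))
			return -EBUSY;

		return 0;
	}

	static void example_release(struct ipack_device *dev)
	{
		ipack_put_carrier(dev);
	}
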
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 35e7eca4e33b..1eee6bcfcf76 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -6,15 +6,7 @@
#include <linux/rwsem.h>
#include <linux/notifier.h>
#include <linux/nsproxy.h>
-
-/*
- * ipc namespace events
- */
-#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */
-#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */
-#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */
-
-#define IPCNS_CALLBACK_PRI 0
+#include <linux/ns_common.h>
struct user_namespace;
@@ -38,7 +30,6 @@ struct ipc_namespace {
unsigned int msg_ctlmni;
atomic_t msg_bytes;
atomic_t msg_hdrs;
- int auto_msgmni;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -68,7 +59,7 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct ipc_namespace init_ipc_ns;
@@ -77,18 +68,8 @@ extern atomic_t nr_ipc_ns;
extern spinlock_t mq_lock;
#ifdef CONFIG_SYSVIPC
-extern int register_ipcns_notifier(struct ipc_namespace *);
-extern int cond_register_ipcns_notifier(struct ipc_namespace *);
-extern void unregister_ipcns_notifier(struct ipc_namespace *);
-extern int ipcns_notify(unsigned long);
extern void shm_destroy_orphaned(struct ipc_namespace *ns);
#else /* CONFIG_SYSVIPC */
-static inline int register_ipcns_notifier(struct ipc_namespace *ns)
-{ return 0; }
-static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
-{ return 0; }
-static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
-static inline int ipcns_notify(unsigned long l) { return 0; }
static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
#endif /* CONFIG_SYSVIPC */
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 76d2acbfa7c6..838dbfa3c331 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -37,6 +37,7 @@
#include <linux/list.h>
#include <linux/proc_fs.h>
+#include <linux/acpi.h> /* For acpi_handle */
struct module;
struct device;
@@ -278,15 +279,18 @@ enum ipmi_addr_src {
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
SI_PCI, SI_DEVICETREE, SI_DEFAULT
};
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
union ipmi_smi_info_union {
+#ifdef CONFIG_ACPI
/*
* the acpi_info element is defined for the SI_ACPI
* address type
*/
struct {
- void *acpi_handle;
+ acpi_handle acpi_handle;
} acpi_info;
+#endif
};
struct ipmi_smi_info {
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index bd349240d50e..0b1e569f5ff5 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -98,12 +98,11 @@ struct ipmi_smi_handlers {
operation is not allowed to fail. If an error occurs, it
should report back the error in a received message. It may
do this in the current call context, since no write locks
- are held when this is run. If the priority is > 0, the
- message will go into a high-priority queue and be sent
- first. Otherwise, it goes into a normal-priority queue. */
+ are held when this is run. Messages are delivered one at
+ a time by the message handler; a new message will not be
+ delivered until the previous message is returned. */
void (*sender)(void *send_info,
- struct ipmi_smi_msg *msg,
- int priority);
+ struct ipmi_smi_msg *msg);
/* Called by the upper layer to request that we try to get
events from the BMC we are attached to. */
@@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
- const char *sysfs_name,
unsigned char slave_addr);
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ff560537dd61..c694e7baa621 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -42,6 +42,7 @@ struct ipv6_devconf {
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
__s32 optimistic_dad;
+ __s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
__s32 mc_forwarding;
@@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
-
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
- net_eq(sock_net(__sk), (__net)))
-
#endif /* _IPV6_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..d09ec7a1243e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -15,11 +15,13 @@
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
+#include <linux/irqhandler.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
@@ -27,11 +29,7 @@
struct seq_file;
struct module;
-struct irq_desc;
-struct irq_data;
-typedef void (*irq_flow_handler_t)(unsigned int irq,
- struct irq_desc *desc);
-typedef void (*irq_preflow_handler_t)(struct irq_data *data);
+struct msi_msg;
/*
* IRQ line status.
@@ -113,10 +111,14 @@ enum {
*
* IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
* IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for the core. Special code
+ * to support stacked irqchips, indicating that all
+ * descendant irqchips should be skipped.
*/
enum {
IRQ_SET_MASK_OK = 0,
IRQ_SET_MASK_OK_NOCOPY,
+ IRQ_SET_MASK_OK_DONE,
};
struct msi_desc;
@@ -133,6 +135,8 @@ struct irq_domain;
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
+ * @parent_data: pointer to parent struct irq_data to support hierarchy
+ * irq_domain
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
@@ -151,6 +155,9 @@ struct irq_data {
unsigned int state_use_accessors;
struct irq_chip *chip;
struct irq_domain *domain;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_data *parent_data;
+#endif
void *handler_data;
void *chip_data;
struct msi_desc *msi_desc;
@@ -173,6 +180,7 @@ struct irq_data {
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
+ * IRQD_WAKEUP_ARMED - Wakeup mode armed
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +194,7 @@ enum {
IRQD_IRQ_DISABLED = (1 << 16),
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
+ IRQD_WAKEUP_ARMED = (1 << 19),
};
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +266,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}
+static inline bool irqd_is_wakeup_armed(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+}
+
+
/*
* Functions for chained handlers which can be enabled/disabled by the
* standard disable_irq/enable_irq calls. Must be called with
@@ -307,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* any other callback related to this irq
* @irq_release_resources: optional to release resources acquired with
* irq_request_resources
+ * @irq_compose_msi_msg: optional to compose message content for MSI
+ * @irq_write_msi_msg: optional to write message content for MSI
* @flags: chip specific flags
*/
struct irq_chip {
@@ -343,6 +360,9 @@ struct irq_chip {
int (*irq_request_resources)(struct irq_data *data);
void (*irq_release_resources)(struct irq_data *data);
+ void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+ void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+
unsigned long flags;
};
@@ -430,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
+extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_ack_parent(struct irq_data *data);
+extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
+extern void irq_chip_mask_parent(struct irq_data *data);
+extern void irq_chip_unmask_parent(struct irq_data *data);
+extern void irq_chip_eoi_parent(struct irq_data *data);
+extern int irq_chip_set_affinity_parent(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force);
+#endif
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
@@ -631,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq);
void irq_init_desc(unsigned int irq);
#endif
-#ifndef irq_reg_writel
-# define irq_reg_writel(val, addr) writel(val, addr)
-#endif
-#ifndef irq_reg_readl
-# define irq_reg_readl(addr) readl(addr)
-#endif
-
/**
* struct irq_chip_regs - register offsets for struct irq_gci
* @enable: Enable register offset to reg_base
@@ -684,6 +709,8 @@ struct irq_chip_type {
* struct irq_chip_generic - Generic irq chip data structure
* @lock: Lock to protect register and cache data access
* @reg_base: Register base address (virtual)
+ * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
+ * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
@@ -708,6 +735,8 @@ struct irq_chip_type {
struct irq_chip_generic {
raw_spinlock_t lock;
void __iomem *reg_base;
+ u32 (*reg_readl)(void __iomem *addr);
+ void (*reg_writel)(u32 val, void __iomem *addr);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
@@ -732,12 +761,14 @@ struct irq_chip_generic {
* the parent irq. Usually GPIO implementations
* @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
* @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
+ * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE)
*/
enum irq_gc_flags {
IRQ_GC_INIT_MASK_CACHE = 1 << 0,
IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
IRQ_GC_NO_MASK = 1 << 3,
+ IRQ_GC_BE_IO = 1 << 4,
};
/*
@@ -813,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+static inline void irq_reg_writel(struct irq_chip_generic *gc,
+ u32 val, int reg_offset)
+{
+ if (gc->reg_writel)
+ gc->reg_writel(val, gc->reg_base + reg_offset);
+ else
+ writel(val, gc->reg_base + reg_offset);
+}
+
+static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
+ int reg_offset)
+{
+ if (gc->reg_readl)
+ return gc->reg_readl(gc->reg_base + reg_offset);
+ else
+ return readl(gc->reg_base + reg_offset);
+}
+
#endif /* _LINUX_IRQ_H */
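
A sketch of a generic-chip callback using the reworked accessors, which now take the chip and a register offset so that an alternate reg_readl/reg_writel (for instance a big-endian accessor installed via IRQ_GC_BE_IO) is honoured transparently. The register offset and function name are illustrative:

	#include <linux/irq.h>

	#define EXAMPLE_MASK_OFFSET	0x10	/* hypothetical mask register */

	static void example_irq_mask(struct irq_data *d)
	{
		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
		u32 mask;

		irq_gc_lock(gc);
		mask = irq_reg_readl(gc, EXAMPLE_MASK_OFFSET);
		mask &= ~d->mask;
		irq_reg_writel(gc, mask, EXAMPLE_MASK_OFFSET);
		irq_gc_unlock(gc);
	}
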
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index bf9422c3aefe..bf3fe719c7ce 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu);
#endif
void irq_work_run(void);
+void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
#ifdef CONFIG_IRQ_WORK
+#include <asm/irq_work.h>
+
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 03a4ea37ba86..1e8b0cf30792 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -49,6 +49,10 @@
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_LPIS (1U << 17)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -76,9 +80,27 @@
#define GICR_MOVALLR 0x0110
#define GICR_PIDR2 GICD_PIDR2
+#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+
+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
+#define GICR_PROPBASER_NonShareable (0U << 10)
+#define GICR_PROPBASER_InnerShareable (1U << 10)
+#define GICR_PROPBASER_OuterShareable (2U << 10)
+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PROPBASER_nCnB (0U << 7)
+#define GICR_PROPBASER_nC (1U << 7)
+#define GICR_PROPBASER_RaWt (2U << 7)
+#define GICR_PROPBASER_RaWb (3U << 7)
+#define GICR_PROPBASER_WaWt (4U << 7)
+#define GICR_PROPBASER_WaWb (5U << 7)
+#define GICR_PROPBASER_RaWaWt (6U << 7)
+#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
/*
* Re-Distributor registers, offsets from SGI_base
*/
@@ -91,9 +113,93 @@
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define LPI_PROP_GROUP1 (1 << 1)
+#define LPI_PROP_ENABLED (1 << 0)
+
+/*
+ * ITS registers, offsets from ITS_base
+ */
+#define GITS_CTLR 0x0000
+#define GITS_IIDR 0x0004
+#define GITS_TYPER 0x0008
+#define GITS_CBASER 0x0080
+#define GITS_CWRITER 0x0088
+#define GITS_CREADR 0x0090
+#define GITS_BASER 0x0100
+#define GITS_PIDR2 GICR_PIDR2
+
+#define GITS_TRANSLATER 0x10040
+
+#define GITS_TYPER_PTA (1UL << 19)
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_nCnB (0UL << 59)
+#define GITS_CBASER_nC (1UL << 59)
+#define GITS_CBASER_RaWt (2UL << 59)
+#define GITS_CBASER_RaWb (3UL << 59)
+#define GITS_CBASER_WaWt (4UL << 59)
+#define GITS_CBASER_WaWb (5UL << 59)
+#define GITS_CBASER_RaWaWt (6UL << 59)
+#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_NonShareable (0UL << 10)
+#define GITS_CBASER_InnerShareable (1UL << 10)
+#define GITS_CBASER_OuterShareable (2UL << 10)
+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+
+#define GITS_BASER_NR_REGS 8
+
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_nCnB (0UL << 59)
+#define GITS_BASER_nC (1UL << 59)
+#define GITS_BASER_RaWt (2UL << 59)
+#define GITS_BASER_RaWb (3UL << 59)
+#define GITS_BASER_WaWt (4UL << 59)
+#define GITS_BASER_WaWb (5UL << 59)
+#define GITS_BASER_RaWaWt (6UL << 59)
+#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_NonShareable (0UL << 10)
+#define GITS_BASER_InnerShareable (1UL << 10)
+#define GITS_BASER_OuterShareable (2UL << 10)
+#define GITS_BASER_SHAREABILITY_SHIFT (10)
+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_PAGE_SIZE_SHIFT (8)
+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+
+#define GITS_BASER_TYPE_NONE 0
+#define GITS_BASER_TYPE_DEVICE 1
+#define GITS_BASER_TYPE_VCPU 2
+#define GITS_BASER_TYPE_CPU 3
+#define GITS_BASER_TYPE_COLLECTION 4
+#define GITS_BASER_TYPE_RESERVED5 5
+#define GITS_BASER_TYPE_RESERVED6 6
+#define GITS_BASER_TYPE_RESERVED7 7
+
+/*
+ * ITS commands
+ */
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MOVI 0x01
+#define GITS_CMD_DISCARD 0x0f
+#define GITS_CMD_INV 0x0c
+#define GITS_CMD_MOVALL 0x0e
+#define GITS_CMD_INVALL 0x0d
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
+
/*
* CPU interface registers
*/
@@ -189,12 +295,34 @@
#include <linux/stringify.h>
+/*
+ * We need a value to serve as a irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI 0xa110c8ed
+
+struct rdists {
+ struct {
+ void __iomem *rd_base;
+ struct page *pend_page;
+ phys_addr_t phys_base;
+ } __percpu *rdist;
+ struct page *prop_page;
+ int id_bits;
+ u64 flags;
+};
+
static inline void gic_write_eoir(u64 irq)
{
asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
isb();
}
+struct irq_domain;
+int its_cpu_init(void);
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *domain);
+
#endif
#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 45e2d8c15bd2..71d706d5f169 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -21,7 +21,11 @@
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
+#define GICC_ENABLE 0x1
+#define GICC_INT_PRI_THRESHOLD 0xf0
#define GICC_IAR_INT_ID_MASK 0x3ff
+#define GICC_INT_SPURIOUS 1023
+#define GICC_DIS_BYPASS_MASK 0x1e0
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_CTR 0x004
@@ -39,6 +43,18 @@
#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
#define GIC_DIST_SGI_PENDING_SET 0xf20
+#define GICD_ENABLE 0x1
+#define GICD_DISABLE 0x0
+#define GICD_INT_ACTLOW_LVLTRIG 0x0
+#define GICD_INT_EN_CLR_X32 0xffffffff
+#define GICD_INT_EN_SET_SGI 0x0000ffff
+#define GICD_INT_EN_CLR_PPI 0xffff0000
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
#define GICH_VMCR 0x8
@@ -75,6 +91,8 @@
#ifndef __ASSEMBLY__
+#include <linux/irqdomain.h>
+
struct device_node;
extern struct irq_chip gic_arch_extn;
@@ -90,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start,
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
+
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
new file mode 100644
index 000000000000..e06b370cfc0d
--- /dev/null
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -0,0 +1,32 @@
+/**
+ * irq-omap-intc.h - INTC Idle Functions
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
+#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
+
+void omap2_init_irq(void);
+void omap3_init_irq(void);
+void ti81xx_init_irq(void);
+
+int omap_irq_pending(void);
+void omap_intc_save_context(void);
+void omap_intc_restore_context(void);
+void omap3_intc_suspend(void);
+void omap3_intc_prepare_idle(void);
+void omap3_intc_resume_idle(void);
+
+#endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
new file mode 100644
index 000000000000..420f77b34d02
--- /dev/null
+++ b/include/linux/irqchip/mips-gic.h
@@ -0,0 +1,249 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ */
+#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
+#define __LINUX_IRQCHIP_MIPS_GIC_H
+
+#include <linux/clocksource.h>
+
+#define GIC_MAX_INTRS 256
+
+/* Constants */
+#define GIC_POL_POS 1
+#define GIC_POL_NEG 0
+#define GIC_TRIG_EDGE 1
+#define GIC_TRIG_LEVEL 0
+#define GIC_TRIG_DUAL_ENABLE 1
+#define GIC_TRIG_DUAL_DISABLE 0
+
+#define MSK(n) ((1 << (n)) - 1)
+
+/* Accessors */
+#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
+
+/* GIC Address Space */
+#define SHARED_SECTION_OFS 0x0000
+#define SHARED_SECTION_SIZE 0x8000
+#define VPE_LOCAL_SECTION_OFS 0x8000
+#define VPE_LOCAL_SECTION_SIZE 0x4000
+#define VPE_OTHER_SECTION_OFS 0xc000
+#define VPE_OTHER_SECTION_SIZE 0x4000
+#define USM_VISIBLE_SECTION_OFS 0x10000
+#define USM_VISIBLE_SECTION_SIZE 0x10000
+
+/* Register Map for Shared Section */
+
+#define GIC_SH_CONFIG_OFS 0x0000
+
+/* Shared Global Counter */
+#define GIC_SH_COUNTER_31_00_OFS 0x0010
+#define GIC_SH_COUNTER_63_32_OFS 0x0014
+#define GIC_SH_REVISIONID_OFS 0x0020
+
+/* Convert an interrupt number to a byte offset/bit for multi-word registers */
+#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
+#define GIC_INTR_BIT(intr) ((intr) % 32)
+
+/* Polarity : Reset Value is always 0 */
+#define GIC_SH_SET_POLARITY_OFS 0x0100
+
+/* Triggering : Reset Value is always 0 */
+#define GIC_SH_SET_TRIGGER_OFS 0x0180
+
+/* Dual edge triggering : Reset Value is always 0 */
+#define GIC_SH_SET_DUAL_OFS 0x0200
+
+/* Set/Clear corresponding bit in Edge Detect Register */
+#define GIC_SH_WEDGE_OFS 0x0280
+
+/* Mask manipulation */
+#define GIC_SH_RMASK_OFS 0x0300
+#define GIC_SH_SMASK_OFS 0x0380
+
+/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
+#define GIC_SH_MASK_OFS 0x0400
+
+/* Pending Global Interrupts (RO) */
+#define GIC_SH_PEND_OFS 0x0480
+
+/* Maps Interrupt X to a Pin */
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
+#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
+
+/* Maps Interrupt X to a VPE */
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
+#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
+ ((32 * (intr)) + (((vpe) / 32) * 4))
+#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
+
+/* Register Map for Local Section */
+#define GIC_VPE_CTL_OFS 0x0000
+#define GIC_VPE_PEND_OFS 0x0004
+#define GIC_VPE_MASK_OFS 0x0008
+#define GIC_VPE_RMASK_OFS 0x000c
+#define GIC_VPE_SMASK_OFS 0x0010
+#define GIC_VPE_WD_MAP_OFS 0x0040
+#define GIC_VPE_COMPARE_MAP_OFS 0x0044
+#define GIC_VPE_TIMER_MAP_OFS 0x0048
+#define GIC_VPE_FDC_MAP_OFS 0x004c
+#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
+#define GIC_VPE_SWINT0_MAP_OFS 0x0054
+#define GIC_VPE_SWINT1_MAP_OFS 0x0058
+#define GIC_VPE_OTHER_ADDR_OFS 0x0080
+#define GIC_VPE_WD_CONFIG0_OFS 0x0090
+#define GIC_VPE_WD_COUNT0_OFS 0x0094
+#define GIC_VPE_WD_INITIAL0_OFS 0x0098
+#define GIC_VPE_COMPARE_LO_OFS 0x00a0
+#define GIC_VPE_COMPARE_HI_OFS 0x00a4
+
+#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
+#define GIC_VPE_EIC_SS(intr) (4 * (intr))
+
+#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
+#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
+
+#define GIC_VPE_TENABLE_NMI_OFS 0x1000
+#define GIC_VPE_TENABLE_YQ_OFS 0x1004
+#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
+#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
+
+/* User Mode Visible Section Register Map */
+#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
+#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
+
+/* Masks */
+#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
+#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
+
+#define GIC_SH_CONFIG_COUNTBITS_SHF 24
+#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
+
+#define GIC_SH_CONFIG_NUMINTRS_SHF 16
+#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
+
+#define GIC_SH_CONFIG_NUMVPES_SHF 0
+#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
+
+#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
+#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
+
+#define GIC_MAP_TO_PIN_SHF 31
+#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
+#define GIC_MAP_TO_NMI_SHF 30
+#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
+#define GIC_MAP_TO_YQ_SHF 29
+#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
+#define GIC_MAP_SHF 0
+#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
+
+/* GIC_VPE_CTL Masks */
+#define GIC_VPE_CTL_FDC_RTBL_SHF 4
+#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
+#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
+#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
+#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
+#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
+#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
+#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
+#define GIC_VPE_CTL_EIC_MODE_SHF 0
+#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
+
+/* GIC_VPE_PEND Masks */
+#define GIC_VPE_PEND_WD_SHF 0
+#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
+#define GIC_VPE_PEND_CMP_SHF 1
+#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
+#define GIC_VPE_PEND_TIMER_SHF 2
+#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
+#define GIC_VPE_PEND_PERFCOUNT_SHF 3
+#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
+#define GIC_VPE_PEND_SWINT0_SHF 4
+#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
+#define GIC_VPE_PEND_SWINT1_SHF 5
+#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+
+/* GIC_VPE_RMASK Masks */
+#define GIC_VPE_RMASK_WD_SHF 0
+#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
+#define GIC_VPE_RMASK_CMP_SHF 1
+#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
+#define GIC_VPE_RMASK_TIMER_SHF 2
+#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
+#define GIC_VPE_RMASK_PERFCNT_SHF 3
+#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
+#define GIC_VPE_RMASK_SWINT0_SHF 4
+#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
+#define GIC_VPE_RMASK_SWINT1_SHF 5
+#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+
+/* GIC_VPE_SMASK Masks */
+#define GIC_VPE_SMASK_WD_SHF 0
+#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
+#define GIC_VPE_SMASK_CMP_SHF 1
+#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
+#define GIC_VPE_SMASK_TIMER_SHF 2
+#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
+#define GIC_VPE_SMASK_PERFCNT_SHF 3
+#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
+#define GIC_VPE_SMASK_SWINT0_SHF 4
+#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
+#define GIC_VPE_SMASK_SWINT1_SHF 5
+#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+
+/* GIC nomenclature for Core Interrupt Pins. */
+#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
+#define GIC_CPU_INT1 1 /* . */
+#define GIC_CPU_INT2 2 /* . */
+#define GIC_CPU_INT3 3 /* . */
+#define GIC_CPU_INT4 4 /* . */
+#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
+
+/* Add 2 to convert GIC CPU pin to core interrupt */
+#define GIC_CPU_PIN_OFFSET 2
+
+/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
+#define GIC_CPU_TO_VEC_OFFSET 2
+
+/* When an interrupt is mapped to pin X, the GIC will generate vector (X+1). */
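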
+#define GIC_PIN_TO_VEC_OFFSET 1
+
+/* Local GIC interrupts. */
+#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
+#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
+#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
+#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
+#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
+#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
+#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
+#define GIC_NUM_LOCAL_INTRS 7
+
+/* Convert between local/shared IRQ number and GIC HW IRQ number. */
+#define GIC_LOCAL_HWIRQ_BASE 0
+#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
+#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
+#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
+
+extern unsigned int gic_present;
+
+extern void gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size, unsigned int cpu_vec,
+ unsigned int irqbase);
+extern void gic_clocksource_init(unsigned int);
+extern cycle_t gic_read_count(void);
+extern unsigned int gic_get_count_width(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
+extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
+extern void gic_send_ipi(unsigned int intr);
+extern unsigned int plat_ipi_call_int_xlate(unsigned int);
+extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern unsigned int gic_get_timer_pending(void);
+extern int gic_get_c0_compare_int(void);
+extern int gic_get_c0_perfcount_int(void);
+#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
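Most of the shared-section registers above are bitmaps indexed by interrupt number, which is what GIC_INTR_OFS()/GIC_INTR_BIT() are for. A small host-side sketch of the arithmetic for enabling one shared interrupt (the interrupt number is an arbitrary example; the actual register write is only indicated in a comment):

#include <stdio.h>

#define GIC_SH_SMASK_OFS	0x0380
#define GIC_INTR_OFS(intr)	(((intr) / 32) * 4)
#define GIC_INTR_BIT(intr)	((intr) % 32)

int main(void)
{
	unsigned int intr = 45;		/* arbitrary shared interrupt */
	unsigned int reg  = GIC_SH_SMASK_OFS + GIC_INTR_OFS(intr);
	unsigned int mask = 1u << GIC_INTR_BIT(intr);

	/* A driver would write 'mask' to shared-section offset 'reg'
	 * (0x384, bit 13) to set the enable bit for interrupt 45. */
	printf("reg = 0x%x, mask = 0x%x\n", reg, mask);
	return 0;
}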
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 472c021a2d4f..faf433af425e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -12,6 +12,8 @@ struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
+struct irq_domain;
+struct pt_regs;
/**
* struct irq_desc - interrupt descriptor
@@ -36,6 +38,11 @@ struct irq_desc;
* @threads_oneshot: bitfield to handle shared oneshot threads
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
+ * @nr_actions: number of installed actions on this descriptor
+ * @no_suspend_depth: number of irqactions on an irq descriptor with
+ * IRQF_NO_SUSPEND set
+ * @force_resume_depth: number of irqactions on an irq descriptor with
+ * IRQF_FORCE_RESUME set
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
@@ -68,6 +75,11 @@ struct irq_desc {
unsigned long threads_oneshot;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
+#ifdef CONFIG_PM_SLEEP
+ unsigned int nr_actions;
+ unsigned int no_suspend_depth;
+ unsigned int force_resume_depth;
+#endif
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
@@ -118,6 +130,23 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de
int generic_handle_irq(unsigned int irq);
+#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+/*
+ * Convert a HW interrupt number to a logical one using an IRQ domain,
+ * and handle the resulting interrupt number. Return -EINVAL if
+ * conversion failed. Providing a NULL domain indicates that the
+ * conversion has already been done.
+ */
+int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
+ bool lookup, struct pt_regs *regs);
+
+static inline int handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ return __handle_domain_irq(domain, hwirq, true, regs);
+}
+#endif
+
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
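handle_domain_irq() is intended for an irqchip driver's low-level entry point: the driver reads the hardware interrupt number and lets the helper do the domain lookup and dispatch in one step. A hedged sketch of that call (my_domain and read_hwirq_register() are hypothetical driver pieces, and CONFIG_HANDLE_DOMAIN_IRQ is assumed):

/* Sketch only - not a real driver. */
static void my_irqchip_handle_irq(struct pt_regs *regs)
{
	u32 hwirq = read_hwirq_register();	/* hypothetical register read */

	/* Translates hwirq through my_domain and runs the flow handler;
	 * returns -EINVAL if no mapping exists. */
	handle_domain_irq(my_domain, hwirq, regs);
}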
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b0f9d16e48f6..676d7306a360 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,11 +33,14 @@
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqhandler.h>
#include <linux/radix-tree.h>
struct device_node;
struct irq_domain;
struct of_device_id;
+struct irq_chip;
+struct irq_data;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -64,6 +67,16 @@ struct irq_domain_ops {
int (*xlate)(struct irq_domain *d, struct device_node *node,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /* extended V2 interfaces to support hierarchy irq_domains */
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+#endif
};
extern struct irq_domain_ops irq_generic_chip_ops;
@@ -77,6 +90,7 @@ struct irq_domain_chip_generic;
* @ops: pointer to irq_domain methods
* @host_data: private data pointer for use by owner. Not touched by irq_domain
* core code.
+ * @flags: host per irq_domain flags
*
* Optional elements
* @of_node: Pointer to device tree nodes associated with the irq_domain. Used
@@ -84,6 +98,7 @@ struct irq_domain_chip_generic;
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
*
* Revmap data, used internally by irq_domain
* @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -97,10 +112,14 @@ struct irq_domain {
const char *name;
const struct irq_domain_ops *ops;
void *host_data;
+ unsigned int flags;
/* Optional data */
struct device_node *of_node;
struct irq_domain_chip_generic *gc;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_domain *parent;
+#endif
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
@@ -110,6 +129,22 @@ struct irq_domain {
unsigned int linear_revmap[];
};
+/* Irq domain flags */
+enum {
+ /* Irq domain is hierarchical */
+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
+
+ /* Core calls alloc/free recursively through the domain hierarchy. */
+ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+
+ /*
+ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
+ * for implementation specific purposes and ignored by the
+ * core code.
+ */
+ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
+};
+
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
irq_hw_number_t hwirq_max, int direct_max,
@@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
+/* V2 interfaces to support hierarchy IRQ domains. */
+extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
+ unsigned int virq);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct device_node *node,
+ const struct irq_domain_ops *ops, void *host_data);
+extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc);
+extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
+extern void irq_domain_activate_irq(struct irq_data *irq_data);
+extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
+
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
+ unsigned int nr_irqs, int node, void *arg)
+{
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
+}
+
+extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq,
+ struct irq_chip *chip,
+ void *chip_data);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
+extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
+extern void irq_domain_free_irqs_common(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs);
+extern void irq_domain_free_irqs_top(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs);
+
+extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+
+extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs);
+
+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
+}
+#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+static inline void irq_domain_activate_irq(struct irq_data *data) { }
+static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
+ unsigned int nr_irqs, int node, void *arg)
+{
+ return -1;
+}
+
+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
+{
+ return false;
+}
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
+static inline void irq_domain_activate_irq(struct irq_data *data) { }
+static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
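A stacked irqchip sitting in front of a parent controller would use these hooks roughly as follows: create its domain with irq_domain_add_hierarchy() and implement ->alloc() so the parent allocates its part of the stack first. This is only a sketch; my_chip, the hwirq choice and the error handling are simplified placeholders.

static int my_domain_alloc(struct irq_domain *d, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	int ret;

	/* Let the parent domain (e.g. the GIC) allocate first */
	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret)
		return ret;

	/* Install this level's chip/hwirq on the same virq
	 * (using virq as hwirq is a simplification) */
	return irq_domain_set_hwirq_and_chip(d, virq, virq, &my_chip, NULL);
}

static const struct irq_domain_ops my_domain_ops = {
	.alloc	= my_domain_alloc,
	.free	= irq_domain_free_irqs_common,
};

/* At probe time, below an already-initialised 'parent' domain: */
domain = irq_domain_add_hierarchy(parent, 0, nr_irqs, node,
				  &my_domain_ops, NULL);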
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
new file mode 100644
index 000000000000..62d543004197
--- /dev/null
+++ b/include/linux/irqhandler.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_IRQHANDLER_H
+#define _LINUX_IRQHANDLER_H
+
+/*
+ * Interrupt flow handler typedefs are defined here to avoid circular
+ * include dependencies.
+ */
+
+struct irq_desc;
+struct irq_data;
+typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef void (*irq_preflow_handler_t)(struct irq_data *data);
+
+#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0dae71e9971c..704b9a599b26 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1042,7 +1042,7 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 784304b222b3..98f923b6a0ea 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -8,28 +8,28 @@
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
*
* Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support the result
- * of a "if (static_key_false(&key))" statement is a unconditional branch (which
+ * self-modifying code. Assuming toolchain and architecture support, the result
+ * of a "if (static_key_false(&key))" statement is an unconditional branch (which
* defaults to false - and the true block is placed out of line).
*
* However at runtime we can change the branch target using
* static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object and for as long as there are references all branches referring to
+ * object, and for as long as there are references all branches referring to
* that particular key will point to the (out of line) true block.
*
- * Since this relies on modifying code the static_key_slow_{inc,dec}() functions
+ * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
* must be considered absolute slow paths (machine wide synchronization etc.).
- * OTOH, since the affected branches are unconditional their runtime overhead
+ * OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
* effect is a single NOP of appropriate size. The on case will patch in a jump
* to the out-of-line block.
*
- * When the control is directly exposed to userspace it is prudent to delay the
+ * When the control is directly exposed to userspace, it is prudent to delay the
* decrement to avoid high frequency code modifications which can (and do)
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
- * Lacking toolchain and or architecture support, it falls back to a simple
+ * Lacking toolchain and/or architecture support, jump labels fall back to a simple
* conditional branch.
*
* struct static_key my_key = STATIC_KEY_INIT_TRUE;
@@ -43,8 +43,7 @@
*
* Not initializing the key (static data is initialized to 0s anyway) is the
* same as using STATIC_KEY_INIT_FALSE.
- *
-*/
+ */
#include <linux/types.h>
#include <linux/compiler.h>
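The cleaned-up comment above describes the usual pattern; as a purely illustrative sketch, a default-false key guarding a rarely enabled path compiles to a NOP until the slow path increments it:

static struct static_key my_tracing_key = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	if (static_key_false(&my_tracing_key))
		do_expensive_trace();	/* hypothetical out-of-line work */
}

/* Slow path, e.g. a sysctl or debugfs handler */
static void enable_tracing(void)
{
	static_key_slow_inc(&my_tracing_key);	/* patches the NOP to a jump */
}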
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index 866caaa9e2bb..c2ce155d83cc 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -22,4 +22,17 @@
*/
#define KERN_CONT ""
+/* integer equivalents of KERN_<LEVEL> */
+#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code
+ * are set to this special level */
+#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */
+#define LOGLEVEL_EMERG 0 /* system is unusable */
+#define LOGLEVEL_ALERT 1 /* action must be taken immediately */
+#define LOGLEVEL_CRIT 2 /* critical conditions */
+#define LOGLEVEL_ERR 3 /* error conditions */
+#define LOGLEVEL_WARNING 4 /* warning conditions */
+#define LOGLEVEL_NOTICE 5 /* normal but significant condition */
+#define LOGLEVEL_INFO 6 /* informational */
+#define LOGLEVEL_DEBUG 7 /* debug-level messages */
+
#endif
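The integer levels let code compare message severity numerically instead of parsing KERN_<LEVEL> prefixes (lower value means more severe). A hypothetical filter, just to show the intended ordering:

/* Hypothetical helper - not an existing kernel function. */
static bool severe_enough(int msg_level, int threshold)
{
	if (msg_level == LOGLEVEL_DEFAULT)	/* no explicit level given */
		return true;
	return msg_level <= threshold;
}

/* severe_enough(LOGLEVEL_ERR, LOGLEVEL_WARNING)  -> true
 * severe_enough(LOGLEVEL_INFO, LOGLEVEL_WARNING) -> false */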
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 95624bed87ef..5449d2f4a1ef 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -162,6 +162,7 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ void ___might_sleep(const char *file, int line, int preempt_offset);
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
@@ -175,10 +176,14 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
#else
+ static inline void ___might_sleep(const char *file, int line,
+ int preempt_offset) { }
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
+# define sched_annotate_sleep() do { } while (0)
#endif
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
@@ -376,10 +381,6 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
-#define strict_strtoul kstrtoul
-#define strict_strtol kstrtol
-#define strict_strtoull kstrtoull
-#define strict_strtoll kstrtoll
extern int num_to_str(char *buf, int size, unsigned long long num);
@@ -407,6 +408,7 @@ int vsscanf(const char *, const char *, va_list);
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
+extern bool parse_option_str(const char *str, const char *option);
extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
@@ -414,9 +416,6 @@ extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-struct pid;
-extern struct pid *session_of_pgrp(struct pid *pgrp);
-
unsigned long int_sqrt(unsigned long);
extern void bust_spinlocks(int yes);
@@ -425,6 +424,7 @@ extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
+extern int panic_on_warn;
extern int sysctl_panic_on_stackoverflow;
/*
* Only to be used by arch init code. If the user over-wrote the default
@@ -496,6 +496,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
bool mac_pton(const char *s, u8 *mac);
@@ -715,23 +716,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
-#define min3(x, y, z) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- typeof(z) _min3 = (z); \
- (void) (&_min1 == &_min2); \
- (void) (&_min1 == &_min3); \
- _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
- (_min2 < _min3 ? _min2 : _min3); })
-
-#define max3(x, y, z) ({ \
- typeof(x) _max1 = (x); \
- typeof(y) _max2 = (y); \
- typeof(z) _max3 = (z); \
- (void) (&_max1 == &_max2); \
- (void) (&_max1 == &_max3); \
- _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
- (_max2 > _max3 ? _max2 : _max3); })
+#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define max3(x, y, z) max((typeof(x))max(x, y), z)
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -746,20 +732,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
/**
* clamp - return a value clamped to a given range with strict typechecking
* @val: current value
- * @min: minimum allowable value
- * @max: maximum allowable value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
*
- * This macro does strict typechecking of min/max to make sure they are of the
+ * This macro does strict typechecking of lo/hi to make sure they are of the
* same type as val. See the unnecessary pointer comparisons.
*/
-#define clamp(val, min, max) ({ \
- typeof(val) __val = (val); \
- typeof(min) __min = (min); \
- typeof(max) __max = (max); \
- (void) (&__val == &__min); \
- (void) (&__val == &__max); \
- __val = __val < __min ? __min: __val; \
- __val > __max ? __max: __val; })
+#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
/*
* ..and if you can't take the strict
@@ -781,36 +760,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* clamp_t - return a value clamped to a given range using a given type
* @type: the type of variable to use
* @val: current value
- * @min: minimum allowable value
- * @max: maximum allowable value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
* 'type' to make all the comparisons.
*/
-#define clamp_t(type, val, min, max) ({ \
- type __val = (val); \
- type __min = (min); \
- type __max = (max); \
- __val = __val < __min ? __min: __val; \
- __val > __max ? __max: __val; })
+#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
/**
* clamp_val - return a value clamped to a given range using val's type
* @val: current value
- * @min: minimum allowable value
- * @max: maximum allowable value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
* type the input argument 'val' is. This is useful when val is an unsigned
* type and min and max are literals that will otherwise be assigned a signed
* integer type.
*/
-#define clamp_val(val, min, max) ({ \
- typeof(val) __val = (val); \
- typeof(val) __min = (min); \
- typeof(val) __max = (max); \
- __val = __val < __min ? __min: __val; \
- __val > __max ? __max: __val; })
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
/*
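The min3/max3 and clamp* rewrites above collapse the open-coded temporaries into compositions of min()/max(). A host-side sketch of what clamp_val() now expands to (the strict pointer-comparison typecheck of the real min()/max() is elided here), showing the unsigned-value-with-literal-bounds case the comment describes:

#include <stdio.h>

#define min(a, b)		((a) < (b) ? (a) : (b))	/* typecheck elided */
#define max(a, b)		((a) > (b) ? (a) : (b))
#define min_t(t, a, b)		min((t)(a), (t)(b))
#define max_t(t, a, b)		max((t)(a), (t)(b))
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
#define clamp_val(val, lo, hi)	clamp_t(typeof(val), val, lo, hi)

int main(void)
{
	unsigned char duty = 200;

	/* Both literal bounds are cast to typeof(duty), so they never take
	 * on a signed integer type of their own. Prints 128. */
	printf("%d\n", clamp_val(duty, 16, 128));
	return 0;
}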
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ecbc52f9ff77..25a822f6f000 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
/* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu (&__get_cpu_var(kstat))
-#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
@@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
* Number of interrupts per specific IRQ source, since bootup
*/
extern unsigned int kstat_irqs(unsigned int irq);
+extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
@@ -77,11 +78,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
-/*
- * Lock/unlock the current runqueue - to extract task statistics:
- */
-extern unsigned long long task_delta_exec(struct task_struct *);
-
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index 9be37da93680..e985ba679c4a 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -41,7 +41,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
u16 capi20_get_version(u32 contr, struct capi_version *verp);
u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
-int capi20_manufacturer(unsigned int cmd, void __user *data);
+int capi20_manufacturer(unsigned long cmd, void __user *data);
#define CAPICTR_UP 0
#define CAPICTR_DOWN 1
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 30faf797c2c3..d4e01b358341 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -179,6 +179,7 @@ struct kernfs_open_file {
struct mutex mutex;
int event;
struct list_head list;
+ char *prealloc_buf;
size_t atomic_write_len;
bool mmapped;
@@ -214,6 +215,13 @@ struct kernfs_ops {
* larger ones are rejected with -E2BIG.
*/
size_t atomic_write_len;
+ /*
+ * "prealloc" causes a buffer to be allocated at open for
+ * all read/write requests. As ->seq_show uses seq_read()
+ * which does its own allocation, it is incompatible with
+ * ->prealloc. Provide ->read and ->write with ->prealloc.
+ */
+ bool prealloc;
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
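An attribute that opts into the new prealloc buffer would look roughly like the sketch below: the write handler receives the buffer allocated at open() time, so no per-write allocation can fail. my_store() is a placeholder, not an existing helper.

static ssize_t my_write(struct kernfs_open_file *of, char *buf,
			size_t bytes, loff_t off)
{
	/* 'buf' is of->prealloc_buf, allocated once when the file was opened */
	return my_store(of->kn, buf, bytes);
}

static const struct kernfs_ops my_kf_ops = {
	.prealloc	= true,		/* incompatible with .seq_show */
	.write		= my_write,
};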
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 4b2a0e11cc5b..9d957b7ae095 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -178,6 +178,7 @@ struct kexec_buf {
struct kimage *image;
char *buffer;
unsigned long bufsz;
+ unsigned long mem;
unsigned long memsz;
unsigned long buf_align;
unsigned long buf_min;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 44792ee649de..ff9f1d394235 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -53,6 +53,24 @@ typedef int (*request_key_actor_t)(struct key_construction *key,
const char *op, void *aux);
/*
+ * Preparsed matching criterion.
+ */
+struct key_match_data {
+ /* Comparison function, defaults to exact description match, but can be
+ * overridden by type->match_preparse(). Should return true if a match
+ * is found and false if not.
+ */
+ bool (*cmp)(const struct key *key,
+ const struct key_match_data *match_data);
+
+ const void *raw_data; /* Raw match data */
+ void *preparsed; /* For ->match_preparse() to stash stuff */
+ unsigned lookup_type; /* Type of lookup for this search. */
+#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
+#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
+};
+
+/*
* kernel managed key type definition
*/
struct key_type {
@@ -65,11 +83,6 @@ struct key_type {
*/
size_t def_datalen;
- /* Default key search algorithm. */
- unsigned def_lookup_type;
-#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
-#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
-
/* vet a description */
int (*vet_description)(const char *description);
@@ -96,8 +109,15 @@ struct key_type {
*/
int (*update)(struct key *key, struct key_preparsed_payload *prep);
- /* match a key against a description */
- int (*match)(const struct key *key, const void *desc);
+ /* Preparse the data supplied to ->match() (optional). The
+ * data to be preparsed can be found in match_data->raw_data.
+ * The lookup type can also be set by this function.
+ */
+ int (*match_preparse)(struct key_match_data *match_data);
+
+ /* Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied. */
+ void (*match_free)(struct key_match_data *match_data);
/* clear some of the data from a key on revokation (optional)
* - the key's semaphore will be write-locked by the caller
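With the per-type ->match() hook replaced by preparsed match data, a key type that needs non-default matching installs its comparison callback from ->match_preparse() and may switch to an iterative search. The sketch below matches on a numeric id; my_key_id() and the id encoding are invented for illustration.

static bool my_key_cmp(const struct key *key,
		       const struct key_match_data *match_data)
{
	return my_key_id(key) == (unsigned long)match_data->preparsed;
}

static int my_key_match_preparse(struct key_match_data *match_data)
{
	unsigned long id;
	int ret;

	ret = kstrtoul(match_data->raw_data, 0, &id);
	if (ret)
		return ret;

	match_data->preparsed	= (void *)id;
	match_data->cmp		= my_key_cmp;
	match_data->lookup_type	= KEYRING_SEARCH_LOOKUP_ITERATE;
	return 0;
}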
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 554fde3a3927..473b43678ad1 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -722,7 +722,7 @@ __kfifo_uint_must_check_helper( \
/**
* kfifo_dma_out_finish - finish a DMA OUT operation
* @fifo: address of the fifo to be used
- * @len: number of bytes transferrd
+ * @len: number of bytes transferred
*
* This macro finish a DMA OUT operation. The out counter will be updated by
* the len parameter. No error checking will be done.
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6b06d378f3df..e465bb15912d 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -283,7 +283,7 @@ struct kgdb_io {
extern struct kgdb_arch arch_kgdb_ops;
-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
#ifdef CONFIG_SERIAL_KGDB_NMI
extern int kgdb_register_nmi_console(void);
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6b394f0b5148..eeb307985715 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ unsigned long vm_flags);
#define khugepaged_enabled() \
(transparent_hugepage_flags & \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
__khugepaged_exit(mm);
}
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
if ((khugepaged_always() ||
- (khugepaged_req_madv() &&
- vma->vm_flags & VM_HUGEPAGE)) &&
- !(vma->vm_flags & VM_NOHUGEPAGE))
+ (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+ !(vm_flags & VM_NOHUGEPAGE))
if (__khugepaged_enter(vma->vm_mm))
return -ENOMEM;
return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
return 0;
}
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
return 0;
}
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 057e95971014..e705467ddb47 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -21,6 +21,8 @@
#ifndef __KMEMLEAK_H
#define __KMEMLEAK_H
+#include <linux/slab.h>
+
#ifdef CONFIG_DEBUG_KMEMLEAK
extern void kmemleak_init(void) __ref;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index f7296e57d614..5297f9fa0ef2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#endif
+int arch_check_ftrace_location(struct kprobe *p);
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a4c33b34fe3f..26f106022c88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
* include/linux/kvm_h.
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
@@ -136,12 +137,11 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS 23
#define KVM_REQ_DISABLE_IBS 24
+#define KVM_REQ_APIC_PAGE_RELOAD 25
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
-struct kvm;
-struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
extern spinlock_t kvm_lock;
@@ -200,6 +200,17 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+/*
+ * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
+ * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
+ * controls whether we retry the gup one more time to completion in that case.
+ * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
+ * handler.
+ */
+int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long addr, bool write_fault,
+ struct page **pagep);
+
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -325,8 +336,6 @@ struct kvm_kernel_irq_routing_entry {
struct hlist_node link;
};
-struct kvm_irq_routing_table;
-
#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif
@@ -345,6 +354,8 @@ struct kvm_memslots {
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
/* The mapping table from slot id to the index in memslots[]. */
short id_to_index[KVM_MEM_SLOTS_NUM];
+ atomic_t lru_slot;
+ int used_slots;
};
struct kvm {
@@ -387,7 +398,6 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
- struct hlist_head mask_notifier_list;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
struct hlist_head irq_ack_notifier_list;
@@ -439,6 +449,14 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_IOAPIC
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+#else
+static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+}
+#endif
+
#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
@@ -528,6 +546,8 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
+ bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
@@ -579,6 +599,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -624,6 +645,8 @@ void kvm_arch_exit(void);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
+
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
@@ -632,8 +655,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
-int kvm_arch_hardware_enable(void *garbage);
-void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_enable(void);
+void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
@@ -690,7 +713,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -698,44 +721,6 @@ struct kvm_irq_ack_notifier {
void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
-struct kvm_assigned_dev_kernel {
- struct kvm_irq_ack_notifier ack_notifier;
- struct list_head list;
- int assigned_dev_id;
- int host_segnr;
- int host_busnr;
- int host_devfn;
- unsigned int entries_nr;
- int host_irq;
- bool host_irq_disabled;
- bool pci_2_3;
- struct msix_entry *host_msix_entries;
- int guest_irq;
- struct msix_entry *guest_msix_entries;
- unsigned long irq_requested_type;
- int irq_source_id;
- int flags;
- struct pci_dev *dev;
- struct kvm *kvm;
- spinlock_t intx_lock;
- spinlock_t intx_mask_lock;
- char irq_name[32];
- struct pci_saved_state *pci_saved_state;
-};
-
-struct kvm_irq_mask_notifier {
- void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
- int irq;
- struct hlist_node link;
-};
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
- bool mask);
-
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
@@ -757,12 +742,6 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-int kvm_iommu_map_guest(struct kvm *kvm);
-int kvm_iommu_unmap_guest(struct kvm *kvm);
-int kvm_assign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
-int kvm_deassign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
struct kvm_memory_slot *slot)
@@ -774,11 +753,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
}
-
-static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
-{
- return 0;
-}
#endif
static inline void kvm_guest_enter(void)
@@ -819,12 +793,28 @@ static inline void kvm_guest_exit(void)
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
- struct kvm_memory_slot *memslot;
+ int start = 0, end = slots->used_slots;
+ int slot = atomic_read(&slots->lru_slot);
+ struct kvm_memory_slot *memslots = slots->memslots;
+
+ if (gfn >= memslots[slot].base_gfn &&
+ gfn < memslots[slot].base_gfn + memslots[slot].npages)
+ return &memslots[slot];
+
+ while (start < end) {
+ slot = start + (end - start) / 2;
+
+ if (gfn >= memslots[slot].base_gfn)
+ end = slot;
+ else
+ start = slot + 1;
+ }
- kvm_for_each_memslot(memslot, slots)
- if (gfn >= memslot->base_gfn &&
- gfn < memslot->base_gfn + memslot->npages)
- return memslot;
+ if (gfn >= memslots[start].base_gfn &&
+ gfn < memslots[start].base_gfn + memslots[start].npages) {
+ atomic_set(&slots->lru_slot, start);
+ return &memslots[start];
+ }
return NULL;
}
@@ -998,25 +988,6 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-
-long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg);
-
-void kvm_free_all_assigned_devices(struct kvm *kvm);
-
-#else
-
-static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
-
-#endif
-
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
set_bit(req, &vcpu->requests);
@@ -1034,8 +1005,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
extern bool kvm_rebooting;
-struct kvm_device_ops;
-
struct kvm_device {
struct kvm_device_ops *ops;
struct kvm *kvm;
@@ -1068,12 +1037,11 @@ struct kvm_device_ops {
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
-extern struct kvm_device_ops kvm_vfio_ops;
-extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
-extern struct kvm_device_ops kvm_flic_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
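The rewritten search_memslots() above replaces the linear scan with a binary search over memslots kept sorted by descending base_gfn, with lru_slot caching the last hit. A standalone illustration of the same search pattern on a plain array (the LRU fast path is left out for brevity):

#include <stdio.h>

struct slot { unsigned long base_gfn, npages; };

/* Sorted by descending base_gfn, as the memslots[] array now is */
static struct slot slots[] = {
	{ .base_gfn = 0x1000, .npages = 0x100 },
	{ .base_gfn = 0x400,  .npages = 0x200 },
	{ .base_gfn = 0x0,    .npages = 0x100 },
};

static struct slot *lookup(unsigned long gfn)
{
	int start = 0, end = 3;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;	/* candidate is at mid or earlier */
		else
			start = mid + 1;
	}
	if (start < 3 && gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];
	return NULL;
}

int main(void)
{
	/* Prints "1 0": 0x450 hits the middle slot, 0x900 falls in a hole */
	printf("%d %d\n", lookup(0x450) != NULL, lookup(0x900) != NULL);
	return 0;
}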
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b0bcce0ddc95..931da7e917cf 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -17,6 +17,20 @@
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
+struct kvm;
+struct kvm_async_pf;
+struct kvm_device_ops;
+struct kvm_interrupt;
+struct kvm_irq_routing_table;
+struct kvm_memory_slot;
+struct kvm_one_reg;
+struct kvm_run;
+struct kvm_userspace_memory_region;
+struct kvm_vcpu;
+struct kvm_vcpu_init;
+
+enum kvm_mr_change;
+
#include <asm/types.h>
/*
@@ -40,33 +54,6 @@ typedef u64 hfn_t;
typedef hfn_t pfn_t;
-union kvm_ioapic_redirect_entry {
- u64 bits;
- struct {
- u8 vector;
- u8 delivery_mode:3;
- u8 dest_mode:1;
- u8 delivery_status:1;
- u8 polarity:1;
- u8 remote_irr:1;
- u8 trig_mode:1;
- u8 mask:1;
- u8 reserve:7;
- u8 reserved[4];
- u8 dest_id;
- } fields;
-};
-
-struct kvm_lapic_irq {
- u32 vector;
- u32 delivery_mode;
- u32 dest_mode;
- u32 level;
- u32 trig_mode;
- u32 shorthand;
- u32 dest_id;
-};
-
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index e43686472197..cfceef32c9b3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,8 +13,9 @@
#define __LINUX_LEDS_H_INCLUDED
#include <linux/list.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/rwsem.h>
+#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
@@ -31,8 +32,8 @@ enum led_brightness {
struct led_classdev {
const char *name;
- int brightness;
- int max_brightness;
+ enum led_brightness brightness;
+ enum led_brightness max_brightness;
int flags;
/* Lower 16 bits reflect status */
@@ -42,11 +43,20 @@ struct led_classdev {
#define LED_BLINK_ONESHOT (1 << 17)
#define LED_BLINK_ONESHOT_STOP (1 << 18)
#define LED_BLINK_INVERT (1 << 19)
+#define LED_SYSFS_DISABLE (1 << 20)
+#define SET_BRIGHTNESS_ASYNC (1 << 21)
+#define SET_BRIGHTNESS_SYNC (1 << 22)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
void (*brightness_set)(struct led_classdev *led_cdev,
enum led_brightness brightness);
+ /*
+ * Set LED brightness level immediately - it can block the caller for
+ * the time required for accessing a LED device register.
+ */
+ int (*brightness_set_sync)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
/* Get LED brightness level */
enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
@@ -85,6 +95,9 @@ struct led_classdev {
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
#endif
+
+ /* Ensures consistent access to the LED Flash Class device */
+ struct mutex led_access;
};
extern int led_classdev_register(struct device *parent,
@@ -140,6 +153,43 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
*/
extern void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness);
+/**
+ * led_update_brightness - update LED brightness
+ * @led_cdev: the LED to query
+ *
+ * Get an LED's current brightness and update led_cdev->brightness
+ * member with the obtained value.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_update_brightness(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_disable - disable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Disable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_disable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_enable - enable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Enable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_enable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_is_disabled - check if LED sysfs interface is disabled
+ * @led_cdev: the LED to query
+ *
+ * Returns: true if the led_cdev's sysfs interface is disabled.
+ */
+static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
+{
+ return led_cdev->flags & LED_SYSFS_DISABLE;
+}
/*
* LED Triggers
@@ -251,6 +301,7 @@ struct gpio_led {
unsigned retain_state_suspended : 1;
unsigned default_state : 2;
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
+ struct gpio_desc *gpiod;
};
#define LEDS_GPIO_DEFSTATE_OFF 0
#define LEDS_GPIO_DEFSTATE_ON 1
@@ -263,7 +314,7 @@ struct gpio_led_platform_data {
#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
#define GPIO_LED_BLINK 2 /* Please, blink */
- int (*gpio_blink_set)(unsigned gpio, int state,
+ int (*gpio_blink_set)(struct gpio_desc *desc, int state,
unsigned long *delay_on,
unsigned long *delay_off);
};
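led_sysfs_disable()/led_sysfs_enable() give a kernel user (a camera flash driven from V4L2, for example) exclusive control of an LED by rejecting sysfs brightness writes for the duration, serialised by the new led_access mutex. A hedged sketch of the intended bracket; my_strobe() stands in for the hardware-specific work:

static int my_flash_strobe(struct led_classdev *led_cdev)
{
	int ret;

	mutex_lock(&led_cdev->led_access);
	led_sysfs_disable(led_cdev);	/* sysfs brightness writes now rejected */

	ret = my_strobe(led_cdev);	/* hypothetical hardware strobe */

	led_sysfs_enable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
	return ret;
}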
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 92abb497ab14..2d182413b1db 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -191,7 +191,8 @@ enum {
ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
ATA_DEV_SEMB = 7, /* SEMB */
ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
- ATA_DEV_NONE = 9, /* no device */
+ ATA_DEV_ZAC = 9, /* ZAC device */
+ ATA_DEV_NONE = 10, /* no device */
/* struct ata_link flags */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
@@ -1191,9 +1192,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
- int queue_depth, int reason);
+ int queue_depth);
extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth, int reason);
+ int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1404,14 +1405,14 @@ static inline int sata_srst_pmp(struct ata_link *link)
* printk helpers
*/
__printf(3, 4)
-int ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
+void ata_port_printk(const struct ata_port *ap, const char *level,
+ const char *fmt, ...);
__printf(3, 4)
-int ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
+void ata_link_printk(const struct ata_link *link, const char *level,
+ const char *fmt, ...);
__printf(3, 4)
-int ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+void ata_dev_printk(const struct ata_device *dev, const char *level,
+ const char *fmt, ...);
#define ata_port_err(ap, fmt, ...) \
ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
@@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
static inline unsigned int ata_class_enabled(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
- class == ATA_DEV_PMP || class == ATA_DEV_SEMB;
+ class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
+ class == ATA_DEV_ZAC;
}
static inline unsigned int ata_class_disabled(unsigned int class)
diff --git a/include/linux/list.h b/include/linux/list.h
index cbbb96fcead9..feb773c76ee0 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -5,6 +5,7 @@
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
+#include <linux/kernel.h>
/*
* Simple doubly linked list implementation.
@@ -345,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
@@ -354,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
@@ -365,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
@@ -376,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*/
@@ -386,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list,
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
@@ -394,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list,
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
@@ -440,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
@@ -451,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
@@ -462,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
@@ -473,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -487,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
@@ -501,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
@@ -514,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
@@ -527,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
@@ -543,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
@@ -558,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
@@ -573,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
* @pos: the loop cursor used in the list_for_each_entry_safe loop
* @n: temporary storage used in list_for_each_entry_safe
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* list_safe_reset_next is not safe to use in general if the list may be
* modified concurrently (eg. the lock is dropped in the loop body). An
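The hunks above only rename @member in the kernel-doc from "list_struct" to "list_head"; the macros themselves are unchanged. For orientation, a minimal sketch of how they are used with an embedded list_head (struct foo, foo_list and drain_foo_list are invented for illustration):

struct foo {
	int value;
	struct list_head node;		/* the "member" the kernel-doc refers to */
};

static LIST_HEAD(foo_list);

static void drain_foo_list(void)
{
	struct foo *pos, *n;

	/* the _safe variant caches the next entry in 'n', so 'pos' may be freed */
	list_for_each_entry_safe(pos, n, &foo_list, node) {
		list_del(&pos->node);
		kfree(pos);
	}
}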
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 257d3779f2ab..0ca8109934e4 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -17,12 +17,8 @@
* Enable lockd debugging.
* Requires RPC_DEBUG.
*/
-#ifdef RPC_DEBUG
-# define LOCKD_DEBUG 1
-#endif
-
#undef ifdebug
-#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG)
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag))
#else
# define ifdebug(flag) if (0)
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 219d79627c05..ff82a32871b5 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -178,7 +178,6 @@ struct nlm_block {
unsigned char b_granted; /* VFS granted lock */
struct nlm_file * b_file; /* file in question */
struct cache_req * b_cache_req; /* deferred request handling */
- struct file_lock * b_fl; /* set for GETLK */
struct cache_deferred_req * b_deferred_req;
unsigned int b_flags; /* block flags */
#define B_QUEUED 1 /* lock queued */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 008388f920d7..74ab23176e9b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -4,7 +4,7 @@
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
- * see Documentation/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.txt for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
@@ -362,6 +362,10 @@ extern void lockdep_trace_alloc(gfp_t mask);
WARN_ON(debug_locks && !lockdep_is_held(l)); \
} while (0)
+#define lockdep_assert_held_once(l) do { \
+ WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
+ } while (0)
+
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#else /* !CONFIG_LOCKDEP */
@@ -412,6 +416,7 @@ struct lock_class_key { };
#define lockdep_depth(tsk) (0)
#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_recursing(tsk) (0)
@@ -505,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
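The lockdep_assert_held_once() added above mirrors lockdep_assert_held() but uses WARN_ON_ONCE(), so a missing lock is reported only the first time. A hypothetical caller (struct my_dev and its lock are invented for this sketch):

static void my_dev_update_stats(struct my_dev *dev)
{
	/* warn (once) if the caller forgot to take dev->lock */
	lockdep_assert_held_once(&dev->lock);

	dev->stats.updates++;
}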
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000000..1726ccbd8009
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CLIENT_H
+#define __MAILBOX_CLIENT_H
+
+#include <linux/of.h>
+#include <linux/device.h>
+
+struct mbox_chan;
+
+/**
+ * struct mbox_client - User of a mailbox
+ * @dev: The client device
+ * @tx_block: If the mbox_send_message should block until data is
+ * transmitted.
+ * @tx_tout: Max block period in ms before TX is assumed to have failed
+ * @knows_txdone: Set if the client can run the TX state machine, usually
+ * because it receives some ACK packet for each transmission.
+ * Unused if the controller already has a TX_Done/RTR IRQ.
+ * @rx_callback: Atomic callback to provide client the data received
+ * @tx_prepare: Atomic callback to ask client to prepare the payload
+ * before initiating the transmission if required.
+ * @tx_done: Atomic callback to tell client of data transmission
+ */
+struct mbox_client {
+ struct device *dev;
+ bool tx_block;
+ unsigned long tx_tout;
+ bool knows_txdone;
+
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ void (*tx_prepare)(struct mbox_client *cl, void *mssg);
+ void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
+};
+
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
+int mbox_send_message(struct mbox_chan *chan, void *mssg);
+void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
+bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
+void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
+
+#endif /* __MAILBOX_CLIENT_H */
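To make the client API above concrete, here is a rough sketch of how a driver could fill in struct mbox_client and send one message; demo_rx/demo_send are invented names, error handling is abbreviated, and an ERR_PTR return from mbox_request_channel() on failure is assumed:

static void demo_rx(struct mbox_client *cl, void *mssg)
{
	dev_info(cl->dev, "got reply %p\n", mssg);
}

static int demo_send(struct device *dev, void *msg)
{
	struct mbox_client cl = {
		.dev		= dev,
		.tx_block	= true,		/* wait for TX completion */
		.tx_tout	= 500,		/* give up after 500 ms */
		.knows_txdone	= false,
		.rx_callback	= demo_rx,
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}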
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000000..d4cf96f07cfc
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,133 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CONTROLLER_H
+#define __MAILBOX_CONTROLLER_H
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+
+struct mbox_chan;
+
+/**
+ * struct mbox_chan_ops - methods to control mailbox channels
+ * @send_data: The API asks the MBOX controller driver, in atomic
+ * context, to try to transmit a message on the bus. Returns 0 if
+ * the data is accepted for transmission, or -EBUSY if the remote
+ * hasn't yet read the last data sent. Actual transmission of
+ * data is reported by the controller via mbox_chan_txdone (if
+ * it has some TX ACK irq). It must not sleep.
+ * @startup: Called when a client requests the chan. The controller
+ * could ask clients for additional parameters of communication
+ * to be provided via client's chan_data. This call may
+ * block. After this call the Controller must forward any
+ * data received on the chan by calling mbox_chan_received_data.
+ * The controller may do work that needs to sleep.
+ * @shutdown: Called when a client relinquishes control of a chan.
+ * This call may block too. The controller must not forward
+ * any received data anymore.
+ * The controller may do work that needs to sleep.
+ * @last_tx_done: If the controller sets 'txdone_poll', the API calls
+ * this to poll status of last TX. The controller must
+ * give priority to IRQ method over polling and never
+ * set both txdone_poll and txdone_irq. Only in polling
+ * mode 'send_data' is expected to return -EBUSY.
+ * The controller may do work that needs to sleep/block.
+ * Used only if txdone_poll:=true && txdone_irq:=false
+ * @peek_data: Atomic check for any received data. Return true if controller
+ * has some data to push to the client. False otherwise.
+ */
+struct mbox_chan_ops {
+ int (*send_data)(struct mbox_chan *chan, void *data);
+ int (*startup)(struct mbox_chan *chan);
+ void (*shutdown)(struct mbox_chan *chan);
+ bool (*last_tx_done)(struct mbox_chan *chan);
+ bool (*peek_data)(struct mbox_chan *chan);
+};
+
+/**
+ * struct mbox_controller - Controller of a class of communication channels
+ * @dev: Device backing this controller
+ * @ops: Operators that work on each communication chan
+ * @chans: Array of channels
+ * @num_chans: Number of channels in the 'chans' array.
+ * @txdone_irq: Indicates if the controller can report to API when
+ * the last transmitted data was read by the remote.
+ * Eg, if it has some TX ACK irq.
+ * @txdone_poll: If the controller can read but not report the TX
+ * done. Ex, some register shows the TX status but
+ * no interrupt rises. Ignored if 'txdone_irq' is set.
+ * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
+ * the last TX's status after this many milliseconds
+ * @of_xlate: Controller driver specific mapping of channel via DT
+ * @poll: API private. Used to poll for TXDONE on all channels.
+ * @node: API private. To hook into list of controllers.
+ */
+struct mbox_controller {
+ struct device *dev;
+ struct mbox_chan_ops *ops;
+ struct mbox_chan *chans;
+ int num_chans;
+ bool txdone_irq;
+ bool txdone_poll;
+ unsigned txpoll_period;
+ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp);
+ /* Internal to API */
+ struct timer_list poll;
+ struct list_head node;
+};
+
+/*
+ * The length of circular buffer for queuing messages from a client.
+ * 'msg_count' tracks the number of buffered messages while 'msg_free'
+ * is the index where the next message would be buffered.
+ * We shouldn't need it to be very big because every transfer is interrupt
+ * triggered and if we have lots of data to transfer, the interrupt
+ * latencies are going to be the bottleneck, not the buffer length.
+ * Besides, mbox_send_message could be called from atomic context and
+ * the client could also queue another message from the notifier 'tx_done'
+ * of the last transfer done.
+ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
+ * print, it needs to come from a config option or some such.
+ */
+#define MBOX_TX_QUEUE_LEN 20
+
+/**
+ * struct mbox_chan - s/w representation of a communication chan
+ * @mbox: Pointer to the parent/provider of this channel
+ * @txdone_method: Way to detect TXDone chosen by the API
+ * @cl: Pointer to the current owner of this channel
+ * @tx_complete: Transmission completion
+ * @active_req: Currently active request hook
+ * @msg_count: No. of mssg currently queued
+ * @msg_free: Index of next available mssg slot
+ * @msg_data: Hook for data packet
+ * @lock: Serialise access to the channel
+ * @con_priv: Hook for controller driver to attach private data
+ */
+struct mbox_chan {
+ struct mbox_controller *mbox;
+ unsigned txdone_method;
+ struct mbox_client *cl;
+ struct completion tx_complete;
+ void *active_req;
+ unsigned msg_count, msg_free;
+ void *msg_data[MBOX_TX_QUEUE_LEN];
+ spinlock_t lock; /* Serialise access to the channel */
+ void *con_priv;
+};
+
+int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
+void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
+void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
+void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
+
+#endif /* __MAILBOX_CONTROLLER_H */
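On the provider side, a controller driver fills in struct mbox_chan_ops and struct mbox_controller and registers the latter. A rough sketch of the IRQ-driven TX model follows; the demo_* hardware helpers are placeholders for real register accesses:

static bool demo_hw_remote_busy(void);	/* assumed hardware helpers */
static void demo_hw_write_msg(u32 msg);

static int demo_send_data(struct mbox_chan *chan, void *data)
{
	if (demo_hw_remote_busy())
		return -EBUSY;	/* remote hasn't read the last message yet */

	demo_hw_write_msg(*(u32 *)data);
	return 0;	/* accepted; the TX ACK irq later calls mbox_chan_txdone() */
}

static struct mbox_chan_ops demo_ops = {
	.send_data = demo_send_data,
	/* .startup/.shutdown/.peek_data as the hardware requires */
};

static struct mbox_chan demo_chans[4];

static struct mbox_controller demo_mbox = {
	.ops		= &demo_ops,
	.chans		= demo_chans,
	.num_chans	= ARRAY_SIZE(demo_chans),
	.txdone_irq	= true,	/* completion reported via mbox_chan_txdone() */
};

/* in probe: demo_mbox.dev = &pdev->dev; then mbox_controller_register(&demo_mbox); */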
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 8e9a029e093d..e6982ac3200d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
#define MARVELL_PHY_ID_88E1318S 0x01410e90
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
+#define MARVELL_PHY_ID_88E3016 0x01410e60
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 550c88fb0267..611b69fa8594 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -61,6 +61,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
}
#endif
+int mvebu_mbus_save_cpu_target(u32 *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
int mvebu_mbus_add_window_remap_by_id(unsigned int target,
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index d14af7b722ef..164aad1f9f12 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/uuid.h>
+#include <linux/mod_devicetable.h>
struct mei_cl_device;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0752d204d9e..7c95af8d552c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,7 +25,6 @@
#include <linux/jump_label.h>
struct mem_cgroup;
-struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
@@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
-bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
- struct mem_cgroup *memcg);
-bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg);
+bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+ struct mem_cgroup *root);
+bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
@@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
-static inline
-bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
+static inline bool mm_match_cgroup(struct mm_struct *mm,
+ struct mem_cgroup *memcg)
{
struct mem_cgroup *task_memcg;
- bool match;
+ bool match = false;
rcu_read_lock();
task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
+ if (task_memcg)
+ match = mem_cgroup_is_descendant(task_memcg, memcg);
rcu_read_unlock();
return match;
}
@@ -139,48 +138,23 @@ static inline bool mem_cgroup_disabled(void)
return false;
}
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
- unsigned long *flags);
-
-extern atomic_t memcg_moving;
-
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
- bool *locked, unsigned long *flags)
-{
- if (mem_cgroup_disabled())
- return;
- rcu_read_lock();
- *locked = false;
- if (atomic_read(&memcg_moving))
- __mem_cgroup_begin_update_page_stat(page, locked, flags);
-}
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
+ unsigned long *flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
+ unsigned long *flags);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx, int val);
-void __mem_cgroup_end_update_page_stat(struct page *page,
- unsigned long *flags);
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
- bool *locked, unsigned long *flags)
-{
- if (mem_cgroup_disabled())
- return;
- if (*locked)
- __mem_cgroup_end_update_page_stat(page, flags);
- rcu_read_unlock();
-}
-
-void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx,
- int val);
-
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
- mem_cgroup_update_page_stat(page, idx, 1);
+ mem_cgroup_update_page_stat(memcg, idx, 1);
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
- mem_cgroup_update_page_stat(page, idx, -1);
+ mem_cgroup_update_page_stat(memcg, idx, -1);
}
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -199,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
-#ifdef CONFIG_DEBUG_VM
-bool mem_cgroup_bad_page_check(struct page *page);
-void mem_cgroup_print_bad_page(struct page *page);
-#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;
@@ -315,12 +285,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
bool *locked, unsigned long *flags)
{
+ return NULL;
}
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
bool *locked, unsigned long *flags)
{
}
@@ -343,12 +314,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
}
@@ -371,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
}
#endif /* CONFIG_MEMCG */
-#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
-static inline bool
-mem_cgroup_bad_page_check(struct page *page)
-{
- return false;
-}
-
-static inline void
-mem_cgroup_print_bad_page(struct page *page)
-{
-}
-#endif
-
enum {
UNDER_LIMIT,
SOFT_LIMIT,
@@ -440,15 +398,10 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
-int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
- struct kmem_cache *root_cache);
-void memcg_free_cache_params(struct kmem_cache *s);
-
-int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);
-struct kmem_cache *
-__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+void __memcg_kmem_put_cache(struct kmem_cache *cachep);
int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
@@ -476,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
/*
* __GFP_NOFAIL allocations will move on even if charging is not
* possible. Therefore we don't even try, and have this allocation
- * unaccounted. We could in theory charge it with
- * res_counter_charge_nofail, but we hope those allocations are rare,
- * and won't be worth the trouble.
+ * unaccounted. We could in theory charge it forcibly, but we hope
+ * those allocations are rare, and won't be worth the trouble.
*/
if (gfp & __GFP_NOFAIL)
return true;
@@ -496,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
* memcg_kmem_uncharge_pages: uncharge pages from memcg
* @page: pointer to struct page being freed
* @order: allocation order.
- *
- * there is no need to specify memcg here, since it is embedded in page_cgroup
*/
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -514,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order)
*
* Needs to be called after memcg_kmem_newpage_charge, regardless of success or
* failure of the allocation. if @page is NULL, this function will revert the
- * charges. Otherwise, it will commit the memcg given by @memcg to the
- * corresponding page_cgroup.
+ * charges. Otherwise, it will commit @page to @memcg.
*/
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
@@ -543,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
if (unlikely(fatal_signal_pending(current)))
return cachep;
- return __memcg_kmem_get_cache(cachep, gfp);
+ return __memcg_kmem_get_cache(cachep);
+}
+
+static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_put_cache(cachep);
}
#else
#define for_each_memcg_cache_index(_idx) \
@@ -574,21 +529,15 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1;
}
-static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
- struct kmem_cache *s, struct kmem_cache *root_cache)
-{
- return 0;
-}
-
-static inline void memcg_free_cache_params(struct kmem_cache *s)
-{
-}
-
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
return cachep;
}
+
+static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
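The memcg_kmem_get_cache()/memcg_kmem_put_cache() pair introduced above is meant to bracket an allocation from the current memcg's copy of a kmem cache. Roughly, ignoring the details of the real slab paths (demo_kmem_alloc is a made-up caller):

static void *demo_kmem_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	struct kmem_cache *memcg_cachep;
	void *obj;

	/* pick the current memcg's copy of the cache (or the root cache) */
	memcg_cachep = memcg_kmem_get_cache(cachep, gfp);

	obj = kmem_cache_alloc(memcg_cachep, gfp);

	/* drop the reference taken by memcg_kmem_get_cache() */
	memcg_kmem_put_cache(memcg_cachep);

	return obj;
}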
diff --git a/include/linux/memory.h b/include/linux/memory.h
index bb7384e3c3d8..8b8d8d12348e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -35,7 +35,7 @@ struct memory_block {
};
int arch_get_memory_phys_device(unsigned long start_pfn);
-unsigned long __weak memory_block_size_bytes(void);
+unsigned long memory_block_size_bytes(void);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d9524c49d767..8f1a41951df9 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
+extern int test_pages_in_a_zone(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
typedef void (*online_page_callback_t)(struct page *page);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index f230a978e6ba..3d385c81c153 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -134,9 +134,10 @@ void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
unsigned long idx);
-struct mempolicy *get_vma_policy(struct task_struct *tsk,
- struct vm_area_struct *vma, unsigned long addr);
-bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);
+struct mempolicy *get_task_policy(struct task_struct *p);
+struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr);
+bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index adba89d9c660..689312745b2f 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -12,7 +12,6 @@
int ab8500_sysctrl_read(u16 reg, u8 *value);
int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value);
-void ab8500_restart(char mode, const char *cmd);
#else
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index f34723f7663c..910e3aa1e965 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -141,6 +141,7 @@ struct arizona {
uint16_t dac_comp_coeff;
uint8_t dac_comp_enabled;
+ struct mutex dac_comp_lock;
};
int arizona_clk32k_enable(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index dbd23c36de21..aacc10d7789c 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -27,6 +27,7 @@
#define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16
#define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17
#define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18
+#define ARIZONA_WRITE_SEQUENCER_CTRL_3 0x19
#define ARIZONA_WRITE_SEQUENCER_PROM 0x1A
#define ARIZONA_TONE_GENERATOR_1 0x20
#define ARIZONA_TONE_GENERATOR_2 0x21
@@ -70,7 +71,9 @@
#define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C
#define ARIZONA_ASYNC_CLOCK_1 0x112
#define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113
+#define ARIZONA_ASYNC_SAMPLE_RATE_2 0x114
#define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C
#define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149
#define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A
#define ARIZONA_RATE_ESTIMATOR_1 0x152
@@ -122,6 +125,8 @@
#define ARIZONA_MIC_BIAS_CTRL_1 0x218
#define ARIZONA_MIC_BIAS_CTRL_2 0x219
#define ARIZONA_MIC_BIAS_CTRL_3 0x21A
+#define ARIZONA_HP_CTRL_1L 0x225
+#define ARIZONA_HP_CTRL_1R 0x226
#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
#define ARIZONA_HEADPHONE_DETECT_1 0x29B
#define ARIZONA_HEADPHONE_DETECT_2 0x29C
@@ -276,8 +281,16 @@
#define ARIZONA_AIF2_FRAME_CTRL_2 0x548
#define ARIZONA_AIF2_FRAME_CTRL_3 0x549
#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A
+#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B
+#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C
+#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D
+#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E
#define ARIZONA_AIF2_FRAME_CTRL_11 0x551
#define ARIZONA_AIF2_FRAME_CTRL_12 0x552
+#define ARIZONA_AIF2_FRAME_CTRL_13 0x553
+#define ARIZONA_AIF2_FRAME_CTRL_14 0x554
+#define ARIZONA_AIF2_FRAME_CTRL_15 0x555
+#define ARIZONA_AIF2_FRAME_CTRL_16 0x556
#define ARIZONA_AIF2_TX_ENABLES 0x559
#define ARIZONA_AIF2_RX_ENABLES 0x55A
#define ARIZONA_AIF2_FORCE_WRITE 0x55B
@@ -1664,16 +1677,30 @@
/*
* R275 (0x113) - Async sample rate 1
*/
-#define ARIZONA_ASYNC_SAMPLE_RATE_MASK 0x001F /* ASYNC_SAMPLE_RATE - [4:0] */
-#define ARIZONA_ASYNC_SAMPLE_RATE_SHIFT 0 /* ASYNC_SAMPLE_RATE - [4:0] */
-#define ARIZONA_ASYNC_SAMPLE_RATE_WIDTH 5 /* ASYNC_SAMPLE_RATE - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_MASK 0x001F /* ASYNC_SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_SHIFT 0 /* ASYNC_SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_WIDTH 5 /* ASYNC_SAMPLE_RATE_1 - [4:0] */
+
+/*
+ * R276 (0x114) - Async sample rate 2
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_MASK 0x001F /* ASYNC_SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_SHIFT 0 /* ASYNC_SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_WIDTH 5 /* ASYNC_SAMPLE_RATE_2 - [4:0] */
/*
* R283 (0x11B) - Async sample rate 1 status
*/
-#define ARIZONA_ASYNC_SAMPLE_RATE_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_STS - [4:0] */
-#define ARIZONA_ASYNC_SAMPLE_RATE_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_STS - [4:0] */
-#define ARIZONA_ASYNC_SAMPLE_RATE_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
+
+/*
+ * R284 (0x11C) - Async sample rate 2 status
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
/*
* R329 (0x149) - Output system clock
@@ -2228,6 +2255,46 @@
#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */
/*
+ * R549 (0x225) - HP Ctrl 1L
+ */
+#define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */
+#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */
+#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */
+#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */
+#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */
+#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */
+#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */
+#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */
+#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */
+#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */
+#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */
+#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */
+
+/*
+ * R550 (0x226) - HP Ctrl 1R
+ */
+#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */
+#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */
+#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */
+#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */
+#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */
+#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */
+#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */
+#define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */
+#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */
+#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */
+#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */
+#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */
+
+/*
* R659 (0x293) - Accessory Detect Mode 1
*/
#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
new file mode 100644
index 000000000000..1279ab1644b5
--- /dev/null
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_MFD_HLCDC_H
+#define __LINUX_MFD_HLCDC_H
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#define ATMEL_HLCDC_CFG(i) ((i) * 0x4)
+#define ATMEL_HLCDC_SIG_CFG LCDCFG(5)
+#define ATMEL_HLCDC_HSPOL BIT(0)
+#define ATMEL_HLCDC_VSPOL BIT(1)
+#define ATMEL_HLCDC_VSPDLYS BIT(2)
+#define ATMEL_HLCDC_VSPDLYE BIT(3)
+#define ATMEL_HLCDC_DISPPOL BIT(4)
+#define ATMEL_HLCDC_DITHER BIT(6)
+#define ATMEL_HLCDC_DISPDLY BIT(7)
+#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
+#define ATMEL_HLCDC_PP BIT(10)
+#define ATMEL_HLCDC_VSPSU BIT(12)
+#define ATMEL_HLCDC_VSPHO BIT(13)
+#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16)
+
+#define ATMEL_HLCDC_EN 0x20
+#define ATMEL_HLCDC_DIS 0x24
+#define ATMEL_HLCDC_SR 0x28
+#define ATMEL_HLCDC_IER 0x2c
+#define ATMEL_HLCDC_IDR 0x30
+#define ATMEL_HLCDC_IMR 0x34
+#define ATMEL_HLCDC_ISR 0x38
+
+#define ATMEL_HLCDC_CLKPOL BIT(0)
+#define ATMEL_HLCDC_CLKSEL BIT(2)
+#define ATMEL_HLCDC_CLKPWMSEL BIT(3)
+#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i))
+#define ATMEL_HLCDC_CLKDIV_SHFT 16
+#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16)
+#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT)
+
+#define ATMEL_HLCDC_PIXEL_CLK BIT(0)
+#define ATMEL_HLCDC_SYNC BIT(1)
+#define ATMEL_HLCDC_DISP BIT(2)
+#define ATMEL_HLCDC_PWM BIT(3)
+#define ATMEL_HLCDC_SIP BIT(4)
+
+#define ATMEL_HLCDC_SOF BIT(0)
+#define ATMEL_HLCDC_SYNCDIS BIT(1)
+#define ATMEL_HLCDC_FIFOERR BIT(4)
+#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8)
+
+/**
+ * Structure shared by the MFD device and its subdevices.
+ *
+ * @regmap: register map used to access HLCDC IP registers
+ * @periph_clk: the hlcdc peripheral clock
+ * @sys_clk: the hlcdc system clock
+ * @slow_clk: the system slow clk
+ * @irq: the hlcdc irq
+ */
+struct atmel_hlcdc {
+ struct regmap *regmap;
+ struct clk *periph_clk;
+ struct clk *sys_clk;
+ struct clk *slow_clk;
+ int irq;
+};
+
+#endif /* __LINUX_MFD_HLCDC_H */
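Sub-drivers of the HLCDC MFD are expected to reach the IP through the shared struct atmel_hlcdc of their parent; a minimal sketch (the probe function is hypothetical, and fetching the struct from the parent's drvdata is an assumption of this sketch):

static int demo_hlcdc_subdev_probe(struct platform_device *pdev)
{
	struct atmel_hlcdc *hlcdc = dev_get_drvdata(pdev->dev.parent);
	u32 status;
	int ret;

	/* read the raw HLCDC status register through the shared regmap */
	ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
	if (ret)
		return ret;

	dev_dbg(&pdev->dev, "HLCDC status: %#x\n", status);
	return 0;
}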
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index d0e31a2287ac..81589d176ae8 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,8 @@
enum {
AXP202_ID = 0,
AXP209_ID,
+ AXP288_ID,
+ NR_AXP20X_VARIANTS,
};
#define AXP20X_DATACACHE(m) (0x04 + (m))
@@ -49,11 +51,13 @@ enum {
#define AXP20X_IRQ3_EN 0x42
#define AXP20X_IRQ4_EN 0x43
#define AXP20X_IRQ5_EN 0x44
+#define AXP20X_IRQ6_EN 0x45
#define AXP20X_IRQ1_STATE 0x48
#define AXP20X_IRQ2_STATE 0x49
#define AXP20X_IRQ3_STATE 0x4a
#define AXP20X_IRQ4_STATE 0x4b
#define AXP20X_IRQ5_STATE 0x4c
+#define AXP20X_IRQ6_STATE 0x4d
/* ADC */
#define AXP20X_ACIN_V_ADC_H 0x56
@@ -116,6 +120,15 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* AXP288 specific registers */
+#define AXP288_PMIC_ADC_H 0x56
+#define AXP288_PMIC_ADC_L 0x57
+#define AXP288_ADC_TS_PIN_CTRL 0x84
+
+#define AXP288_PMIC_ADC_EN 0x84
+#define AXP288_FG_TUNE5 0xed
+
+
/* Regulators IDs */
enum {
AXP20X_LDO1 = 0,
@@ -169,12 +182,58 @@ enum {
AXP20X_IRQ_GPIO0_INPUT,
};
+enum axp288_irqs {
+ AXP288_IRQ_VBUS_FALL = 2,
+ AXP288_IRQ_VBUS_RISE,
+ AXP288_IRQ_OV,
+ AXP288_IRQ_FALLING_ALT,
+ AXP288_IRQ_RISING_ALT,
+ AXP288_IRQ_OV_ALT,
+ AXP288_IRQ_DONE = 10,
+ AXP288_IRQ_CHARGING,
+ AXP288_IRQ_SAFE_QUIT,
+ AXP288_IRQ_SAFE_ENTER,
+ AXP288_IRQ_ABSENT,
+ AXP288_IRQ_APPEND,
+ AXP288_IRQ_QWBTU,
+ AXP288_IRQ_WBTU,
+ AXP288_IRQ_QWBTO,
+ AXP288_IRQ_WBTO,
+ AXP288_IRQ_QCBTU,
+ AXP288_IRQ_CBTU,
+ AXP288_IRQ_QCBTO,
+ AXP288_IRQ_CBTO,
+ AXP288_IRQ_WL2,
+ AXP288_IRQ_WL1,
+ AXP288_IRQ_GPADC,
+ AXP288_IRQ_OT = 31,
+ AXP288_IRQ_GPIO0,
+ AXP288_IRQ_GPIO1,
+ AXP288_IRQ_POKO,
+ AXP288_IRQ_POKL,
+ AXP288_IRQ_POKS,
+ AXP288_IRQ_POKN,
+ AXP288_IRQ_POKP,
+ AXP288_IRQ_TIMER,
+ AXP288_IRQ_MV_CHNG,
+ AXP288_IRQ_BC_USB_CHNG,
+};
+
+#define AXP288_TS_ADC_H 0x58
+#define AXP288_TS_ADC_L 0x59
+#define AXP288_GP_ADC_H 0x5a
+#define AXP288_GP_ADC_L 0x5b
+
struct axp20x_dev {
struct device *dev;
struct i2c_client *i2c_client;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
long variant;
+ int nr_cells;
+ struct mfd_cell *cells;
+ const struct regmap_config *regmap_cfg;
+ const struct regmap_irq_chip *regmap_irq_chip;
};
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index f543de91ce19..a76bc100bf97 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -44,6 +44,9 @@ struct mfd_cell {
*/
const char *of_compatible;
+ /* Matches ACPI PNP id, either _HID or _CID */
+ const char *acpi_pnpid;
+
/*
* These resources can be specified relative to the parent device.
* For accessing hardware you should use resources from the platform dev
@@ -108,6 +111,13 @@ extern int mfd_add_devices(struct device *parent, int id,
struct resource *mem_base,
int irq_base, struct irq_domain *irq_domain);
+static inline int mfd_add_hotplug_devices(struct device *parent,
+ const struct mfd_cell *cells, int n_devs)
+{
+ return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs,
+ NULL, 0, NULL);
+}
+
extern void mfd_remove_devices(struct device *parent);
#endif
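mfd_add_hotplug_devices() is only a convenience wrapper that passes PLATFORM_DEVID_AUTO, so cells from hot-pluggable parents (USB bridges and the like) can be instantiated more than once without id collisions. A hypothetical caller (cell names are illustrative):

static const struct mfd_cell demo_cells[] = {
	{ .name = "demo-gpio" },
	{ .name = "demo-i2c"  },
};

static int demo_probe(struct usb_interface *intf)
{
	/* ids are auto-assigned, so two plugged-in devices don't collide */
	return mfd_add_hotplug_devices(&intf->dev, demo_cells,
				       ARRAY_SIZE(demo_cells));
}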
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index fcbe9d129a9d..0e166b92f5b4 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -62,10 +62,6 @@ struct cros_ec_command {
* @dev: Device pointer
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
- * @cmd_xfer: send command to EC and get response
- * Returns the number of bytes received if the communication succeeded, but
- * that doesn't mean the EC was happy with the command. The caller
- * should check msg.result for the EC's result code.
*
* @priv: Private data
* @irq: Interrupt to use
@@ -82,6 +78,10 @@ struct cros_ec_command {
* @dout_size: size of dout buffer to allocate (zero to use static dout)
* @parent: pointer to parent device (e.g. i2c or spi device)
* @wake_enabled: true if this device can wake the system from sleep
+ * @cmd_xfer: send command to EC and get response
+ * Returns the number of bytes received if the communication succeeded, but
+ * that doesn't mean the EC was happy with the command. The caller
+ * should check msg.result for the EC's result code.
* @lock: one transaction at a time
*/
struct cros_ec_device {
@@ -92,8 +92,6 @@ struct cros_ec_device {
struct device *dev;
bool was_wake_device;
struct class *cros_class;
- int (*cmd_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
/* These are used to implement the platform-specific interface */
void *priv;
@@ -104,6 +102,8 @@ struct cros_ec_device {
int dout_size;
struct device *parent;
bool wake_enabled;
+ int (*cmd_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
struct mutex lock;
};
@@ -153,6 +153,18 @@ int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
+ * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used
+ * instead of calling the EC's cmd_xfer() callback directly.
+ *
+ * @ec_dev: EC device
+ * @msg: Message to write
+ */
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
* cros_ec_remove - Remove a ChromeOS EC
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
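With the transport callback now reached through cros_ec_cmd_xfer(), a caller builds a cros_ec_command, checks the return value for transport errors and msg.result for the EC's verdict. A rough sketch using the hello command (field layout as declared in this version of the headers; demo_ec_hello is invented):

static int demo_ec_hello(struct cros_ec_device *ec)
{
	struct ec_params_hello req = { .in_data = 0xa0b0c0d0 };
	struct ec_response_hello resp;
	struct cros_ec_command msg = {
		.command = EC_CMD_HELLO,
		.outdata = (uint8_t *)&req,
		.outsize = sizeof(req),
		.indata	 = (uint8_t *)&resp,
		.insize	 = sizeof(resp),
	};
	int ret;

	ret = cros_ec_cmd_xfer(ec, &msg);
	if (ret < 0)
		return ret;			/* transport error */
	if (msg.result != EC_RES_SUCCESS)
		return -EIO;			/* EC rejected the command */

	return 0;
}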
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 7853a6410d14..a49cd41feea7 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1928,9 +1928,6 @@ struct ec_response_power_info {
#define EC_CMD_I2C_PASSTHRU 0x9e
-/* Slave address is 10 (not 7) bit */
-#define EC_I2C_FLAG_10BIT (1 << 16)
-
/* Read data; if not present, message is a write */
#define EC_I2C_FLAG_READ (1 << 15)
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index bba65f51a0b5..c18a4c19d6fc 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -211,7 +211,7 @@ static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
int da9052_device_init(struct da9052 *da9052, u8 chip_id);
void da9052_device_exit(struct da9052 *da9052);
-extern struct regmap_config da9052_regmap_config;
+extern const struct regmap_config da9052_regmap_config;
int da9052_irq_init(struct da9052 *da9052);
int da9052_irq_exit(struct da9052 *da9052);
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 5166935ce66d..8e1cdbef3dad 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -21,7 +21,7 @@
*/
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
-#define __LINUX_MFD_DAVINIC_VOICECODEC_H_
+#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -99,12 +99,6 @@ struct davinci_vcif {
dma_addr_t dma_rx_addr;
};
-struct cq93vc {
- struct platform_device *pdev;
- struct snd_soc_codec *codec;
- u32 sysclk;
-};
-
struct davinci_vc;
struct davinci_vc {
@@ -122,7 +116,6 @@ struct davinci_vc {
/* Client devices */
struct davinci_vcif davinci_vcif;
- struct cq93vc cq93vc;
};
#endif
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h
new file mode 100644
index 000000000000..004b24576da8
--- /dev/null
+++ b/include/linux/mfd/dln2.h
@@ -0,0 +1,103 @@
+#ifndef __LINUX_USB_DLN2_H
+#define __LINUX_USB_DLN2_H
+
+#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8))
+
+struct dln2_platform_data {
+ u16 handle; /* sub-driver handle (internally used only) */
+ u8 port; /* I2C/SPI port */
+};
+
+/**
+ * dln2_event_cb_t - event callback function signature
+ *
+ * @pdev - the sub-device that registered this callback
+ * @echo - the echo header field received in the message
+ * @data - the data payload
+ * @len - the data payload length
+ *
+ * The callback function is called in interrupt context and the data payload is
+ * only valid during the call. If the user needs access to the data later, it
+ * must copy it.
+ */
+
+typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo,
+ const void *data, int len);
+
+/**
+ * dln2_register_event_cb - register a callback function for an event
+ *
+ * @pdev - the sub-device that registers the callback
+ * @event - the event for which to register a callback
+ * @event_cb - the callback function
+ *
+ * @return 0 in case of success, negative value in case of error
+ */
+int dln2_register_event_cb(struct platform_device *pdev, u16 event,
+ dln2_event_cb_t event_cb);
+
+/**
+ * dln2_unregister_event_cb - unregister the callback function for an event
+ *
+ * @pdev - the sub-device that registered the callback
+ * @event - the event for which to register a callback
+ */
+void dln2_unregister_event_cb(struct platform_device *pdev, u16 event);
+
+/**
+ * dln2_transfer - issue a DLN2 command and wait for a response and the
+ * associated data
+ *
+ * @pdev - the sub-device which is issuing this transfer
+ * @cmd - the command to be sent to the device
+ * @obuf - the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len - the size of the buffer to be sent to the device
+ * @ibuf - any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len - must be initialized to the input buffer size; it will be modified
+ * to indicate the actual data transferred;
+ *
+ * @return 0 for success, negative value for errors
+ */
+int dln2_transfer(struct platform_device *pdev, u16 cmd,
+ const void *obuf, unsigned obuf_len,
+ void *ibuf, unsigned *ibuf_len);
+
+/**
+ * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed
+ *
+ * @pdev - the sub-device which is issuing this transfer
+ * @cmd - the command to be sent to the device
+ * @ibuf - any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len - must be initialized to the input buffer size; it will be modified
+ * to indicate the actual data transferred;
+ *
+ * @return 0 for success, negative value for errors
+ */
+
+static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd,
+ void *ibuf, unsigned *ibuf_len)
+{
+ return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len);
+}
+
+/**
+ * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed
+ *
+ * @pdev - the sub-device which is issuing this transfer
+ * @cmd - the command to be sent to the device
+ * @obuf - the buffer to be sent to the device; it can be NULL if the
+ * user doesn't need to transmit data with this command
+ * @obuf_len - the size of the buffer to be sent to the device
+ *
+ * @return 0 for success, negative value for errors
+ */
+static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd,
+ const void *obuf, unsigned obuf_len)
+{
+ return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL);
+}
+
+#endif
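The _rx/_tx helpers are thin wrappers around dln2_transfer(). As an example, a sub-driver reading a value back from the device might look like this; the command id and payload layout are invented for the sketch:

#define DEMO_CMD_GET_VERSION	DLN2_CMD(0x30, 0x01)	/* hypothetical id */

static int demo_get_version(struct platform_device *pdev, u32 *version)
{
	__le32 buf;
	unsigned len = sizeof(buf);
	int ret;

	/* no TX payload needed, so use the RX-only variant */
	ret = dln2_transfer_rx(pdev, DEMO_CMD_GET_VERSION, &buf, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(buf))
		return -EPROTO;

	*version = le32_to_cpu(buf);
	return 0;
}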
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
new file mode 100644
index 000000000000..587273e35acf
--- /dev/null
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -0,0 +1,41 @@
+/*
+ * Header file for device driver Hi6421 PMIC
+ *
+ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ * Copyright (c) <2013-2014> Linaro Ltd.
+ * http://www.linaro.org
+ *
+ * Author: Guodong Xu <guodong.xu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __HI6421_PMIC_H
+#define __HI6421_PMIC_H
+
+/* Hi6421 registers are mapped to the memory bus with a 4-byte stride */
+#define HI6421_REG_TO_BUS_ADDR(x) (x << 2)
+
+/* Hi6421 maximum register number */
+#define HI6421_REG_MAX 0xFF
+
+/* Hi6421 OCP (over current protection) and DEB (debounce) control register */
+#define HI6421_OCP_DEB_CTRL_REG HI6421_REG_TO_BUS_ADDR(0x51)
+#define HI6421_OCP_DEB_SEL_MASK 0x0C
+#define HI6421_OCP_DEB_SEL_8MS 0x00
+#define HI6421_OCP_DEB_SEL_16MS 0x04
+#define HI6421_OCP_DEB_SEL_32MS 0x08
+#define HI6421_OCP_DEB_SEL_64MS 0x0C
+#define HI6421_OCP_EN_DEBOUNCE_MASK 0x02
+#define HI6421_OCP_EN_DEBOUNCE_ENABLE 0x02
+#define HI6421_OCP_AUTO_STOP_MASK 0x01
+#define HI6421_OCP_AUTO_STOP_ENABLE 0x01
+
+struct hi6421_pmic {
+ struct regmap *regmap;
+};
+
+#endif /* __HI6421_PMIC_H */
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index 499253604026..f01c1fae4d84 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -72,15 +72,33 @@ enum max14577_muic_reg {
MAX14577_MUIC_REG_END,
};
+/*
+ * Combined charger types for max14577 and max77836.
+ *
+ * On max14577 the three lower bits map to the STATUS2/CHGTYP field.
+ * However, the max77836 assigns different meanings to the last two values
+ * of STATUS2/CHGTYP, so the enum has two additional values for max77836.
+ * These values are just the register value bitwise ORed with 0x8.
+ */
enum max14577_muic_charger_type {
- MAX14577_CHARGER_TYPE_NONE = 0,
- MAX14577_CHARGER_TYPE_USB,
- MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT,
- MAX14577_CHARGER_TYPE_DEDICATED_CHG,
- MAX14577_CHARGER_TYPE_SPECIAL_500MA,
- MAX14577_CHARGER_TYPE_SPECIAL_1A,
- MAX14577_CHARGER_TYPE_RESERVED,
- MAX14577_CHARGER_TYPE_DEAD_BATTERY = 7,
+ MAX14577_CHARGER_TYPE_NONE = 0x0,
+ MAX14577_CHARGER_TYPE_USB = 0x1,
+ MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT = 0x2,
+ MAX14577_CHARGER_TYPE_DEDICATED_CHG = 0x3,
+ MAX14577_CHARGER_TYPE_SPECIAL_500MA = 0x4,
+ /* Special 1A or 2A charger */
+ MAX14577_CHARGER_TYPE_SPECIAL_1A = 0x5,
+ /* max14577: reserved, used on max77836 */
+ MAX14577_CHARGER_TYPE_RESERVED = 0x6,
+ /* max14577: dead-battery charging with maximum current 100mA */
+ MAX14577_CHARGER_TYPE_DEAD_BATTERY = 0x7,
+ /*
+ * max77836: special charger (bias on D+/D-),
+ * matches register value of 0x6
+ */
+ MAX77836_CHARGER_TYPE_SPECIAL_BIAS = 0xe,
+ /* max77836: reserved, register value 0x7 */
+ MAX77836_CHARGER_TYPE_RESERVED = 0xf,
};
/* MAX14577 interrupts */
@@ -121,13 +139,15 @@ enum max14577_muic_charger_type {
#define STATUS2_CHGTYP_SHIFT 0
#define STATUS2_CHGDETRUN_SHIFT 3
#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DBCHG_SHIFT 5
+#define MAX14577_STATUS2_DBCHG_SHIFT 5
+#define MAX77836_STATUS2_DXOVP_SHIFT 5
#define STATUS2_VBVOLT_SHIFT 6
#define MAX77836_STATUS2_VIDRM_SHIFT 7
#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
#define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
#define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DBCHG_MASK BIT(STATUS2_DBCHG_SHIFT)
+#define MAX14577_STATUS2_DBCHG_MASK BIT(MAX14577_STATUS2_DBCHG_SHIFT)
+#define MAX77836_STATUS2_DXOVP_MASK BIT(MAX77836_STATUS2_DXOVP_SHIFT)
#define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
#define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT)
@@ -177,9 +197,11 @@ enum max14577_muic_charger_type {
#define CTRL3_JIGSET_SHIFT 0
#define CTRL3_BOOTSET_SHIFT 2
#define CTRL3_ADCDBSET_SHIFT 4
+#define CTRL3_WBTH_SHIFT 6
#define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT)
#define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT)
#define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT)
+#define CTRL3_WBTH_MASK (0x3 << CTRL3_WBTH_SHIFT)
/* Slave addr = 0x4A: Charger */
enum max14577_charger_reg {
@@ -210,16 +232,20 @@ enum max14577_charger_reg {
#define CDETCTRL1_CHGTYPMAN_SHIFT 1
#define CDETCTRL1_DCDEN_SHIFT 2
#define CDETCTRL1_DCD2SCT_SHIFT 3
-#define CDETCTRL1_DCHKTM_SHIFT 4
-#define CDETCTRL1_DBEXIT_SHIFT 5
+#define MAX14577_CDETCTRL1_DCHKTM_SHIFT 4
+#define MAX77836_CDETCTRL1_CDLY_SHIFT 4
+#define MAX14577_CDETCTRL1_DBEXIT_SHIFT 5
+#define MAX77836_CDETCTRL1_DCDCPL_SHIFT 5
#define CDETCTRL1_DBIDLE_SHIFT 6
#define CDETCTRL1_CDPDET_SHIFT 7
#define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT)
#define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT)
#define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT)
#define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT)
-#define CDETCTRL1_DCHKTM_MASK BIT(CDETCTRL1_DCHKTM_SHIFT)
-#define CDETCTRL1_DBEXIT_MASK BIT(CDETCTRL1_DBEXIT_SHIFT)
+#define MAX14577_CDETCTRL1_DCHKTM_MASK BIT(MAX14577_CDETCTRL1_DCHKTM_SHIFT)
+#define MAX77836_CDETCTRL1_CDDLY_MASK BIT(MAX77836_CDETCTRL1_CDDLY_SHIFT)
+#define MAX14577_CDETCTRL1_DBEXIT_MASK BIT(MAX14577_CDETCTRL1_DBEXIT_SHIFT)
+#define MAX77836_CDETCTRL1_DCDCPL_MASK BIT(MAX77836_CDETCTRL1_DCDCPL_SHIFT)
#define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT)
#define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT)
@@ -255,17 +281,36 @@ enum max14577_charger_reg {
#define CHGCTRL7_OTPCGHCVS_SHIFT 0
#define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT)
-/* MAX14577 regulator current limits (as in CHGCTRL4 register), uA */
-#define MAX14577_REGULATOR_CURRENT_LIMIT_MIN 90000
-#define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START 200000
-#define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP 50000
-#define MAX14577_REGULATOR_CURRENT_LIMIT_MAX 950000
-
-/* MAX77836 regulator current limits (as in CHGCTRL4 register), uA */
-#define MAX77836_REGULATOR_CURRENT_LIMIT_MIN 45000
-#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START 100000
-#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP 25000
-#define MAX77836_REGULATOR_CURRENT_LIMIT_MAX 475000
+/* MAX14577 charger current limits (as in CHGCTRL4 register), uA */
+#define MAX14577_CHARGER_CURRENT_LIMIT_MIN 90000U
+#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_START 200000U
+#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_STEP 50000U
+#define MAX14577_CHARGER_CURRENT_LIMIT_MAX 950000U
+
+/* MAX77836 charger current limits (as in CHGCTRL4 register), uA */
+#define MAX77836_CHARGER_CURRENT_LIMIT_MIN 45000U
+#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_START 100000U
+#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_STEP 25000U
+#define MAX77836_CHARGER_CURRENT_LIMIT_MAX 475000U
+
+/*
+ * MAX14577 charger End-Of-Charge current limits
+ * (as in CHGCTRL5 register), uA
+ */
+#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MIN 50000U
+#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_STEP 10000U
+#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MAX 200000U
+
+/*
+ * MAX14577/MAX77836 Battery Constant Voltage
+ * (as in CHGCTRL3 register), uV
+ */
+#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MIN 4000000U
+#define MAXIM_CHARGER_CONSTANT_VOLTAGE_STEP 20000U
+#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MAX 4350000U
+
+/* Default value for fast charge timer, in hours */
+#define MAXIM_CHARGER_FAST_CHARGE_TIMER_DEFAULT 5
/* MAX14577 regulator SFOUT LDO voltage, fixed, uV */
#define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index c83fbed1c7b6..ccfaf952c31b 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -54,6 +54,13 @@ struct max14577_regulator_platform_data {
struct device_node *of_node;
};
+struct max14577_charger_platform_data {
+ u32 constant_uvolt;
+ u32 fast_charge_uamp;
+ u32 eoc_uamp;
+ u32 ovp_uvolt;
+};
+
/*
* MAX14577 MFD platform data
*/
@@ -74,4 +81,27 @@ struct max14577_platform_data {
struct max14577_regulator_platform_data *regulators;
};
+/*
+ * Valid limits of current for max14577 and max77836 chargers.
+ * They must correspond to the MBCICHWRCL and MBCICHWRCH fields in the
+ * CHGCTRL4 register for a given chipset.
+ */
+struct maxim_charger_current {
+ /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */
+ unsigned int min;
+ /*
+ * Minimal current when high setting is active,
+ * set in CHGCTRL4/MBCICHWRCH, uA
+ */
+ unsigned int high_start;
+ /* Value of one step in high setting, uA */
+ unsigned int high_step;
+ /* Maximum current of high setting, uA */
+ unsigned int max;
+};
+
+extern const struct maxim_charger_current maxim_charger_currents[];
+extern int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits,
+ unsigned int min_ua, unsigned int max_ua, u8 *dst);
+
#endif /* __MAX14577_H__ */
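maxim_charger_calc_reg_current() turns a requested current range into the CHGCTRL4 register value using the per-chipset limits table above. A hypothetical caller; indexing maxim_charger_currents[] by device type and the CHGCTRL4 register name follow the driver's conventions but are assumptions of this sketch:

static int demo_set_charge_current(struct regmap *regmap,
				   enum maxim_device_type dev_type,
				   unsigned int min_ua, unsigned int max_ua)
{
	const struct maxim_charger_current *limits =
		&maxim_charger_currents[dev_type];
	u8 reg;
	int ret;

	ret = maxim_charger_calc_reg_current(limits, min_ua, max_ua, &reg);
	if (ret)
		return ret;	/* requested range not achievable on this chip */

	return regmap_write(regmap, MAX14577_CHG_REG_CHG_CTRL4, reg);
}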
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index 7e6dc4b2b795..553f7d09258a 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -131,13 +131,6 @@ enum max77686_opmode {
MAX77686_OPMODE_STANDBY,
};
-enum max77802_opmode {
- MAX77802_OPMODE_OFF,
- MAX77802_OPMODE_STANDBY,
- MAX77802_OPMODE_LP,
- MAX77802_OPMODE_NORMAL,
-};
-
struct max77686_opmode_data {
int id;
int mode;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index d0e578fd7053..08dae01258b9 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -26,7 +26,6 @@
#include <linux/i2c.h>
-#define MAX77693_NUM_IRQ_MUIC_REGS 3
#define MAX77693_REG_INVALID (0xff)
/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
@@ -46,7 +45,7 @@ enum max77693_pmic_reg {
MAX77693_LED_REG_VOUT_FLASH2 = 0x0C,
MAX77693_LED_REG_FLASH_INT = 0x0E,
MAX77693_LED_REG_FLASH_INT_MASK = 0x0F,
- MAX77693_LED_REG_FLASH_INT_STATUS = 0x10,
+ MAX77693_LED_REG_FLASH_STATUS = 0x10,
MAX77693_PMIC_REG_PMIC_ID1 = 0x20,
MAX77693_PMIC_REG_PMIC_ID2 = 0x21,
@@ -85,6 +84,65 @@ enum max77693_pmic_reg {
MAX77693_PMIC_REG_END,
};
+/* MAX77693 ITORCH register */
+#define TORCH_IOUT1_SHIFT 0
+#define TORCH_IOUT2_SHIFT 4
+#define TORCH_IOUT_MIN 15625
+#define TORCH_IOUT_MAX 250000
+#define TORCH_IOUT_STEP 15625
+
+/* MAX77693 IFLASH1 and IFLASH2 registers */
+#define FLASH_IOUT_MIN 15625
+#define FLASH_IOUT_MAX_1LED 1000000
+#define FLASH_IOUT_MAX_2LEDS 625000
+#define FLASH_IOUT_STEP 15625
+
+/* MAX77693 TORCH_TIMER register */
+#define TORCH_TMR_NO_TIMER 0x40
+#define TORCH_TIMEOUT_MIN 262000
+#define TORCH_TIMEOUT_MAX 15728000
+
+/* MAX77693 FLASH_TIMER register */
+#define FLASH_TMR_LEVEL 0x80
+#define FLASH_TIMEOUT_MIN 62500
+#define FLASH_TIMEOUT_MAX 1000000
+#define FLASH_TIMEOUT_STEP 62500
+
+/* MAX77693 FLASH_EN register */
+#define FLASH_EN_OFF 0x0
+#define FLASH_EN_FLASH 0x1
+#define FLASH_EN_TORCH 0x2
+#define FLASH_EN_ON 0x3
+#define FLASH_EN_SHIFT(x) (6 - ((x) - 1) * 2)
+#define TORCH_EN_SHIFT(x) (2 - ((x) - 1) * 2)
+
+/* MAX77693 MAX_FLASH1 register */
+#define MAX_FLASH1_MAX_FL_EN 0x80
+#define MAX_FLASH1_VSYS_MIN 2400
+#define MAX_FLASH1_VSYS_MAX 3400
+#define MAX_FLASH1_VSYS_STEP 33
+
+/* MAX77693 VOUT_CNTL register */
+#define FLASH_BOOST_FIXED 0x04
+#define FLASH_BOOST_LEDNUM_2 0x80
+
+/* MAX77693 VOUT_FLASH1 register */
+#define FLASH_VOUT_MIN 3300
+#define FLASH_VOUT_MAX 5500
+#define FLASH_VOUT_STEP 25
+#define FLASH_VOUT_RMIN 0x0c
+
+/* MAX77693 FLASH_STATUS register */
+#define FLASH_STATUS_FLASH_ON BIT(3)
+#define FLASH_STATUS_TORCH_ON BIT(2)
+
+/* MAX77693 FLASH_INT register */
+#define FLASH_INT_FLED2_OPEN BIT(0)
+#define FLASH_INT_FLED2_SHORT BIT(1)
+#define FLASH_INT_FLED1_OPEN BIT(2)
+#define FLASH_INT_FLED1_SHORT BIT(3)
+#define FLASH_INT_OVER_CURRENT BIT(4)
+
/* MAX77693 CHG_CNFG_00 register */
#define CHG_CNFG_00_CHG_MASK 0x1
#define CHG_CNFG_00_BUCK_MASK 0x4
@@ -271,6 +329,13 @@ enum max77693_irq_source {
MAX77693_IRQ_GROUP_NR,
};
+#define SRC_IRQ_CHARGER BIT(0)
+#define SRC_IRQ_TOP BIT(1)
+#define SRC_IRQ_FLASH BIT(2)
+#define SRC_IRQ_MUIC BIT(3)
+#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+ | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
+
#define LED_IRQ_FLED2_OPEN BIT(0)
#define LED_IRQ_FLED2_SHORT BIT(1)
#define LED_IRQ_FLED1_OPEN BIT(2)
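A sketch of how the ITORCH constants added above translate a torch current in
uA into a register field for FLED1; the clamping and rounding here are
illustrative, not necessarily what the LED driver does:

static u8 example_torch_current_to_reg(u32 ua)
{
	if (ua < TORCH_IOUT_MIN)
		ua = TORCH_IOUT_MIN;
	if (ua > TORCH_IOUT_MAX)
		ua = TORCH_IOUT_MAX;

	/* One step is 15625 uA; the FLED1 field starts at TORCH_IOUT1_SHIFT. */
	return ((ua - TORCH_IOUT_MIN) / TORCH_IOUT_STEP) << TORCH_IOUT1_SHIFT;
}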
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 3f3dc45f93ee..f0b6585cd874 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -63,6 +63,45 @@ struct max77693_muic_platform_data {
int path_uart;
};
+/* MAX77693 led flash */
+
+/* triggers */
+enum max77693_led_trigger {
+ MAX77693_LED_TRIG_OFF,
+ MAX77693_LED_TRIG_FLASH,
+ MAX77693_LED_TRIG_TORCH,
+ MAX77693_LED_TRIG_EXT,
+ MAX77693_LED_TRIG_SOFT,
+};
+
+/* trigger types */
+enum max77693_led_trigger_type {
+ MAX77693_LED_TRIG_TYPE_EDGE,
+ MAX77693_LED_TRIG_TYPE_LEVEL,
+};
+
+/* boost modes */
+enum max77693_led_boost_mode {
+ MAX77693_LED_BOOST_NONE,
+ MAX77693_LED_BOOST_ADAPTIVE,
+ MAX77693_LED_BOOST_FIXED,
+};
+
+struct max77693_led_platform_data {
+ u32 fleds[2];
+ u32 iout_torch[2];
+ u32 iout_flash[2];
+ u32 trigger[2];
+ u32 trigger_type[2];
+ u32 num_leds;
+ u32 boost_mode;
+ u32 flash_timeout;
+ u32 boost_vout;
+ u32 low_vsys;
+};
+
+/* MAX77693 */
+
struct max77693_platform_data {
/* regulator data */
struct max77693_regulator_data *regulators;
@@ -70,5 +109,6 @@ struct max77693_platform_data {
/* muic data */
struct max77693_muic_platform_data *muic_data;
+ struct max77693_led_platform_data *led_data;
};
#endif /* __LINUX_MFD_MAX77693_H */
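An illustrative board-file initializer for the new LED platform data; the
values and units (uA for the currents, us for the timeout, mV for the
voltages) are inferred from the register limits in max77693-private.h and
should be treated as assumptions:

static struct max77693_led_platform_data example_led_pdata = {
	.num_leds	= 1,
	.fleds		= { 1, 0 },		/* FLED1 used, FLED2 unused (assumed semantics) */
	.iout_torch	= { 250000, 0 },	/* uA */
	.iout_flash	= { 1000000, 0 },	/* uA */
	.trigger	= { MAX77693_LED_TRIG_EXT, MAX77693_LED_TRIG_OFF },
	.trigger_type	= { MAX77693_LED_TRIG_TYPE_LEVEL, 0 },
	.boost_mode	= MAX77693_LED_BOOST_FIXED,
	.flash_timeout	= 1000000,		/* us */
	.boost_vout	= 5000,			/* mV */
	.low_vsys	= 3400,			/* mV */
};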
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
new file mode 100644
index 000000000000..fb09312d854b
--- /dev/null
+++ b/include/linux/mfd/rk808.h
@@ -0,0 +1,196 @@
+/*
+ * rk808.h for Rockchip RK808
+ *
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ * Author: Zhang Qing <zhangqing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_REGULATOR_rk808_H
+#define __LINUX_REGULATOR_rk808_H
+
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+/*
+ * rk808 Global Register Map.
+ */
+
+#define RK808_DCDC1 0 /* (0+RK808_START) */
+#define RK808_LDO1 4 /* (4+RK808_START) */
+#define RK808_NUM_REGULATORS 14
+
+enum rk808_reg {
+ RK808_ID_DCDC1,
+ RK808_ID_DCDC2,
+ RK808_ID_DCDC3,
+ RK808_ID_DCDC4,
+ RK808_ID_LDO1,
+ RK808_ID_LDO2,
+ RK808_ID_LDO3,
+ RK808_ID_LDO4,
+ RK808_ID_LDO5,
+ RK808_ID_LDO6,
+ RK808_ID_LDO7,
+ RK808_ID_LDO8,
+ RK808_ID_SWITCH1,
+ RK808_ID_SWITCH2,
+};
+
+#define RK808_SECONDS_REG 0x00
+#define RK808_MINUTES_REG 0x01
+#define RK808_HOURS_REG 0x02
+#define RK808_DAYS_REG 0x03
+#define RK808_MONTHS_REG 0x04
+#define RK808_YEARS_REG 0x05
+#define RK808_WEEKS_REG 0x06
+#define RK808_ALARM_SECONDS_REG 0x08
+#define RK808_ALARM_MINUTES_REG 0x09
+#define RK808_ALARM_HOURS_REG 0x0a
+#define RK808_ALARM_DAYS_REG 0x0b
+#define RK808_ALARM_MONTHS_REG 0x0c
+#define RK808_ALARM_YEARS_REG 0x0d
+#define RK808_RTC_CTRL_REG 0x10
+#define RK808_RTC_STATUS_REG 0x11
+#define RK808_RTC_INT_REG 0x12
+#define RK808_RTC_COMP_LSB_REG 0x13
+#define RK808_RTC_COMP_MSB_REG 0x14
+#define RK808_CLK32OUT_REG 0x20
+#define RK808_VB_MON_REG 0x21
+#define RK808_THERMAL_REG 0x22
+#define RK808_DCDC_EN_REG 0x23
+#define RK808_LDO_EN_REG 0x24
+#define RK808_SLEEP_SET_OFF_REG1 0x25
+#define RK808_SLEEP_SET_OFF_REG2 0x26
+#define RK808_DCDC_UV_STS_REG 0x27
+#define RK808_DCDC_UV_ACT_REG 0x28
+#define RK808_LDO_UV_STS_REG 0x29
+#define RK808_LDO_UV_ACT_REG 0x2a
+#define RK808_DCDC_PG_REG 0x2b
+#define RK808_LDO_PG_REG 0x2c
+#define RK808_VOUT_MON_TDB_REG 0x2d
+#define RK808_BUCK1_CONFIG_REG 0x2e
+#define RK808_BUCK1_ON_VSEL_REG 0x2f
+#define RK808_BUCK1_SLP_VSEL_REG 0x30
+#define RK808_BUCK1_DVS_VSEL_REG 0x31
+#define RK808_BUCK2_CONFIG_REG 0x32
+#define RK808_BUCK2_ON_VSEL_REG 0x33
+#define RK808_BUCK2_SLP_VSEL_REG 0x34
+#define RK808_BUCK2_DVS_VSEL_REG 0x35
+#define RK808_BUCK3_CONFIG_REG 0x36
+#define RK808_BUCK4_CONFIG_REG 0x37
+#define RK808_BUCK4_ON_VSEL_REG 0x38
+#define RK808_BUCK4_SLP_VSEL_REG 0x39
+#define RK808_BOOST_CONFIG_REG 0x3a
+#define RK808_LDO1_ON_VSEL_REG 0x3b
+#define RK808_LDO1_SLP_VSEL_REG 0x3c
+#define RK808_LDO2_ON_VSEL_REG 0x3d
+#define RK808_LDO2_SLP_VSEL_REG 0x3e
+#define RK808_LDO3_ON_VSEL_REG 0x3f
+#define RK808_LDO3_SLP_VSEL_REG 0x40
+#define RK808_LDO4_ON_VSEL_REG 0x41
+#define RK808_LDO4_SLP_VSEL_REG 0x42
+#define RK808_LDO5_ON_VSEL_REG 0x43
+#define RK808_LDO5_SLP_VSEL_REG 0x44
+#define RK808_LDO6_ON_VSEL_REG 0x45
+#define RK808_LDO6_SLP_VSEL_REG 0x46
+#define RK808_LDO7_ON_VSEL_REG 0x47
+#define RK808_LDO7_SLP_VSEL_REG 0x48
+#define RK808_LDO8_ON_VSEL_REG 0x49
+#define RK808_LDO8_SLP_VSEL_REG 0x4a
+#define RK808_DEVCTRL_REG 0x4b
+#define RK808_INT_STS_REG1 0x4c
+#define RK808_INT_STS_MSK_REG1 0x4d
+#define RK808_INT_STS_REG2 0x4e
+#define RK808_INT_STS_MSK_REG2 0x4f
+#define RK808_IO_POL_REG 0x50
+
+/* IRQ Definitions */
+#define RK808_IRQ_VOUT_LO 0
+#define RK808_IRQ_VB_LO 1
+#define RK808_IRQ_PWRON 2
+#define RK808_IRQ_PWRON_LP 3
+#define RK808_IRQ_HOTDIE 4
+#define RK808_IRQ_RTC_ALARM 5
+#define RK808_IRQ_RTC_PERIOD 6
+#define RK808_IRQ_PLUG_IN_INT 7
+#define RK808_IRQ_PLUG_OUT_INT 8
+#define RK808_NUM_IRQ 9
+
+#define RK808_IRQ_VOUT_LO_MSK BIT(0)
+#define RK808_IRQ_VB_LO_MSK BIT(1)
+#define RK808_IRQ_PWRON_MSK BIT(2)
+#define RK808_IRQ_PWRON_LP_MSK BIT(3)
+#define RK808_IRQ_HOTDIE_MSK BIT(4)
+#define RK808_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK808_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0)
+#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1)
+
+#define RK808_VBAT_LOW_2V8 0x00
+#define RK808_VBAT_LOW_2V9 0x01
+#define RK808_VBAT_LOW_3V0 0x02
+#define RK808_VBAT_LOW_3V1 0x03
+#define RK808_VBAT_LOW_3V2 0x04
+#define RK808_VBAT_LOW_3V3 0x05
+#define RK808_VBAT_LOW_3V4 0x06
+#define RK808_VBAT_LOW_3V5 0x07
+#define VBAT_LOW_VOL_MASK (0x07 << 0)
+#define EN_VABT_LOW_SHUT_DOWN (0x00 << 4)
+#define EN_VBAT_LOW_IRQ (0x1 << 4)
+#define VBAT_LOW_ACT_MASK (0x1 << 4)
+
+#define BUCK_ILMIN_MASK (7 << 0)
+#define BOOST_ILMIN_MASK (7 << 0)
+#define BUCK1_RATE_MASK (3 << 3)
+#define BUCK2_RATE_MASK (3 << 3)
+#define MASK_ALL 0xff
+
+#define SWITCH2_EN BIT(6)
+#define SWITCH1_EN BIT(5)
+#define DEV_OFF_RST BIT(3)
+
+#define VB_LO_ACT BIT(4)
+#define VB_LO_SEL_3500MV (7 << 0)
+
+#define VOUT_LO_INT BIT(0)
+#define CLK32KOUT2_EN BIT(0)
+
+enum {
+ BUCK_ILMIN_50MA,
+ BUCK_ILMIN_100MA,
+ BUCK_ILMIN_150MA,
+ BUCK_ILMIN_200MA,
+ BUCK_ILMIN_250MA,
+ BUCK_ILMIN_300MA,
+ BUCK_ILMIN_350MA,
+ BUCK_ILMIN_400MA,
+};
+
+enum {
+ BOOST_ILMIN_75MA,
+ BOOST_ILMIN_100MA,
+ BOOST_ILMIN_125MA,
+ BOOST_ILMIN_150MA,
+ BOOST_ILMIN_175MA,
+ BOOST_ILMIN_200MA,
+ BOOST_ILMIN_225MA,
+ BOOST_ILMIN_250MA,
+};
+
+struct rk808 {
+ struct i2c_client *i2c;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+};
+#endif /* __LINUX_REGULATOR_rk808_H */
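Two short sketches of driver code built on this header, using only the regmap
handle from struct rk808 (the RTC registers are likely BCD-encoded; no
decoding is attempted here):

static int example_rk808_read_seconds(struct rk808 *rk808, unsigned int *sec)
{
	return regmap_read(rk808->regmap, RK808_SECONDS_REG, sec);
}

static int example_rk808_enable_vb_lo_irq(struct rk808 *rk808)
{
	/* Report low battery as an interrupt once VBAT drops below 3.5 V. */
	return regmap_update_bits(rk808->regmap, RK808_VB_MON_REG,
				  VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
				  RK808_VBAT_LOW_3V5 | EN_VBAT_LOW_IRQ);
}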
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
new file mode 100644
index 000000000000..c72d5344f3b3
--- /dev/null
+++ b/include/linux/mfd/rn5t618.h
@@ -0,0 +1,228 @@
+/*
+ * MFD core driver for Ricoh RN5T618 PMIC
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_MFD_RN5T618_H
+#define __LINUX_MFD_RN5T618_H
+
+#include <linux/regmap.h>
+
+#define RN5T618_LSIVER 0x00
+#define RN5T618_OTPVER 0x01
+#define RN5T618_IODAC 0x02
+#define RN5T618_VINDAC 0x03
+#define RN5T618_CPUCNT 0x06
+#define RN5T618_PSWR 0x07
+#define RN5T618_PONHIS 0x09
+#define RN5T618_POFFHIS 0x0a
+#define RN5T618_WATCHDOG 0x0b
+#define RN5T618_WATCHDOGCNT 0x0c
+#define RN5T618_PWRFUNC 0x0d
+#define RN5T618_SLPCNT 0x0e
+#define RN5T618_REPCNT 0x0f
+#define RN5T618_PWRONTIMSET 0x10
+#define RN5T618_NOETIMSETCNT 0x11
+#define RN5T618_PWRIREN 0x12
+#define RN5T618_PWRIRQ 0x13
+#define RN5T618_PWRMON 0x14
+#define RN5T618_PWRIRSEL 0x15
+#define RN5T618_DC1_SLOT 0x16
+#define RN5T618_DC2_SLOT 0x17
+#define RN5T618_DC3_SLOT 0x18
+#define RN5T618_LDO1_SLOT 0x1b
+#define RN5T618_LDO2_SLOT 0x1c
+#define RN5T618_LDO3_SLOT 0x1d
+#define RN5T618_LDO4_SLOT 0x1e
+#define RN5T618_LDO5_SLOT 0x1f
+#define RN5T618_PSO0_SLOT 0x25
+#define RN5T618_PSO1_SLOT 0x26
+#define RN5T618_PSO2_SLOT 0x27
+#define RN5T618_PSO3_SLOT 0x28
+#define RN5T618_LDORTC1_SLOT 0x2a
+#define RN5T618_DC1CTL 0x2c
+#define RN5T618_DC1CTL2 0x2d
+#define RN5T618_DC2CTL 0x2e
+#define RN5T618_DC2CTL2 0x2f
+#define RN5T618_DC3CTL 0x30
+#define RN5T618_DC3CTL2 0x31
+#define RN5T618_DC1DAC 0x36
+#define RN5T618_DC2DAC 0x37
+#define RN5T618_DC3DAC 0x38
+#define RN5T618_DC1DAC_SLP 0x3b
+#define RN5T618_DC2DAC_SLP 0x3c
+#define RN5T618_DC3DAC_SLP 0x3d
+#define RN5T618_DCIREN 0x40
+#define RN5T618_DCIRQ 0x41
+#define RN5T618_DCIRMON 0x42
+#define RN5T618_LDOEN1 0x44
+#define RN5T618_LDOEN2 0x45
+#define RN5T618_LDODIS 0x46
+#define RN5T618_LDO1DAC 0x4c
+#define RN5T618_LDO2DAC 0x4d
+#define RN5T618_LDO3DAC 0x4e
+#define RN5T618_LDO4DAC 0x4f
+#define RN5T618_LDO5DAC 0x50
+#define RN5T618_LDORTCDAC 0x56
+#define RN5T618_LDORTC2DAC 0x57
+#define RN5T618_LDO1DAC_SLP 0x58
+#define RN5T618_LDO2DAC_SLP 0x59
+#define RN5T618_LDO3DAC_SLP 0x5a
+#define RN5T618_LDO4DAC_SLP 0x5b
+#define RN5T618_LDO5DAC_SLP 0x5c
+#define RN5T618_ADCCNT1 0x64
+#define RN5T618_ADCCNT2 0x65
+#define RN5T618_ADCCNT3 0x66
+#define RN5T618_ILIMDATAH 0x68
+#define RN5T618_ILIMDATAL 0x69
+#define RN5T618_VBATDATAH 0x6a
+#define RN5T618_VBATDATAL 0x6b
+#define RN5T618_VADPDATAH 0x6c
+#define RN5T618_VADPDATAL 0x6d
+#define RN5T618_VUSBDATAH 0x6e
+#define RN5T618_VUSBDATAL 0x6f
+#define RN5T618_VSYSDATAH 0x70
+#define RN5T618_VSYSDATAL 0x71
+#define RN5T618_VTHMDATAH 0x72
+#define RN5T618_VTHMDATAL 0x73
+#define RN5T618_AIN1DATAH 0x74
+#define RN5T618_AIN1DATAL 0x75
+#define RN5T618_AIN0DATAH 0x76
+#define RN5T618_AIN0DATAL 0x77
+#define RN5T618_ILIMTHL 0x78
+#define RN5T618_ILIMTHH 0x79
+#define RN5T618_VBATTHL 0x7a
+#define RN5T618_VBATTHH 0x7b
+#define RN5T618_VADPTHL 0x7c
+#define RN5T618_VADPTHH 0x7d
+#define RN5T618_VUSBTHL 0x7e
+#define RN5T618_VUSBTHH 0x7f
+#define RN5T618_VSYSTHL 0x80
+#define RN5T618_VSYSTHH 0x81
+#define RN5T618_VTHMTHL 0x82
+#define RN5T618_VTHMTHH 0x83
+#define RN5T618_AIN1THL 0x84
+#define RN5T618_AIN1THH 0x85
+#define RN5T618_AIN0THL 0x86
+#define RN5T618_AIN0THH 0x87
+#define RN5T618_EN_ADCIR1 0x88
+#define RN5T618_EN_ADCIR2 0x89
+#define RN5T618_EN_ADCIR3 0x8a
+#define RN5T618_IR_ADC1 0x8c
+#define RN5T618_IR_ADC2 0x8d
+#define RN5T618_IR_ADC3 0x8e
+#define RN5T618_IOSEL 0x90
+#define RN5T618_IOOUT 0x91
+#define RN5T618_GPEDGE1 0x92
+#define RN5T618_GPEDGE2 0x93
+#define RN5T618_EN_GPIR 0x94
+#define RN5T618_IR_GPR 0x95
+#define RN5T618_IR_GPF 0x96
+#define RN5T618_MON_IOIN 0x97
+#define RN5T618_GPLED_FUNC 0x98
+#define RN5T618_INTPOL 0x9c
+#define RN5T618_INTEN 0x9d
+#define RN5T618_INTMON 0x9e
+#define RN5T618_PREVINDAC 0xb0
+#define RN5T618_BATDAC 0xb1
+#define RN5T618_CHGCTL1 0xb3
+#define RN5T618_CHGCTL2 0xb4
+#define RN5T618_VSYSSET 0xb5
+#define RN5T618_REGISET1 0xb6
+#define RN5T618_REGISET2 0xb7
+#define RN5T618_CHGISET 0xb8
+#define RN5T618_TIMSET 0xb9
+#define RN5T618_BATSET1 0xba
+#define RN5T618_BATSET2 0xbb
+#define RN5T618_DIESET 0xbc
+#define RN5T618_CHGSTATE 0xbd
+#define RN5T618_CHGCTRL_IRFMASK 0xbe
+#define RN5T618_CHGSTAT_IRFMASK1 0xbf
+#define RN5T618_CHGSTAT_IRFMASK2 0xc0
+#define RN5T618_CHGERR_IRFMASK 0xc1
+#define RN5T618_CHGCTRL_IRR 0xc2
+#define RN5T618_CHGSTAT_IRR1 0xc3
+#define RN5T618_CHGSTAT_IRR2 0xc4
+#define RN5T618_CHGERR_IRR 0xc5
+#define RN5T618_CHGCTRL_MONI 0xc6
+#define RN5T618_CHGSTAT_MONI1 0xc7
+#define RN5T618_CHGSTAT_MONI2 0xc8
+#define RN5T618_CHGERR_MONI 0xc9
+#define RN5T618_CHGCTRL_DETMOD1 0xca
+#define RN5T618_CHGCTRL_DETMOD2 0xcb
+#define RN5T618_CHGSTAT_DETMOD1 0xcc
+#define RN5T618_CHGSTAT_DETMOD2 0xcd
+#define RN5T618_CHGSTAT_DETMOD3 0xce
+#define RN5T618_CHGERR_DETMOD1 0xcf
+#define RN5T618_CHGERR_DETMOD2 0xd0
+#define RN5T618_CHGOSCCTL 0xd4
+#define RN5T618_CHGOSCSCORESET1 0xd5
+#define RN5T618_CHGOSCSCORESET2 0xd6
+#define RN5T618_CHGOSCSCORESET3 0xd7
+#define RN5T618_CHGOSCFREQSET1 0xd8
+#define RN5T618_CHGOSCFREQSET2 0xd9
+#define RN5T618_CONTROL 0xe0
+#define RN5T618_SOC 0xe1
+#define RN5T618_RE_CAP_H 0xe2
+#define RN5T618_RE_CAP_L 0xe3
+#define RN5T618_FA_CAP_H 0xe4
+#define RN5T618_FA_CAP_L 0xe5
+#define RN5T618_AGE 0xe6
+#define RN5T618_TT_EMPTY_H 0xe7
+#define RN5T618_TT_EMPTY_L 0xe8
+#define RN5T618_TT_FULL_H 0xe9
+#define RN5T618_TT_FULL_L 0xea
+#define RN5T618_VOLTAGE_1 0xeb
+#define RN5T618_VOLTAGE_0 0xec
+#define RN5T618_TEMP_1 0xed
+#define RN5T618_TEMP_0 0xee
+#define RN5T618_CC_CTRL 0xef
+#define RN5T618_CC_COUNT2 0xf0
+#define RN5T618_CC_COUNT1 0xf1
+#define RN5T618_CC_COUNT0 0xf2
+#define RN5T618_CC_SUMREG3 0xf3
+#define RN5T618_CC_SUMREG2 0xf4
+#define RN5T618_CC_SUMREG1 0xf5
+#define RN5T618_CC_SUMREG0 0xf6
+#define RN5T618_CC_OFFREG1 0xf7
+#define RN5T618_CC_OFFREG0 0xf8
+#define RN5T618_CC_GAINREG1 0xf9
+#define RN5T618_CC_GAINREG0 0xfa
+#define RN5T618_CC_AVEREG1 0xfb
+#define RN5T618_CC_AVEREG0 0xfc
+#define RN5T618_MAX_REG 0xfc
+
+#define RN5T618_REPCNT_REPWRON BIT(0)
+#define RN5T618_SLPCNT_SWPWROFF BIT(0)
+#define RN5T618_WATCHDOG_WDOGEN BIT(2)
+#define RN5T618_WATCHDOG_WDOGTIM_M (BIT(0) | BIT(1))
+#define RN5T618_WATCHDOG_WDOGTIM_S 0
+#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
+
+enum {
+ RN5T618_DCDC1,
+ RN5T618_DCDC2,
+ RN5T618_DCDC3,
+ RN5T618_LDO1,
+ RN5T618_LDO2,
+ RN5T618_LDO3,
+ RN5T618_LDO4,
+ RN5T618_LDO5,
+ RN5T618_LDORTC1,
+ RN5T618_LDORTC2,
+ RN5T618_REG_NUM,
+};
+
+struct rn5t618 {
+ struct regmap *regmap;
+};
+
+#endif /* __LINUX_MFD_RN5T618_H */
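A sketch of reading the raw battery-voltage ADC result through the regmap; the
12-bit packing (upper 8 bits in VBATDATAH, lower 4 bits in VBATDATAL) is an
assumption about the ADC layout, not something stated in this header:

static int example_rn5t618_read_vbat_raw(struct rn5t618 *priv, unsigned int *raw)
{
	unsigned int high, low;
	int ret;

	ret = regmap_read(priv->regmap, RN5T618_VBATDATAH, &high);
	if (ret)
		return ret;

	ret = regmap_read(priv->regmap, RN5T618_VBATDATAL, &low);
	if (ret)
		return ret;

	*raw = (high << 4) | (low & 0xf);
	return 0;
}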
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 74346d5e7899..0c12628e91c6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -558,6 +558,7 @@
#define SD_SAMPLE_POINT_CTL 0xFDA7
#define SD_PUSH_POINT_CTL 0xFDA8
#define SD_CMD0 0xFDA9
+#define SD_CMD_START 0x40
#define SD_CMD1 0xFDAA
#define SD_CMD2 0xFDAB
#define SD_CMD3 0xFDAC
@@ -707,6 +708,14 @@
#define PM_CTRL1 0xFF44
#define PM_CTRL2 0xFF45
#define PM_CTRL3 0xFF46
+#define SDIO_SEND_PME_EN 0x80
+#define FORCE_RC_MODE_ON 0x40
+#define FORCE_RX50_LINK_ON 0x20
+#define D3_DELINK_MODE_EN 0x10
+#define USE_PESRTB_CTL_DELINK 0x08
+#define DELAY_PIN_WAKE 0x04
+#define RESET_PIN_WAKE 0x02
+#define PM_WAKE_EN 0x01
#define PM_CTRL4 0xFF47
/* Memory mapping */
@@ -752,6 +761,14 @@
#define PHY_DUM_REG 0x1F
#define LCTLR 0x80
+#define LCTLR_EXT_SYNC 0x80
+#define LCTLR_COMMON_CLOCK_CFG 0x40
+#define LCTLR_RETRAIN_LINK 0x20
+#define LCTLR_LINK_DISABLE 0x10
+#define LCTLR_RCB 0x08
+#define LCTLR_RESERVED 0x04
+#define LCTLR_ASPM_CTL_MASK 0x03
+
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
@@ -967,4 +984,24 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
return (u8 *)(pcr->host_cmds_ptr);
}
+static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
+ u8 mask, u8 append)
+{
+ int err;
+ u8 val;
+
+ err = pci_read_config_byte(pcr->pci, addr, &val);
+ if (err < 0)
+ return err;
+ return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
+}
+
+static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
+{
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
+}
+
#endif
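A sketch of how the new rtsx_pci_update_cfg_byte() helper might be used, for
example to clear the ASPM control field of the Link Control config byte; the
mask argument is truncated to the low byte, so ~LCTLR_ASPM_CTL_MASK keeps
every other bit of that byte:

static inline int example_disable_aspm(struct rtsx_pcr *pcr)
{
	return rtsx_pci_update_cfg_byte(pcr, LCTLR, ~LCTLR_ASPM_CTL_MASK, 0);
}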
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index b5f73de81aad..3fdb7cfbffb3 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -14,12 +14,35 @@
#ifndef __LINUX_MFD_SEC_CORE_H
#define __LINUX_MFD_SEC_CORE_H
+/* Macros to represent minimum voltages for LDO/BUCK */
+#define MIN_3000_MV 3000000
+#define MIN_2500_MV 2500000
+#define MIN_2000_MV 2000000
+#define MIN_1800_MV 1800000
+#define MIN_1500_MV 1500000
+#define MIN_1400_MV 1400000
+#define MIN_1000_MV 1000000
+
+#define MIN_900_MV 900000
+#define MIN_850_MV 850000
+#define MIN_800_MV 800000
+#define MIN_750_MV 750000
+#define MIN_600_MV 600000
+#define MIN_500_MV 500000
+
+/* Macros to represent steps for LDO/BUCK */
+#define STEP_50_MV 50000
+#define STEP_25_MV 25000
+#define STEP_12_5_MV 12500
+#define STEP_6_25_MV 6250
+
enum sec_device_type {
S5M8751X,
S5M8763X,
S5M8767X,
S2MPA01,
S2MPS11X,
+ S2MPS13X,
S2MPS14X,
S2MPU02,
};
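The MIN_*_MV and STEP_*_MV macros above feed the usual linear regulator math,
output_uV = min_uV + selector * step_uV. A sketch for a buck that starts at
600 mV with 6.25 mV steps (which regulator uses which pair is chip specific):

static inline int example_buck_sel_to_uv(unsigned int sel)
{
	return MIN_600_MV + sel * STEP_6_25_MV;
}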
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index fbc63bc0d6a2..2766108bca2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -155,18 +155,6 @@ enum s2mpa01_regulators {
S2MPA01_REGULATOR_MAX,
};
-#define S2MPA01_BUCK_MIN1 600000
-#define S2MPA01_BUCK_MIN2 800000
-#define S2MPA01_BUCK_MIN3 1000000
-#define S2MPA01_BUCK_MIN4 1500000
-#define S2MPA01_LDO_MIN 800000
-
-#define S2MPA01_BUCK_STEP1 6250
-#define S2MPA01_BUCK_STEP2 12500
-
-#define S2MPA01_LDO_STEP1 50000
-#define S2MPA01_LDO_STEP2 25000
-
#define S2MPA01_LDO_VSEL_MASK 0x3F
#define S2MPA01_BUCK_VSEL_MASK 0xFF
#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT)
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b3ddf98dec37..7981a9d77d3f 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -171,15 +171,6 @@ enum s2mps11_regulators {
S2MPS11_REGULATOR_MAX,
};
-#define S2MPS11_BUCK_MIN1 600000
-#define S2MPS11_BUCK_MIN2 750000
-#define S2MPS11_BUCK_MIN3 3000000
-#define S2MPS11_LDO_MIN 800000
-#define S2MPS11_BUCK_STEP1 6250
-#define S2MPS11_BUCK_STEP2 12500
-#define S2MPS11_BUCK_STEP3 25000
-#define S2MPS11_LDO_STEP1 50000
-#define S2MPS11_LDO_STEP2 25000
#define S2MPS11_LDO_VSEL_MASK 0x3F
#define S2MPS11_BUCK_VSEL_MASK 0xFF
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
new file mode 100644
index 000000000000..ce5dda8958fe
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -0,0 +1,186 @@
+/*
+ * s2mps13.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPS13_H
+#define __LINUX_MFD_S2MPS13_H
+
+/* S2MPS13 registers */
+enum s2mps13_reg {
+ S2MPS13_REG_ID,
+ S2MPS13_REG_INT1,
+ S2MPS13_REG_INT2,
+ S2MPS13_REG_INT3,
+ S2MPS13_REG_INT1M,
+ S2MPS13_REG_INT2M,
+ S2MPS13_REG_INT3M,
+ S2MPS13_REG_ST1,
+ S2MPS13_REG_ST2,
+ S2MPS13_REG_PWRONSRC,
+ S2MPS13_REG_OFFSRC,
+ S2MPS13_REG_BU_CHG,
+ S2MPS13_REG_RTCCTRL,
+ S2MPS13_REG_CTRL1,
+ S2MPS13_REG_CTRL2,
+ S2MPS13_REG_RSVD1,
+ S2MPS13_REG_RSVD2,
+ S2MPS13_REG_RSVD3,
+ S2MPS13_REG_RSVD4,
+ S2MPS13_REG_RSVD5,
+ S2MPS13_REG_RSVD6,
+ S2MPS13_REG_CTRL3,
+ S2MPS13_REG_RSVD7,
+ S2MPS13_REG_RSVD8,
+ S2MPS13_REG_WRSTBI,
+ S2MPS13_REG_B1CTRL,
+ S2MPS13_REG_B1OUT,
+ S2MPS13_REG_B2CTRL,
+ S2MPS13_REG_B2OUT,
+ S2MPS13_REG_B3CTRL,
+ S2MPS13_REG_B3OUT,
+ S2MPS13_REG_B4CTRL,
+ S2MPS13_REG_B4OUT,
+ S2MPS13_REG_B5CTRL,
+ S2MPS13_REG_B5OUT,
+ S2MPS13_REG_B6CTRL,
+ S2MPS13_REG_B6OUT,
+ S2MPS13_REG_B7CTRL,
+ S2MPS13_REG_B7OUT,
+ S2MPS13_REG_B8CTRL,
+ S2MPS13_REG_B8OUT,
+ S2MPS13_REG_B9CTRL,
+ S2MPS13_REG_B9OUT,
+ S2MPS13_REG_B10CTRL,
+ S2MPS13_REG_B10OUT,
+ S2MPS13_REG_BB1CTRL,
+ S2MPS13_REG_BB1OUT,
+ S2MPS13_REG_BUCK_RAMP1,
+ S2MPS13_REG_BUCK_RAMP2,
+ S2MPS13_REG_LDO_DVS1,
+ S2MPS13_REG_LDO_DVS2,
+ S2MPS13_REG_LDO_DVS3,
+ S2MPS13_REG_B6OUT2,
+ S2MPS13_REG_L1CTRL,
+ S2MPS13_REG_L2CTRL,
+ S2MPS13_REG_L3CTRL,
+ S2MPS13_REG_L4CTRL,
+ S2MPS13_REG_L5CTRL,
+ S2MPS13_REG_L6CTRL,
+ S2MPS13_REG_L7CTRL,
+ S2MPS13_REG_L8CTRL,
+ S2MPS13_REG_L9CTRL,
+ S2MPS13_REG_L10CTRL,
+ S2MPS13_REG_L11CTRL,
+ S2MPS13_REG_L12CTRL,
+ S2MPS13_REG_L13CTRL,
+ S2MPS13_REG_L14CTRL,
+ S2MPS13_REG_L15CTRL,
+ S2MPS13_REG_L16CTRL,
+ S2MPS13_REG_L17CTRL,
+ S2MPS13_REG_L18CTRL,
+ S2MPS13_REG_L19CTRL,
+ S2MPS13_REG_L20CTRL,
+ S2MPS13_REG_L21CTRL,
+ S2MPS13_REG_L22CTRL,
+ S2MPS13_REG_L23CTRL,
+ S2MPS13_REG_L24CTRL,
+ S2MPS13_REG_L25CTRL,
+ S2MPS13_REG_L26CTRL,
+ S2MPS13_REG_L27CTRL,
+ S2MPS13_REG_L28CTRL,
+ S2MPS13_REG_L30CTRL,
+ S2MPS13_REG_L31CTRL,
+ S2MPS13_REG_L32CTRL,
+ S2MPS13_REG_L33CTRL,
+ S2MPS13_REG_L34CTRL,
+ S2MPS13_REG_L35CTRL,
+ S2MPS13_REG_L36CTRL,
+ S2MPS13_REG_L37CTRL,
+ S2MPS13_REG_L38CTRL,
+ S2MPS13_REG_L39CTRL,
+ S2MPS13_REG_L40CTRL,
+ S2MPS13_REG_LDODSCH1,
+ S2MPS13_REG_LDODSCH2,
+ S2MPS13_REG_LDODSCH3,
+ S2MPS13_REG_LDODSCH4,
+ S2MPS13_REG_LDODSCH5,
+};
+
+/* regulator ids */
+enum s2mps13_regulators {
+ S2MPS13_LDO1,
+ S2MPS13_LDO2,
+ S2MPS13_LDO3,
+ S2MPS13_LDO4,
+ S2MPS13_LDO5,
+ S2MPS13_LDO6,
+ S2MPS13_LDO7,
+ S2MPS13_LDO8,
+ S2MPS13_LDO9,
+ S2MPS13_LDO10,
+ S2MPS13_LDO11,
+ S2MPS13_LDO12,
+ S2MPS13_LDO13,
+ S2MPS13_LDO14,
+ S2MPS13_LDO15,
+ S2MPS13_LDO16,
+ S2MPS13_LDO17,
+ S2MPS13_LDO18,
+ S2MPS13_LDO19,
+ S2MPS13_LDO20,
+ S2MPS13_LDO21,
+ S2MPS13_LDO22,
+ S2MPS13_LDO23,
+ S2MPS13_LDO24,
+ S2MPS13_LDO25,
+ S2MPS13_LDO26,
+ S2MPS13_LDO27,
+ S2MPS13_LDO28,
+ S2MPS13_LDO29,
+ S2MPS13_LDO30,
+ S2MPS13_LDO31,
+ S2MPS13_LDO32,
+ S2MPS13_LDO33,
+ S2MPS13_LDO34,
+ S2MPS13_LDO35,
+ S2MPS13_LDO36,
+ S2MPS13_LDO37,
+ S2MPS13_LDO38,
+ S2MPS13_LDO39,
+ S2MPS13_LDO40,
+ S2MPS13_BUCK1,
+ S2MPS13_BUCK2,
+ S2MPS13_BUCK3,
+ S2MPS13_BUCK4,
+ S2MPS13_BUCK5,
+ S2MPS13_BUCK6,
+ S2MPS13_BUCK7,
+ S2MPS13_BUCK8,
+ S2MPS13_BUCK9,
+ S2MPS13_BUCK10,
+
+ S2MPS13_REGULATOR_MAX,
+};
+
+/*
+ * Default ramp delay in uV/us. The datasheet says that the ramp delay can be
+ * controlled, but it does not specify which register is used for that.
+ * Assume that the default value is set.
+ */
+#define S2MPS13_BUCK_RAMP_DELAY 12500
+
+#endif /* __LINUX_MFD_S2MPS13_H */
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index 900cd7a04314..c92f4782afb5 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -123,10 +123,6 @@ enum s2mps14_regulators {
};
/* Regulator constraints for BUCKx */
-#define S2MPS14_BUCK1235_MIN_600MV 600000
-#define S2MPS14_BUCK4_MIN_1400MV 1400000
-#define S2MPS14_BUCK1235_STEP_6_25MV 6250
-#define S2MPS14_BUCK4_STEP_12_5MV 12500
#define S2MPS14_BUCK1235_START_SEL 0x20
#define S2MPS14_BUCK4_START_SEL 0x40
/*
@@ -136,12 +132,6 @@ enum s2mps14_regulators {
*/
#define S2MPS14_BUCK_RAMP_DELAY 12500
-/* Regulator constraints for different types of LDOx */
-#define S2MPS14_LDO_MIN_800MV 800000
-#define S2MPS14_LDO_MIN_1800MV 1800000
-#define S2MPS14_LDO_STEP_12_5MV 12500
-#define S2MPS14_LDO_STEP_25MV 25000
-
#define S2MPS14_LDO_VSEL_MASK 0x3F
#define S2MPS14_BUCK_VSEL_MASK 0xFF
#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index ff44374a1a4e..c877cad61a13 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -395,4 +395,43 @@
#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
+/* For imx6sx iomux gpr register field define */
+#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20)
+#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20)
+#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20)
+#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19)
+#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19)
+#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19)
+#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13)
+#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
+#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+
+#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
+#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
+
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3)
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3)
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3)
+
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4)
+
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index e6088c2e2092..e1c12d84c26a 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -164,13 +164,10 @@ struct tc3589x_keypad_platform_data {
/**
* struct tc3589x_gpio_platform_data - TC3589x GPIO platform data
- * @gpio_base: first gpio number assigned to TC3589x. A maximum of
- * %TC3589x_NR_GPIOS GPIOs will be allocated.
* @setup: callback for board-specific initialization
* @remove: callback for board-specific teardown
*/
struct tc3589x_gpio_platform_data {
- int gpio_base;
void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base);
void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base);
};
@@ -178,18 +175,13 @@ struct tc3589x_gpio_platform_data {
/**
* struct tc3589x_platform_data - TC3589x platform data
* @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
- * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used.
* @gpio: GPIO-specific platform data
* @keypad: keypad-specific platform data
*/
struct tc3589x_platform_data {
unsigned int block;
- int irq_base;
struct tc3589x_gpio_platform_data *gpio;
const struct tc3589x_keypad_platform_data *keypad;
};
-#define TC3589x_NR_GPIOS 24
-#define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS)
-
#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index fb96c84dada5..e2e70053470e 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -155,6 +155,7 @@ struct ti_tscadc_dev {
void __iomem *tscadc_base;
int irq;
int used_cells; /* 1-2 */
+ int tsc_wires;
int tsc_cell; /* -1 if not used */
int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
diff --git a/include/linux/mfd/ti_ssp.h b/include/linux/mfd/ti_ssp.h
deleted file mode 100644
index dbb4b43bd20e..000000000000
--- a/include/linux/mfd/ti_ssp.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs
- *
- * Copyright (C) 2010 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __TI_SSP_H__
-#define __TI_SSP_H__
-
-struct ti_ssp_dev_data {
- const char *dev_name;
- void *pdata;
- size_t pdata_size;
-};
-
-struct ti_ssp_data {
- unsigned long out_clock;
- struct ti_ssp_dev_data dev_data[2];
-};
-
-struct ti_ssp_spi_data {
- unsigned long iosel;
- int num_cs;
- void (*select)(int cs);
-};
-
-/*
- * Sequencer port IO pin configuration bits. These do not correlate 1-1 with
- * the hardware. The iosel field in the port data combines iosel1 and iosel2,
- * and is therefore not a direct map to register space. It is best to use the
- * macros below to construct iosel values.
- *
- * least significant 16 bits --> iosel1
- * most significant 16 bits --> iosel2
- */
-
-#define SSP_IN 0x0000
-#define SSP_DATA 0x0001
-#define SSP_CLOCK 0x0002
-#define SSP_CHIPSEL 0x0003
-#define SSP_OUT 0x0004
-#define SSP_PIN_SEL(pin, v) ((v) << ((pin) * 3))
-#define SSP_PIN_MASK(pin) SSP_PIN_SEL(pin, 0x7)
-#define SSP_INPUT_SEL(pin) ((pin) << 16)
-
-/* Sequencer port config bits */
-#define SSP_EARLY_DIN BIT(8)
-#define SSP_DELAY_DOUT BIT(9)
-
-/* Sequence map definitions */
-#define SSP_CLK_HIGH BIT(0)
-#define SSP_CLK_LOW 0
-#define SSP_DATA_HIGH BIT(1)
-#define SSP_DATA_LOW 0
-#define SSP_CS_HIGH BIT(2)
-#define SSP_CS_LOW 0
-#define SSP_OUT_MODE BIT(3)
-#define SSP_IN_MODE 0
-#define SSP_DATA_REG BIT(4)
-#define SSP_ADDR_REG 0
-
-#define SSP_OPCODE_DIRECT ((0x0) << 5)
-#define SSP_OPCODE_TOGGLE ((0x1) << 5)
-#define SSP_OPCODE_SHIFT ((0x2) << 5)
-#define SSP_OPCODE_BRANCH0 ((0x4) << 5)
-#define SSP_OPCODE_BRANCH1 ((0x5) << 5)
-#define SSP_OPCODE_BRANCH ((0x6) << 5)
-#define SSP_OPCODE_STOP ((0x7) << 5)
-#define SSP_BRANCH(addr) ((addr) << 8)
-#define SSP_COUNT(cycles) ((cycles) << 8)
-
-int ti_ssp_raw_read(struct device *dev);
-int ti_ssp_raw_write(struct device *dev, u32 val);
-int ti_ssp_load(struct device *dev, int offs, u32* prog, int len);
-int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output);
-int ti_ssp_set_mode(struct device *dev, int mode);
-int ti_ssp_set_iosel(struct device *dev, u32 iosel);
-
-#endif /* __TI_SSP_H__ */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8f6f2e91e7ae..57388171610d 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -5,6 +5,7 @@
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/mmc/card.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -83,6 +84,27 @@
*/
#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
+/*
+ * Some controllers have a register that controls whether CMD12 is
+ * issued automatically
+ */
+#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
+
+/*
+ * Some controllers need the SDIO status reserved bits set to 1
+ */
+#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
+
+/*
+ * Some controllers have a DMA enable/disable register
+ */
+#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
+
+/*
+ * Some controllers allow setting the SDx actual clock
+ */
+#define TMIO_MMC_CLK_ACTUAL (1 << 10)
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
@@ -96,6 +118,7 @@ struct tmio_mmc_dma {
int slave_id_tx;
int slave_id_rx;
int alignment_shift;
+ dma_addr_t dma_rx_offset;
bool (*filter)(struct dma_chan *chan, void *arg);
};
@@ -120,6 +143,8 @@ struct tmio_mmc_data {
/* clock management callbacks */
int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
void (*clk_disable)(struct platform_device *pdev);
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
/*
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 95d6938737fd..ac7fba44d7e4 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -60,6 +60,8 @@
#define TPS65217_REG_SEQ5 0X1D
#define TPS65217_REG_SEQ6 0X1E
+#define TPS65217_REG_MAX TPS65217_REG_SEQ6
+
/* Register field definitions */
#define TPS65217_CHIPID_CHIP_MASK 0xF0
#define TPS65217_CHIPID_REV_MASK 0x0F
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2901c414664..fab9b32ace8e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private);
* Return values from address_space_operations.migratepage():
* - negative errno on page migration failure;
* - zero on page migration success;
- *
- * The balloon page migration introduces this special case where a 'distinct'
- * return code is used to flag a successful page migration to unmap_and_move().
- * This approach is necessary because page migration can race against balloon
- * deflation procedure, and for such case we could introduce a nasty page leak
- * if a successfully migrated balloon page gets released concurrently with
- * migration's unmap_and_move() wrap-up steps.
*/
#define MIGRATEPAGE_SUCCESS 0
-#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page
- * sucessful migration case.
- */
+
enum migrate_reason {
MR_COMPACTION,
MR_MEMORY_FAILURE,
@@ -45,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
extern int migrate_prep(void);
extern int migrate_prep_local(void);
-extern int migrate_vmas(struct mm_struct *mm,
- const nodemask_t *from, const nodemask_t *to,
- unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
@@ -66,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
-static inline int migrate_vmas(struct mm_struct *mm,
- const nodemask_t *from, const nodemask_t *to,
- unsigned long flags)
-{
- return -ENOSYS;
-}
-
static inline void migrate_page_copy(struct page *newpage,
struct page *page) {}
@@ -82,9 +63,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
return -ENOSYS;
}
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 379c02648ab3..64d25941b329 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -67,6 +67,8 @@ enum {
MLX4_CMD_MAP_ICM_AUX = 0xffc,
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd,
+ MLX4_CMD_ACCESS_REG = 0x3b,
+
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
MLX4_CMD_GET_OP_REQ = 0x59,
@@ -197,6 +199,33 @@ enum {
MLX4_CMD_NATIVE
};
+/*
+ * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
+ * The receive checksum value is reported in the CQE also for non-TCP/UDP packets.
+ *
+ * MLX4_RX_CSUM_MODE_L4 -
+ * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
+ * was validated correctly, is supported.
+ *
+ * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
+ * The IP_OK CQE field is also supported for non-TCP/UDP IP packets.
+ *
+ * MLX4_RX_CSUM_MODE_MULTI_VLAN -
+ * Receive Checksum offload is supported for packets with more than 2 vlan headers.
+ */
+enum mlx4_rx_csum_mode {
+ MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0,
+ MLX4_RX_CSUM_MODE_L4 = 1UL << 1,
+ MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2,
+ MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3
+};
+
+struct mlx4_config_dev_params {
+ u16 vxlan_udp_dport;
+ u8 rx_csum_flags_port_1;
+ u8 rx_csum_flags_port_2;
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+ struct mlx4_config_dev_params *params);
/*
* mlx4_get_slave_default_vlan -
* return true if VST ( default vlan)
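A sketch of retrieving the CONFIG_DEV parameters declared above; it relies
only on the mlx4_config_dev_params structure and the
mlx4_config_dev_retrieval() prototype from this hunk:

static int example_get_rx_csum_flags(struct mlx4_dev *dev, u8 *port1, u8 *port2)
{
	struct mlx4_config_dev_params params;
	int err;

	err = mlx4_config_dev_retrieval(dev, &params);
	if (err)
		return err;

	*port1 = params.rx_csum_flags_port_1;
	*port2 = params.rx_csum_flags_port_2;
	return 0;
}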
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a5b7d7cfcedf..25c791e295fd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -38,6 +38,7 @@
#include <linux/completion.h>
#include <linux/radix-tree.h>
#include <linux/cpu_rmap.h>
+#include <linux/crash_dump.h>
#include <linux/atomic.h>
@@ -94,7 +95,7 @@ enum {
enum {
MLX4_MAX_NUM_PF = 16,
- MLX4_MAX_NUM_VF = 64,
+ MLX4_MAX_NUM_VF = 126,
MLX4_MAX_NUM_VF_P_PORT = 64,
MLX4_MFUNC_MAX = 80,
MLX4_MAX_EQ_NUM = 1024,
@@ -116,6 +117,14 @@ enum {
MLX4_STEERING_MODE_DEVICE_MANAGED
};
+enum {
+ MLX4_STEERING_DMFS_A0_DEFAULT,
+ MLX4_STEERING_DMFS_A0_DYNAMIC,
+ MLX4_STEERING_DMFS_A0_STATIC,
+ MLX4_STEERING_DMFS_A0_DISABLE,
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
+};
+
static inline const char *mlx4_steering_mode_str(int steering_mode)
{
switch (steering_mode) {
@@ -184,19 +193,49 @@ enum {
MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
+ MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
+ MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
+ MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
+ MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15,
+ MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
+ MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
+ MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
+ MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
+};
+
+enum {
+ MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0,
+ MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
+};
+
+/* bit enums for an 8-bit flags field indicating special use
+ * QPs which require special handling in qp_reserve_range.
+ * Currently, this only includes QPs used by the ETH interface,
+ * where we expect to use blueflame. These QPs must not have
+ * bits 6 and 7 set in their qp number.
+ *
+ * This enum may use only bits 0..7.
+ */
+enum {
+ MLX4_RESERVE_A0_QP = 1 << 6,
+ MLX4_RESERVE_ETH_BF_QP = 1 << 7,
};
enum {
MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
- MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1
+ MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
+ MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
+ MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3
};
enum {
- MLX4_USER_DEV_CAP_64B_CQE = 1L << 0
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
};
enum {
- MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0
+ MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
+ MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
+ MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
};
@@ -322,6 +361,8 @@ enum {
enum mlx4_qp_region {
MLX4_QP_REGION_FW = 0,
+ MLX4_QP_REGION_RSS_RAW_ETH,
+ MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
MLX4_QP_REGION_ETH_ADDR,
MLX4_QP_REGION_FC_ADDR,
MLX4_QP_REGION_FC_EXCH,
@@ -373,6 +414,13 @@ enum {
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+enum mlx4_module_id {
+ MLX4_MODULE_ID_SFP = 0x3,
+ MLX4_MODULE_ID_QSFP = 0xC,
+ MLX4_MODULE_ID_QSFP_PLUS = 0xD,
+ MLX4_MODULE_ID_QSFP28 = 0x11,
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -427,6 +475,7 @@ struct mlx4_caps {
int num_cqs;
int max_cqes;
int reserved_cqs;
+ int num_sys_eqs;
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
@@ -443,6 +492,7 @@ struct mlx4_caps {
int reserved_mcgs;
int num_qp_per_mgm;
int steering_mode;
+ int dmfs_high_steer_mode;
int fs_log_max_ucast_qp_range_size;
int num_pds;
int reserved_pds;
@@ -481,6 +531,10 @@ struct mlx4_caps {
u16 hca_core_clock;
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
+ u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 alloc_res_qp_mask;
+ u32 dmfs_high_rate_qpn_base;
+ u32 dmfs_high_rate_qpn_range;
};
struct mlx4_buf_list {
@@ -577,7 +631,7 @@ struct mlx4_uar {
};
struct mlx4_bf {
- unsigned long offset;
+ unsigned int offset;
int buf_size;
struct mlx4_uar *uar;
void __iomem *reg;
@@ -601,6 +655,11 @@ struct mlx4_cq {
atomic_t refcount;
struct completion free;
+ struct {
+ struct list_head list;
+ void (*comp)(struct mlx4_cq *);
+ void *priv;
+ } tasklet_ctx;
};
struct mlx4_qp {
@@ -701,6 +760,7 @@ struct mlx4_dev {
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
struct mlx4_vf_dev *dev_vfs;
+ int nvfs[MLX4_MAX_PORTS + 1];
};
struct mlx4_eqe {
@@ -792,6 +852,26 @@ struct mlx4_init_port_param {
u64 si_guid;
};
+#define MAD_IFC_DATA_SZ 192
+/* MAD IFC Mailbox */
+struct mlx4_mad_ifc {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 class_specific;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ __be16 dr_slid;
+ __be16 dr_dlid;
+ u8 reserved[28];
+ u8 data[MAD_IFC_DATA_SZ];
+} __packed;
+
#define mlx4_foreach_port(port, dev, type) \
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if ((type) == (dev)->caps.port_mask[(port)])
@@ -828,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
return (qpn < dev->phys_caps.base_sqpn + 8 +
- 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
+ qpn >= dev->phys_caps.base_sqpn) ||
+ (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
}
static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
@@ -904,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
-
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base, u8 flags);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
@@ -1276,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
u64 iova, u64 size, int npages,
int page_shift, struct mlx4_mpt_entry *mpt_entry);
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ u16 offset, u16 size, u8 *data);
+
/* Returns true if running in low memory profile (kdump kernel) */
static inline bool mlx4_low_memory_profile(void)
{
- return reset_devices;
+ return is_kdump_kernel();
}
+/* ACCESS REG commands */
+enum mlx4_access_reg_method {
+ MLX4_ACCESS_REG_QUERY = 0x1,
+ MLX4_ACCESS_REG_WRITE = 0x2,
+};
+
+/* ACCESS PTYS Reg command */
+enum mlx4_ptys_proto {
+ MLX4_PTYS_IB = 1<<0,
+ MLX4_PTYS_EN = 1<<2,
+};
+
+struct mlx4_ptys_reg {
+ u8 resrvd1;
+ u8 local_port;
+ u8 resrvd2;
+ u8 proto_mask;
+ __be32 resrvd3[2];
+ __be32 eth_proto_cap;
+ __be16 ib_width_cap;
+ __be16 ib_speed_cap;
+ __be32 resrvd4;
+ __be32 eth_proto_admin;
+ __be16 ib_width_admin;
+ __be16 ib_speed_admin;
+ __be32 resrvd5;
+ __be32 eth_proto_oper;
+ __be16 ib_width_oper;
+ __be16 ib_speed_oper;
+ __be32 resrvd6;
+ __be32 eth_proto_lp_adv;
+} __packed;
+
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+ enum mlx4_access_reg_method method,
+ struct mlx4_ptys_reg *ptys_reg);
+
#endif /* MLX4_DEVICE_H */
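A sketch of querying a port's Ethernet protocol capabilities through the new
PTYS access register interface; the fields come straight from struct
mlx4_ptys_reg above and are big-endian on the wire:

static int example_query_eth_proto_cap(struct mlx4_dev *dev, u8 port, u32 *cap)
{
	struct mlx4_ptys_reg ptys_reg;
	int err;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;

	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (err)
		return err;

	*cap = be32_to_cpu(ptys_reg.eth_proto_cap);
	return 0;
}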
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 5f4e36cf0091..467ccdf94c98 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -120,13 +120,15 @@ enum {
MLX4_RSS_QPC_FLAG_OFFSET = 13,
};
+#define MLX4_EN_RSS_KEY_SIZE 40
+
struct mlx4_rss_context {
__be32 base_qpn;
__be32 default_qpn;
u16 reserved;
u8 hash_fn;
u8 flags;
- __be32 rss_key[10];
+ __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
__be32 base_qpn_udp;
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 334947151dfc..4e5bd813bb9a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -44,6 +44,50 @@
#error Host endianness not defined
#endif
+/* helper macros */
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
+
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value into a struct */
+#define MLX5_SET(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
+__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+__mlx5_mask(typ, fld))
+
+#define MLX5_GET_PR(typ, p, fld) ({ \
+ u32 ___t = MLX5_GET(typ, p, fld); \
+ pr_debug(#fld " = 0x%x\n", ___t); \
+ ___t; \
+})
+
+#define MLX5_SET64(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
+} while (0)
+
+#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -71,6 +115,20 @@ enum {
};
enum {
+ MLX5_MIN_PKEY_TABLE_SIZE = 128,
+ MLX5_MAX_LOG_PKEY_TABLE = 5,
+};
+
+enum {
+ MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+ MLX5_PFAULT_SUBTYPE_WQE = 0,
+ MLX5_PFAULT_SUBTYPE_RDMA = 1,
+};
+
+enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -131,6 +189,19 @@ enum {
MLX5_MKEY_MASK_FREE = 1ull << 29,
};
+enum {
+ MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
+
+ MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
+ MLX5_UMR_CHECK_FREE = (2 << 5),
+
+ MLX5_UMR_INLINE = (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
enum mlx5_event {
MLX5_EVENT_TYPE_COMP = 0x0,
@@ -157,6 +228,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_CMD = 0x0a,
MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
+
+ MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
};
enum {
@@ -170,11 +243,7 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
- MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
- MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
@@ -183,11 +252,8 @@ enum {
MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
- MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
- MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
+ MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
};
@@ -243,10 +309,16 @@ enum {
};
enum {
- MLX5_CAP_OFF_DCT = 41,
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+ HCA_CAP_OPMOD_GET_ODP_MAX = 4,
+ HCA_CAP_OPMOD_GET_ODP_CUR = 5
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -274,101 +346,23 @@ struct mlx5_cmd_query_adapter_mbox_out {
u8 vsd_psid[16];
};
-struct mlx5_hca_cap {
- u8 rsvd1[16];
- u8 log_max_srq_sz;
- u8 log_max_qp_sz;
- u8 rsvd2;
- u8 log_max_qp;
- u8 log_max_strq_sz;
- u8 log_max_srqs;
- u8 rsvd4[2];
- u8 rsvd5;
- u8 log_max_cq_sz;
- u8 rsvd6;
- u8 log_max_cq;
- u8 log_max_eq_sz;
- u8 log_max_mkey;
- u8 rsvd7;
- u8 log_max_eq;
- u8 max_indirection;
- u8 log_max_mrw_sz;
- u8 log_max_bsf_list_sz;
- u8 log_max_klm_list_sz;
- u8 rsvd_8_0;
- u8 log_max_ra_req_dc;
- u8 rsvd_8_1;
- u8 log_max_ra_res_dc;
- u8 rsvd9;
- u8 log_max_ra_req_qp;
- u8 rsvd10;
- u8 log_max_ra_res_qp;
- u8 rsvd11[4];
- __be16 max_qp_count;
- __be16 rsvd12;
- u8 rsvd13;
- u8 local_ca_ack_delay;
- u8 rsvd14;
- u8 num_ports;
- u8 log_max_msg;
- u8 rsvd15[3];
- __be16 stat_rate_support;
- u8 rsvd16[2];
- __be64 flags;
- u8 rsvd17;
- u8 uar_sz;
- u8 rsvd18;
- u8 log_pg_sz;
- __be16 bf_log_bf_reg_size;
- u8 rsvd19[4];
- __be16 max_desc_sz_sq;
- u8 rsvd20[2];
- __be16 max_desc_sz_rq;
- u8 rsvd21[2];
- __be16 max_desc_sz_sq_dc;
- __be32 max_qp_mcg;
- u8 rsvd22[3];
- u8 log_max_mcg;
- u8 rsvd23;
- u8 log_max_pd;
- u8 rsvd24;
- u8 log_max_xrcd;
- u8 rsvd25[42];
- __be16 log_uar_page_sz;
- u8 rsvd26[28];
- u8 log_max_atomic_size_qp;
- u8 rsvd27[2];
- u8 log_max_atomic_size_dc;
- u8 rsvd28[76];
-};
-
-
-struct mlx5_cmd_query_hca_cap_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-
-struct mlx5_cmd_query_hca_cap_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_hca_cap hca_cap;
+enum mlx5_odp_transport_cap_bits {
+ MLX5_ODP_SUPPORT_SEND = 1 << 31,
+ MLX5_ODP_SUPPORT_RECV = 1 << 30,
+ MLX5_ODP_SUPPORT_WRITE = 1 << 29,
+ MLX5_ODP_SUPPORT_READ = 1 << 28,
};
-
-struct mlx5_cmd_set_hca_cap_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
- struct mlx5_hca_cap hca_cap;
+struct mlx5_odp_caps {
+ char reserved[0x10];
+ struct {
+ __be32 rc_odp_caps;
+ __be32 uc_odp_caps;
+ __be32 ud_odp_caps;
+ } per_transport_caps;
+ char reserved2[0xe4];
};
-
-struct mlx5_cmd_set_hca_cap_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-
struct mlx5_cmd_init_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
@@ -489,6 +483,27 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+struct mlx5_eqe_page_fault {
+ __be32 bytes_committed;
+ union {
+ struct {
+ u16 reserved1;
+ __be16 wqe_index;
+ u16 reserved2;
+ __be16 packet_length;
+ u8 reserved3[12];
+ } __packed wqe;
+ struct {
+ __be32 r_key;
+ u16 reserved1;
+ __be16 packet_length;
+ __be32 rdma_op_len;
+ __be64 rdma_va;
+ } __packed rdma;
+ } __packed;
+ __be32 flags_qpn;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -500,6 +515,7 @@ union ev_data {
struct mlx5_eqe_congestion cong;
struct mlx5_eqe_stall_vl stall_vl;
struct mlx5_eqe_page_req req_pages;
+ struct mlx5_eqe_page_fault page_fault;
} __packed;
struct mlx5_eqe {
@@ -826,6 +842,10 @@ struct mlx5_query_eq_mbox_out {
struct mlx5_eq_context ctx;
};
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
@@ -862,7 +882,7 @@ struct mlx5_query_special_ctxs_mbox_out {
struct mlx5_create_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_mkey_index;
- u8 rsvd0[4];
+ __be32 flags;
struct mlx5_mkey_seg seg;
u8 rsvd1[16];
__be32 xlat_oct_act_size;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b88e9b46d957..166d9315fe4b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,6 +44,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
+#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -99,81 +100,6 @@ enum {
};
enum {
- MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
- MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
- MLX5_CMD_OP_INIT_HCA = 0x102,
- MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
- MLX5_CMD_OP_ENABLE_HCA = 0x104,
- MLX5_CMD_OP_DISABLE_HCA = 0x105,
- MLX5_CMD_OP_QUERY_PAGES = 0x107,
- MLX5_CMD_OP_MANAGE_PAGES = 0x108,
- MLX5_CMD_OP_SET_HCA_CAP = 0x109,
-
- MLX5_CMD_OP_CREATE_MKEY = 0x200,
- MLX5_CMD_OP_QUERY_MKEY = 0x201,
- MLX5_CMD_OP_DESTROY_MKEY = 0x202,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
-
- MLX5_CMD_OP_CREATE_EQ = 0x301,
- MLX5_CMD_OP_DESTROY_EQ = 0x302,
- MLX5_CMD_OP_QUERY_EQ = 0x303,
-
- MLX5_CMD_OP_CREATE_CQ = 0x400,
- MLX5_CMD_OP_DESTROY_CQ = 0x401,
- MLX5_CMD_OP_QUERY_CQ = 0x402,
- MLX5_CMD_OP_MODIFY_CQ = 0x403,
-
- MLX5_CMD_OP_CREATE_QP = 0x500,
- MLX5_CMD_OP_DESTROY_QP = 0x501,
- MLX5_CMD_OP_RST2INIT_QP = 0x502,
- MLX5_CMD_OP_INIT2RTR_QP = 0x503,
- MLX5_CMD_OP_RTR2RTS_QP = 0x504,
- MLX5_CMD_OP_RTS2RTS_QP = 0x505,
- MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
- MLX5_CMD_OP_2ERR_QP = 0x507,
- MLX5_CMD_OP_RTS2SQD_QP = 0x508,
- MLX5_CMD_OP_SQD2RTS_QP = 0x509,
- MLX5_CMD_OP_2RST_QP = 0x50a,
- MLX5_CMD_OP_QUERY_QP = 0x50b,
- MLX5_CMD_OP_CONF_SQP = 0x50c,
- MLX5_CMD_OP_MAD_IFC = 0x50d,
- MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
- MLX5_CMD_OP_SUSPEND_QP = 0x50f,
- MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
- MLX5_CMD_OP_SQD2SQD_QP = 0x511,
- MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
- MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
- MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
-
- MLX5_CMD_OP_CREATE_PSV = 0x600,
- MLX5_CMD_OP_DESTROY_PSV = 0x601,
- MLX5_CMD_OP_QUERY_PSV = 0x602,
- MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
- MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
-
- MLX5_CMD_OP_CREATE_SRQ = 0x700,
- MLX5_CMD_OP_DESTROY_SRQ = 0x701,
- MLX5_CMD_OP_QUERY_SRQ = 0x702,
- MLX5_CMD_OP_ARM_RQ = 0x703,
- MLX5_CMD_OP_RESIZE_SRQ = 0x704,
-
- MLX5_CMD_OP_ALLOC_PD = 0x800,
- MLX5_CMD_OP_DEALLOC_PD = 0x801,
- MLX5_CMD_OP_ALLOC_UAR = 0x802,
- MLX5_CMD_OP_DEALLOC_UAR = 0x803,
-
- MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
-
-
- MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
- MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
-
- MLX5_CMD_OP_ACCESS_REG = 0x805,
- MLX5_CMD_OP_MAX = 0x810,
-};
-
-enum {
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -187,6 +113,13 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
+enum mlx5_page_fault_resume_flags {
+ MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
+ MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
+ MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
+ MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
+};
+
enum dbg_rsc_type {
MLX5_DBG_RSC_QP,
MLX5_DBG_RSC_EQ,
@@ -335,23 +268,30 @@ struct mlx5_port_caps {
int pkey_table_len;
};
-struct mlx5_caps {
+struct mlx5_general_caps {
u8 log_max_eq;
u8 log_max_cq;
u8 log_max_qp;
u8 log_max_mkey;
u8 log_max_pd;
u8 log_max_srq;
+ u8 log_max_strq;
+ u8 log_max_mrw_sz;
+ u8 log_max_bsf_list_size;
+ u8 log_max_klm_list_size;
u32 max_cqes;
int max_wqes;
+ u32 max_eqes;
+ u32 max_indirection;
int max_sq_desc_sz;
int max_rq_desc_sz;
+ int max_dc_sq_desc_sz;
u64 flags;
u16 stat_rate_support;
int log_max_msg;
int num_ports;
- int max_ra_res_qp;
- int max_ra_req_qp;
+ u8 log_max_ra_res_qp;
+ u8 log_max_ra_req_qp;
int max_srq_wqes;
int bf_reg_size;
int bf_regs_per_page;
@@ -363,6 +303,19 @@ struct mlx5_caps {
u8 log_max_mcg;
u32 max_qp_mcg;
int min_page_sz;
+ int pd_cap;
+ u32 max_qp_counters;
+ u32 pkey_table_size;
+ u8 log_max_ra_req_dc;
+ u8 log_max_ra_res_dc;
+ u32 uar_sz;
+ u8 min_log_pg_sz;
+ u8 log_max_xrcd;
+ u16 log_uar_page_sz;
+};
+
+struct mlx5_caps {
+ struct mlx5_general_caps gen;
};
struct mlx5_cmd_mailbox {
@@ -429,6 +382,16 @@ struct mlx5_core_mr {
u32 pd;
};
+enum mlx5_res_type {
+ MLX5_RES_QP,
+};
+
+struct mlx5_core_rsc_common {
+ enum mlx5_res_type res;
+ atomic_t refcount;
+ struct completion free;
+};
+
struct mlx5_core_srq {
u32 srqn;
int max;
@@ -511,7 +474,7 @@ struct mlx5_priv {
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
- int reg_pages;
+ atomic_t reg_pages;
struct list_head free_list;
struct mlx5_core_health health;
@@ -677,14 +640,6 @@ static inline void *mlx5_vzalloc(unsigned long size)
return rtn;
}
-static inline void mlx5_vfree(const void *addr)
-{
- if (addr && is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
-}
-
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
@@ -695,6 +650,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
+int mlx5_cmd_status_to_err_v2(void *ptr);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
+ u16 opmod);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -751,7 +709,10 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
-void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -788,6 +749,9 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
+ struct mlx5_odp_caps *odp_caps);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
new file mode 100644
index 000000000000..5f48b8f592c5
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_IFC_H
+#define MLX5_IFC_H
+
+enum {
+ MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+ MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
+ MLX5_CMD_OP_INIT_HCA = 0x102,
+ MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
+ MLX5_CMD_OP_ENABLE_HCA = 0x104,
+ MLX5_CMD_OP_DISABLE_HCA = 0x105,
+ MLX5_CMD_OP_QUERY_PAGES = 0x107,
+ MLX5_CMD_OP_MANAGE_PAGES = 0x108,
+ MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_QUERY_MKEY = 0x201,
+ MLX5_CMD_OP_DESTROY_MKEY = 0x202,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_CREATE_EQ = 0x301,
+ MLX5_CMD_OP_DESTROY_EQ = 0x302,
+ MLX5_CMD_OP_QUERY_EQ = 0x303,
+ MLX5_CMD_OP_GEN_EQE = 0x304,
+ MLX5_CMD_OP_CREATE_CQ = 0x400,
+ MLX5_CMD_OP_DESTROY_CQ = 0x401,
+ MLX5_CMD_OP_QUERY_CQ = 0x402,
+ MLX5_CMD_OP_MODIFY_CQ = 0x403,
+ MLX5_CMD_OP_CREATE_QP = 0x500,
+ MLX5_CMD_OP_DESTROY_QP = 0x501,
+ MLX5_CMD_OP_RST2INIT_QP = 0x502,
+ MLX5_CMD_OP_INIT2RTR_QP = 0x503,
+ MLX5_CMD_OP_RTR2RTS_QP = 0x504,
+ MLX5_CMD_OP_RTS2RTS_QP = 0x505,
+ MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
+ MLX5_CMD_OP_2ERR_QP = 0x507,
+ MLX5_CMD_OP_2RST_QP = 0x50a,
+ MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
+ MLX5_CMD_OP_CREATE_PSV = 0x600,
+ MLX5_CMD_OP_DESTROY_PSV = 0x601,
+ MLX5_CMD_OP_CREATE_SRQ = 0x700,
+ MLX5_CMD_OP_DESTROY_SRQ = 0x701,
+ MLX5_CMD_OP_QUERY_SRQ = 0x702,
+ MLX5_CMD_OP_ARM_RQ = 0x703,
+ MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_DCT = 0x710,
+ MLX5_CMD_OP_DESTROY_DCT = 0x711,
+ MLX5_CMD_OP_DRAIN_DCT = 0x712,
+ MLX5_CMD_OP_QUERY_DCT = 0x713,
+ MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
+ MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
+ MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
+ MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_ALLOC_PD = 0x800,
+ MLX5_CMD_OP_DEALLOC_PD = 0x801,
+ MLX5_CMD_OP_ALLOC_UAR = 0x802,
+ MLX5_CMD_OP_DEALLOC_UAR = 0x803,
+ MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
+ MLX5_CMD_OP_ACCESS_REG = 0x805,
+ MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
+ MLX5_CMD_OP_MAD_IFC = 0x50d,
+ MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
+ MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c,
+ MLX5_CMD_OP_NOP = 0x80d,
+ MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
+ MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
+ MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
+ MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
+ MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
+ MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
+ MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
+ MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_CREATE_TIR = 0x900,
+ MLX5_CMD_OP_MODIFY_TIR = 0x901,
+ MLX5_CMD_OP_DESTROY_TIR = 0x902,
+ MLX5_CMD_OP_QUERY_TIR = 0x903,
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_SQ = 0x904,
+ MLX5_CMD_OP_MODIFY_SQ = 0x905,
+ MLX5_CMD_OP_DESTROY_SQ = 0x906,
+ MLX5_CMD_OP_QUERY_SQ = 0x907,
+ MLX5_CMD_OP_CREATE_RQ = 0x908,
+ MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_DESTROY_RQ = 0x90a,
+ MLX5_CMD_OP_QUERY_RQ = 0x90b,
+ MLX5_CMD_OP_CREATE_RMP = 0x90c,
+ MLX5_CMD_OP_MODIFY_RMP = 0x90d,
+ MLX5_CMD_OP_DESTROY_RMP = 0x90e,
+ MLX5_CMD_OP_QUERY_RMP = 0x90f,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
+ MLX5_CMD_OP_MAX = 0x911
+};
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_0[0x80];
+
+ u8 log_max_srq_sz[0x8];
+ u8 log_max_qp_sz[0x8];
+ u8 reserved_1[0xb];
+ u8 log_max_qp[0x5];
+
+ u8 log_max_strq_sz[0x8];
+ u8 reserved_2[0x3];
+ u8 log_max_srqs[0x5];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0x8];
+ u8 log_max_cq_sz[0x8];
+ u8 reserved_5[0xb];
+ u8 log_max_cq[0x5];
+
+ u8 log_max_eq_sz[0x8];
+ u8 reserved_6[0x2];
+ u8 log_max_mkey[0x6];
+ u8 reserved_7[0xc];
+ u8 log_max_eq[0x4];
+
+ u8 max_indirection[0x8];
+ u8 reserved_8[0x1];
+ u8 log_max_mrw_sz[0x7];
+ u8 reserved_9[0x2];
+ u8 log_max_bsf_list_size[0x6];
+ u8 reserved_10[0x2];
+ u8 log_max_klm_list_size[0x6];
+
+ u8 reserved_11[0xa];
+ u8 log_max_ra_req_dc[0x6];
+ u8 reserved_12[0xa];
+ u8 log_max_ra_res_dc[0x6];
+
+ u8 reserved_13[0xa];
+ u8 log_max_ra_req_qp[0x6];
+ u8 reserved_14[0xa];
+ u8 log_max_ra_res_qp[0x6];
+
+ u8 pad_cap[0x1];
+ u8 cc_query_allowed[0x1];
+ u8 cc_modify_allowed[0x1];
+ u8 reserved_15[0x1d];
+
+ u8 reserved_16[0x6];
+ u8 max_qp_cnt[0xa];
+ u8 pkey_table_size[0x10];
+
+ u8 eswitch_owner[0x1];
+ u8 reserved_17[0xa];
+ u8 local_ca_ack_delay[0x5];
+ u8 reserved_18[0x8];
+ u8 num_ports[0x8];
+
+ u8 reserved_19[0x3];
+ u8 log_max_msg[0x5];
+ u8 reserved_20[0x18];
+
+ u8 stat_rate_support[0x10];
+ u8 reserved_21[0x10];
+
+ u8 reserved_22[0x10];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+ u8 reserved_23[0x1];
+ u8 wq_signature[0x1];
+ u8 sctr_data_cqe[0x1];
+ u8 reserved_24[0x1];
+ u8 sho[0x1];
+ u8 tph[0x1];
+ u8 rf[0x1];
+ u8 dc[0x1];
+ u8 reserved_25[0x2];
+ u8 roce[0x1];
+ u8 atomic[0x1];
+ u8 rsz_srq[0x1];
+
+ u8 cq_oi[0x1];
+ u8 cq_resize[0x1];
+ u8 cq_moderation[0x1];
+ u8 sniffer_rule_flow[0x1];
+ u8 sniffer_rule_vport[0x1];
+ u8 sniffer_rule_phy[0x1];
+ u8 reserved_26[0x1];
+ u8 pg[0x1];
+ u8 block_lb_mc[0x1];
+ u8 reserved_27[0x3];
+ u8 cd[0x1];
+ u8 reserved_28[0x1];
+ u8 apm[0x1];
+ u8 reserved_29[0x7];
+ u8 qkv[0x1];
+ u8 pkv[0x1];
+ u8 reserved_30[0x4];
+ u8 xrc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+
+ u8 reserved_31[0xa];
+ u8 uar_sz[0x6];
+ u8 reserved_32[0x8];
+ u8 log_pg_sz[0x8];
+
+ u8 bf[0x1];
+ u8 reserved_33[0xa];
+ u8 log_bf_reg_size[0x5];
+ u8 reserved_34[0x10];
+
+ u8 reserved_35[0x10];
+ u8 max_wqe_sz_sq[0x10];
+
+ u8 reserved_36[0x10];
+ u8 max_wqe_sz_rq[0x10];
+
+ u8 reserved_37[0x10];
+ u8 max_wqe_sz_sq_dc[0x10];
+
+ u8 reserved_38[0x7];
+ u8 max_qp_mcg[0x19];
+
+ u8 reserved_39[0x18];
+ u8 log_max_mcg[0x8];
+
+ u8 reserved_40[0xb];
+ u8 log_max_pd[0x5];
+ u8 reserved_41[0xb];
+ u8 log_max_xrcd[0x5];
+
+ u8 reserved_42[0x20];
+
+ u8 reserved_43[0x3];
+ u8 log_max_rq[0x5];
+ u8 reserved_44[0x3];
+ u8 log_max_sq[0x5];
+ u8 reserved_45[0x3];
+ u8 log_max_tir[0x5];
+ u8 reserved_46[0x3];
+ u8 log_max_tis[0x5];
+
+ u8 reserved_47[0x13];
+ u8 log_max_rq_per_tir[0x5];
+ u8 reserved_48[0x3];
+ u8 log_max_tis_per_sq[0x5];
+
+ u8 reserved_49[0xe0];
+
+ u8 reserved_50[0x10];
+ u8 log_uar_page_sz[0x10];
+
+ u8 reserved_51[0x100];
+
+ u8 reserved_52[0x1f];
+ u8 cqe_zip[0x1];
+
+ u8 cqe_zip_timeout[0x10];
+ u8 cqe_zip_max_num[0x10];
+
+ u8 reserved_53[0x220];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 capability_struct[256][0x8];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+#endif /* MLX5_IFC_H */
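The mlx5_ifc_*_bits structures above are pure layout descriptions: a member such as u8 log_max_qp[0x5] names a 5-bit field at that bit position in the big-endian command buffer, not five bytes of storage. A minimal sketch of pulling such a field out of a raw buffer (illustrative only, not the driver's accessors; with the layout above, log_max_qp would sit at bit offset 155, width 5):

#include <stdint.h>

/*
 * Extract a field of up to 32 bits from a big-endian command buffer,
 * given the bit offset and width implied by an mlx5_ifc_*_bits layout.
 */
static uint32_t ifc_get_bits(const uint8_t *buf, unsigned int bit_off,
			     unsigned int width)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < width; i++) {
		unsigned int bit = bit_off + i;

		val = (val << 1) | ((buf[bit / 8] >> (7 - (bit % 8))) & 1);
	}
	return val;
}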
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 9709b30e2d69..61f7a342d1bf 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -40,6 +40,18 @@
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
+#define MLX5_CPY_GRD_MASK 0xc0
+#define MLX5_CPY_APP_MASK 0x30
+#define MLX5_CPY_REF_MASK 0x0f
+#define MLX5_BSF_INC_REFTAG (1 << 6)
+#define MLX5_BSF_INL_VALID (1 << 15)
+#define MLX5_BSF_REFRESH_DIF (1 << 14)
+#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
+#define MLX5_BSF_APPTAG_ESCAPE 0x1
+#define MLX5_BSF_APPREF_ESCAPE 0x2
+
+#define MLX5_QPN_BITS 24
+#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
@@ -180,6 +192,14 @@ struct mlx5_wqe_ctrl_seg {
__be32 imm;
};
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
+#define MLX5_WQE_DS_UNITS 16
+#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
+#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
@@ -283,10 +303,28 @@ struct mlx5_wqe_signature_seg {
u8 rsvd1[11];
};
+#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
+
struct mlx5_wqe_inline_seg {
__be32 byte_count;
};
+enum mlx5_sig_type {
+ MLX5_DIF_CRC = 0x1,
+ MLX5_DIF_IPCS = 0x2,
+};
+
+struct mlx5_bsf_inl {
+ __be16 vld_refresh;
+ __be16 dif_apptag;
+ __be32 dif_reftag;
+ u8 sig_type;
+ u8 rp_inv_seed;
+ u8 rsvd[3];
+ u8 dif_inc_ref_guard_check;
+ __be16 dif_app_bitmask_check;
+};
+
struct mlx5_bsf {
struct mlx5_bsf_basic {
u8 bsf_size_sbs;
@@ -310,14 +348,8 @@ struct mlx5_bsf {
__be32 w_tfs_psv;
__be32 m_tfs_psv;
} ext;
- struct mlx5_bsf_inl {
- __be32 w_inl_vld;
- __be32 w_rsvd;
- __be64 w_block_format;
- __be32 m_inl_vld;
- __be32 m_rsvd;
- __be64 m_block_format;
- } inl;
+ struct mlx5_bsf_inl w_inl;
+ struct mlx5_bsf_inl m_inl;
};
struct mlx5_klm {
@@ -341,11 +373,47 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+enum mlx5_pagefault_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u8 event_subtype;
+ enum mlx5_pagefault_flags flags;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+};
+
struct mlx5_core_qp {
+ struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
+ void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
int qpn;
- atomic_t refcount;
- struct completion free;
struct mlx5_rsc_debug *dbg;
int pid;
};
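The pfault_handler callback together with struct mlx5_pagefault is the hook an on-demand-paging user would fill in. A hedged sketch of such a handler; my_qp_to_dev(), the my_resolve_*() helpers and the use of event_subtype as the resume context are assumptions of this example, not part of the patch:

/*
 * Sketch only: service a page fault reported on a QP, then resume it.
 */
static void my_pfault_handler(struct mlx5_core_qp *qp,
			      struct mlx5_pagefault *pfault)
{
	struct mlx5_core_dev *dev = my_qp_to_dev(qp);	/* hypothetical */
	int err;

	if (pfault->flags & MLX5_PFAULT_RDMA)
		/* RDMA responder fault: r_key/address/length in pfault->rdma */
		err = my_resolve_rdma_fault(dev, &pfault->rdma);
	else
		/* Send/receive WQE fault: offending index in pfault->wqe */
		err = my_resolve_wqe_fault(dev, qp, pfault->wqe.wqe_index);

	mlx5_core_page_fault_resume(dev, qp->qpn, pfault->event_subtype,
				    err ? 1 : 0);
}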
@@ -512,6 +580,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
+struct mlx5_page_fault_resume_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 flags_qpn;
+ u8 reserved[4];
+};
+
+struct mlx5_page_fault_resume_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
@@ -531,6 +610,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+ u8 context, int error);
+#endif
static inline const char *mlx5_qp_type_str(int type)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8981cc882ed2..f80d0194c9bc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -18,6 +18,8 @@
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
+#include <linux/resource.h>
+#include <linux/page_ext.h>
struct mempolicy;
struct anon_vma;
@@ -55,6 +57,17 @@ extern int sysctl_legacy_va_layout;
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
+/*
+ * To prevent common memory management code from establishing
+ * a zero page mapping on a read fault.
+ * This macro should be defined within <asm/pgtable.h>.
+ * s390 does this to prevent multiplexing of hardware bits
+ * related to the physical page in case of virtualization.
+ */
+#ifndef mm_forbids_zeropage
+#define mm_forbids_zeropage(X) (0)
+#endif
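An architecture that needs this would supply its own predicate in <asm/pgtable.h> before the fallback above is seen, along the lines of (the helper name is made up for illustration):

/* in <asm/pgtable.h>, evaluated per-mm at fault time */
#define mm_forbids_zeropage(mm)	arch_mm_uses_storage_keys(mm)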
+
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
@@ -127,6 +140,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
+#define VM_ARCH_2 0x02000000
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -154,6 +168,11 @@ extern unsigned int kobjsize(const void *objp);
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
+#if defined(CONFIG_X86)
+/* MPX specific bounds table or bounds directory */
+# define VM_MPX VM_ARCH_2
+#endif
+
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
@@ -267,8 +286,6 @@ struct vm_operations_struct {
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
- int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
- const nodemask_t *to, unsigned long flags);
#endif
/* called by sys_remap_file_pages() to populate non-linear mapping */
int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
@@ -346,6 +363,7 @@ static inline int put_page_unless_one(struct page *page)
}
extern int page_is_ram(unsigned long pfn);
+extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
@@ -553,6 +571,25 @@ static inline void __ClearPageBuddy(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBalloon(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -1155,6 +1192,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
@@ -1213,7 +1251,6 @@ int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
@@ -1247,8 +1284,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-extern pid_t
-vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+extern struct task_struct *task_of_stack(struct task_struct *task,
+ struct vm_area_struct *vma, bool in_group);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -1780,6 +1817,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+static inline int check_data_rlimit(unsigned long rlim,
+ unsigned long new,
+ unsigned long start,
+ unsigned long end_data,
+ unsigned long start_data)
+{
+ if (rlim < RLIM_INFINITY) {
+ if (((new - start) + (end_data - start_data)) > rlim)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
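check_data_rlimit() factors out the RLIMIT_DATA test that a brk-style expansion must pass; a call site would look roughly like this sketch (newbrk being the requested new break):

	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk,
			      mm->start_brk, mm->end_data, mm->start_data))
		goto out;	/* expansion would exceed RLIMIT_DATA */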
+
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
@@ -1939,11 +1990,16 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
+void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
return __pgprot(0);
}
+static inline void vma_set_page_prot(struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+}
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -1985,6 +2041,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
+#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
@@ -2002,7 +2059,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
+extern bool _debug_pagealloc_enabled;
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline bool debug_pagealloc_enabled(void)
+{
+ return _debug_pagealloc_enabled;
+}
+
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ __kernel_map_pages(page, numpages, enable);
+}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
@@ -2036,9 +2108,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
#endif
-unsigned long shrink_slab(struct shrink_control *shrink,
- unsigned long nr_pages_scanned,
- unsigned long lru_pages);
+unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
+ unsigned long nr_scanned,
+ unsigned long nr_eligible);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -2097,20 +2169,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+extern struct page_ext_operations debug_guardpage_ops;
+extern struct page_ext_operations page_poisoning_ops;
+
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
+extern bool _debug_guardpage_enabled;
static inline unsigned int debug_guardpage_minorder(void)
{
return _debug_guardpage_minorder;
}
+static inline bool debug_guardpage_enabled(void)
+{
+ return _debug_guardpage_enabled;
+}
+
static inline bool page_is_guard(struct page *page)
{
- return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ if (!debug_guardpage_enabled())
+ return false;
+
+ page_ext = lookup_page_ext(page);
+ return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6e0b286649f1..6d34aa266a8c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,7 +10,6 @@
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
-#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
@@ -22,6 +21,7 @@
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
struct address_space;
+struct mem_cgroup;
#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
@@ -167,6 +167,10 @@ struct page {
struct page *first_page; /* Compound tail pages */
};
+#ifdef CONFIG_MEMCG
+ struct mem_cgroup *mem_cgroup;
+#endif
+
/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
@@ -181,9 +185,6 @@ struct page {
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
- unsigned long debug_flags; /* Use atomic bitops on this */
-#endif
#ifdef CONFIG_KMEMCHECK
/*
@@ -454,6 +455,10 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_X86_INTEL_MPX
+ /* address of the bounds directory */
+ void __user *bd_addr;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
@@ -525,4 +530,12 @@ enum tlb_flush_reason {
NR_TLB_FLUSH_REASONS,
};
+ /*
+ * A swap entry has to fit into a "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d424b9de3aff..4d69c00497bd 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -42,7 +42,8 @@ struct mmc_csd {
unsigned int read_partial:1,
read_misalign:1,
write_partial:1,
- write_misalign:1;
+ write_misalign:1,
+ dsr_imp:1;
};
struct mmc_ext_csd {
@@ -74,7 +75,7 @@ struct mmc_ext_csd {
unsigned int sec_trim_mult; /* Secure trim multiplier */
unsigned int sec_erase_mult; /* Secure erase multiplier */
unsigned int trim_timeout; /* In milliseconds */
- bool enhanced_area_en; /* enable bit */
+ bool partition_setting_completed; /* enable bit */
unsigned long long enhanced_area_offset; /* Units: Byte */
unsigned int enhanced_area_size; /* Units: KB */
unsigned int cache_size; /* Units: KB */
@@ -87,6 +88,9 @@ struct mmc_ext_csd {
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
+ bool ffu_capable; /* Firmware upgrade support */
+#define MMC_FIRMWARE_LEN 8
+ u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
u8 raw_exception_status; /* 54 */
u8 raw_partition_support; /* 160 */
u8 raw_rpmb_size_mult; /* 168 */
@@ -214,11 +218,12 @@ enum mmc_blk_status {
};
/* The number of MMC physical partitions. These consist of:
- * boot partitions (2), general purpose partitions (4) in MMC v4.4.
+ * boot partitions (2), general purpose partitions (4) and
+ * RPMB partition (1) in MMC v4.4.
*/
#define MMC_NUM_BOOT_PARTITION 2
#define MMC_NUM_GP_PARTITION 4
-#define MMC_NUM_PHY_PARTITION 6
+#define MMC_NUM_PHY_PARTITION 7
#define MAX_MMC_PART_NAME_LEN 20
/*
@@ -507,24 +512,8 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
-#define mmc_list_to_card(l) container_of(l, struct mmc_card, node)
-#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
-#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d)
-
-/*
- * MMC device driver (e.g., Flash card, I/O card...)
- */
-struct mmc_driver {
- struct device_driver drv;
- int (*probe)(struct mmc_card *);
- void (*remove)(struct mmc_card *);
- int (*suspend)(struct mmc_card *);
- int (*resume)(struct mmc_card *);
- void (*shutdown)(struct mmc_card *);
-};
-
-extern int mmc_register_driver(struct mmc_driver *);
-extern void mmc_unregister_driver(struct mmc_driver *);
+extern int mmc_register_driver(struct device_driver *);
+extern void mmc_unregister_driver(struct device_driver *);
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index f206e29f94d7..cb2b0400d284 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -154,7 +154,8 @@ extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
bool, bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
-extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
+extern int mmc_send_tuning(struct mmc_host *host);
+extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 29ce014ab421..42b724e8d503 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -26,6 +26,8 @@ enum dw_mci_state {
STATE_DATA_BUSY,
STATE_SENDING_STOP,
STATE_DATA_ERROR,
+ STATE_SENDING_CMD11,
+ STATE_WAITING_CMD11_DONE,
};
enum {
@@ -52,6 +54,7 @@ struct mmc_data;
* transfer is in progress.
* @use_dma: Whether DMA channel is initialized or not.
* @using_dma: Whether DMA is in use for the current transfer.
+ * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
* @sg_dma: Bus address of DMA buffer.
* @sg_cpu: Virtual address of DMA buffer.
* @dma_ops: Pointer to platform-specific DMA callbacks.
@@ -94,6 +97,7 @@ struct mmc_data;
* @quirks: Set of quirks that apply to specific versions of the IP.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
+ * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
*
* Locking
* =======
@@ -133,11 +137,11 @@ struct dw_mci {
struct mmc_command stop_abort;
unsigned int prev_blksz;
unsigned char timing;
- struct workqueue_struct *card_workqueue;
/* DMA interface members*/
int use_dma;
int using_dma;
+ int dma_64bit_address;
dma_addr_t sg_dma;
void *sg_cpu;
@@ -152,7 +156,6 @@ struct dw_mci {
u32 stop_cmdr;
u32 dir_status;
struct tasklet_struct tasklet;
- struct work_struct card_work;
unsigned long pending_events;
unsigned long completed_events;
enum dw_mci_state state;
@@ -188,9 +191,11 @@ struct dw_mci {
/* Workaround flags */
u32 quirks;
- struct regulator *vmmc; /* Power regulator */
+ bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
+
+ int sdio_id0;
};
/* DMA ops for Internal/External DMAC interface */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7960424d0bc0..9f322706f7cb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -42,6 +42,7 @@ struct mmc_ios {
#define MMC_POWER_OFF 0
#define MMC_POWER_UP 1
#define MMC_POWER_ON 2
+#define MMC_POWER_UNDEFINED 3
unsigned char bus_width; /* data bus width */
@@ -139,6 +140,13 @@ struct mmc_host_ops {
int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
+
+ /*
+ * Optional callback to support controllers with HW issues for multiple
+ * I/O. Returns the number of supported blocks for the request.
+ */
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
struct mmc_card;
@@ -265,7 +273,6 @@ struct mmc_host {
#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
-#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
@@ -282,6 +289,7 @@ struct mmc_host {
#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \
MMC_CAP2_HS400_1_2V)
+#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -365,6 +373,9 @@ struct mmc_host {
unsigned int slotno; /* used for sdio acpi binding */
+ int dsr_req; /* DSR value is valid */
+ u32 dsr; /* optional driver stage (DSR) value */
+
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 64ec963ed347..49ad7a943638 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,6 +53,11 @@
#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
+#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
+#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
+extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
+extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
+
/* class 3 */
#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
@@ -281,6 +286,7 @@ struct _mmc_csd {
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
+#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_HPI_MGMT 161 /* R/W */
@@ -290,6 +296,7 @@ struct _mmc_csd {
#define EXT_CSD_SANITIZE_START 165 /* W */
#define EXT_CSD_WR_REL_PARAM 166 /* RO */
#define EXT_CSD_RPMB_MULT 168 /* RO */
+#define EXT_CSD_FW_CONFIG 169 /* R/W */
#define EXT_CSD_BOOT_WP 173 /* R/W */
#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
#define EXT_CSD_PART_CONFIG 179 /* R/W */
@@ -326,6 +333,8 @@ struct _mmc_csd {
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
+#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
+#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
@@ -349,6 +358,7 @@ struct _mmc_csd {
#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
+#define EXT_CSD_PART_SETTING_COMPLETED (0x1)
#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
#define EXT_CSD_CMD_SET_NORMAL (1<<0)
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 09ebe57d5ce9..375af80bde7d 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -98,6 +98,14 @@ struct sdhci_host {
#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
/* Controller does not support DDR50 */
#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
+/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
+#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
+/* Controller does not support 64-bit DMA */
+#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9)
+/* need clear transfer mode register before send cmd */
+#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
+/* Capability register bit-63 indicates HS400 support */
+#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -128,6 +136,7 @@ struct sdhci_host {
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
+#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
unsigned int version; /* SDHCI spec. version */
@@ -146,18 +155,26 @@ struct sdhci_host {
struct mmc_command *cmd; /* Current command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
+ unsigned int busy_handle:1; /* Handling the order of Busy-end */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */
- u8 *adma_desc; /* ADMA descriptor table */
- u8 *align_buffer; /* Bounce buffer */
+ void *adma_table; /* ADMA descriptor table */
+ void *align_buffer; /* Bounce buffer */
+
+ size_t adma_table_sz; /* ADMA descriptor table size */
+ size_t align_buffer_sz; /* Bounce buffer size */
dma_addr_t adma_addr; /* Mapped ADMA descr. table */
dma_addr_t align_addr; /* Mapped bounce buffer */
+ unsigned int desc_sz; /* ADMA descriptor size */
+ unsigned int align_sz; /* ADMA alignment */
+ unsigned int align_mask; /* ADMA alignment mask */
+
struct tasklet_struct finish_tasklet; /* Tasklet structures */
struct timer_list timer; /* Timer for timeouts */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 50f0bc952328..aab032a6ae61 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -84,8 +84,6 @@ struct sdio_driver {
struct device_driver drv;
};
-#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
-
/**
* SDIO_DEVICE - macro used to describe a specific SDIO device
* @vend: the 16 bit manufacturer code
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index d2433381e828..e56fa24c9322 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce);
+ unsigned int debounce, bool *gpio_invert);
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert);
void mmc_gpiod_free_cd(struct mmc_host *host);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2f348d02f640..877ef226f90f 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,10 +4,14 @@
#include <linux/stringify.h>
struct page;
+struct vm_area_struct;
+struct mm_struct;
extern void dump_page(struct page *page, const char *reason);
extern void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags);
+void dump_vma(const struct vm_area_struct *vma);
+void dump_mm(const struct mm_struct *mm);
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -18,12 +22,28 @@ extern void dump_page_badflags(struct page *page, const char *reason,
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_VMA(cond, vma) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_vma(vma); \
+ BUG(); \
+ } \
+ } while (0)
+#define VM_BUG_ON_MM(cond, mm) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_mm(mm); \
+ BUG(); \
+ } \
+ } while (0)
#define VM_WARN_ON(cond) WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
+#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
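The new VM_BUG_ON_VMA()/VM_BUG_ON_MM() variants act like VM_BUG_ON() but dump the offending VMA or mm before the BUG, so the oops carries the relevant state. Illustrative invariants (examples only):

	/* a valid VMA never has an inverted range */
	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);

	/* any code path holding an mm reference expects a positive mm_count */
	VM_BUG_ON_MM(atomic_read(&mm->mm_count) <= 0, mm);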
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 27288692241e..95243d28a0ee 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -57,10 +57,13 @@ struct mmu_notifier_ops {
* pte. This way the VM will provide proper aging to the
* accesses to the page through the secondary MMUs and not
* only to the ones through the Linux pte.
+ * Start-end is necessary in case the secondary MMU is mapping the page
+ * at a smaller granularity than the primary MMU.
*/
int (*clear_flush_young)(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long address);
+ unsigned long start,
+ unsigned long end);
/*
* test_young is called to check the young/accessed bitflag in
@@ -95,11 +98,11 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
- * locks protecting the reverse maps are held. The subsystem
- * must guarantee that no additional references are taken to
- * the pages in the range established between the call to
- * invalidate_range_start() and the matching call to
- * invalidate_range_end().
+ * locks protecting the reverse maps are held. If the subsystem
+ * can't guarantee that no additional references are taken to
+ * the pages in the range, it has to implement the
+ * invalidate_range() notifier to remove any references taken
+ * after invalidate_range_start().
*
* Invalidation of multiple concurrent ranges may be
* optionally permitted by the driver. Either way the
@@ -141,6 +144,29 @@ struct mmu_notifier_ops {
void (*invalidate_range_end)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end);
+
+ /*
+ * invalidate_range() is either called between
+ * invalidate_range_start() and invalidate_range_end() when the
+	 * VM has to free pages that were unmapped, but before the
+ * pages are actually freed, or outside of _start()/_end() when
+ * a (remote) TLB is necessary.
+ *
+ * If invalidate_range() is used to manage a non-CPU TLB with
+	 * shared page-tables, it is not necessary to implement the
+ * invalidate_range_start()/end() notifiers, as
+	 * invalidate_range() already catches the points in time when an
+ * external TLB range needs to be flushed.
+ *
+ * The invalidate_range() function is called under the ptl
+	 * spin-lock and is not allowed to sleep.
+ *
+ * Note that this function might be called with just a sub-range
+ * of what was passed to invalidate_range_start()/end(), if
+ * called between those functions.
+ */
+ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
};
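Taken together, a driver that keeps a non-CPU TLB over shared page tables now only needs the new callback; a minimal sketch, with the device and its flush helper assumed rather than real:

static void my_mn_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	/* runs under the ptl spinlock: must not sleep */
	my_device_flush_tlb_range(container_of(mn, struct my_dev, mn),
				  start, end);
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_range = my_mn_invalidate_range,
};
/* registered elsewhere with mmu_notifier_register(&mydev->mn, mm) */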
/*
@@ -151,7 +177,7 @@ struct mmu_notifier_ops {
* Therefore notifier chains can only be traversed when either
*
* 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).
+ * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
struct mmu_notifier {
@@ -175,7 +201,8 @@ extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
- unsigned long address);
+ unsigned long start,
+ unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -186,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -194,10 +223,11 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
- unsigned long address)
+ unsigned long start,
+ unsigned long end)
{
if (mm_has_notifiers(mm))
- return __mmu_notifier_clear_flush_young(mm, address);
+ return __mmu_notifier_clear_flush_young(mm, start, end);
return 0;
}
@@ -237,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
__mmu_notifier_invalidate_range_end(mm, start, end);
}
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range(mm, start, end);
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
mm->mmu_notifier_mm = NULL;
@@ -255,7 +292,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
unsigned long ___address = __address; \
__young = ptep_clear_flush_young(___vma, ___address, __ptep); \
__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
- ___address); \
+ ___address, \
+ ___address + \
+ PAGE_SIZE); \
__young; \
})
@@ -266,10 +305,50 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
unsigned long ___address = __address; \
__young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
- ___address); \
+ ___address, \
+ ___address + \
+ PMD_SIZE); \
__young; \
})
+#define ptep_clear_flush_notify(__vma, __address, __ptep) \
+({ \
+ unsigned long ___addr = __address & PAGE_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pte_t ___pte; \
+ \
+ ___pte = ptep_clear_flush(__vma, __address, __ptep); \
+ mmu_notifier_invalidate_range(___mm, ___addr, \
+ ___addr + PAGE_SIZE); \
+ \
+ ___pte; \
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(___mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(__mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -301,7 +380,8 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
- unsigned long address)
+ unsigned long start,
+ unsigned long end)
{
return 0;
}
@@ -332,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
{
}
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
@@ -342,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 318df7051850..2f0856d14b21 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -431,6 +431,15 @@ struct zone {
*/
int nr_migrate_reserve_block;
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * Number of isolated pageblock. It is used to solve incorrect
+ * freepage counting problem due to racy retrieving migratetype
+ * of pageblock. Protected by zone->lock.
+ */
+ unsigned long nr_isolate_pageblock;
+#endif
+
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
@@ -521,13 +530,13 @@ struct zone {
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
-typedef enum {
+enum zone_flags {
ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
ZONE_CONGESTED, /* zone has many dirty pages backed by
* a congested BDI
*/
- ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found
+ ZONE_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
*/
@@ -535,52 +544,7 @@ typedef enum {
* many pages under writeback
*/
ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
-} zone_flags_t;
-
-static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
-{
- set_bit(flag, &zone->flags);
-}
-
-static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
-{
- return test_and_set_bit(flag, &zone->flags);
-}
-
-static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
-{
- clear_bit(flag, &zone->flags);
-}
-
-static inline int zone_is_reclaim_congested(const struct zone *zone)
-{
- return test_bit(ZONE_CONGESTED, &zone->flags);
-}
-
-static inline int zone_is_reclaim_dirty(const struct zone *zone)
-{
- return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
-}
-
-static inline int zone_is_reclaim_writeback(const struct zone *zone)
-{
- return test_bit(ZONE_WRITEBACK, &zone->flags);
-}
-
-static inline int zone_is_reclaim_locked(const struct zone *zone)
-{
- return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
-}
-
-static inline int zone_is_fair_depleted(const struct zone *zone)
-{
- return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-}
-
-static inline int zone_is_oom_locked(const struct zone *zone)
-{
- return test_bit(ZONE_OOM_LOCKED, &zone->flags);
-}
+};
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
@@ -758,8 +722,8 @@ typedef struct pglist_data {
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
-#ifdef CONFIG_MEMCG
- struct page_cgroup *node_page_cgroup;
+#ifdef CONFIG_PAGE_EXTENSION
+ struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
@@ -1114,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
struct page;
-struct page_cgroup;
+struct page_ext;
struct mem_section {
/*
* This is, logically, a pointer to an array of struct
@@ -1132,12 +1096,12 @@ struct mem_section {
/* See declaration of similar field in struct zone */
unsigned long *pageblock_flags;
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_PAGE_EXTENSION
/*
- * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
- * section. (see memcontrol.h/page_cgroup.h about this.)
+ * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use
+ * section. (see page_ext.h about this.)
*/
- struct page_cgroup *page_cgroup;
+ struct page_ext *page_ext;
unsigned long pad;
#endif
/*
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 44eeef0da186..745def862580 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -69,7 +69,7 @@ struct ieee1394_device_id {
* @bDeviceClass: Class of device; numbers are assigned
* by the USB forum. Products may choose to implement classes,
* or be vendor-specific. Device classes specify behavior of all
- * the interfaces on a devices.
+ * the interfaces on a device.
* @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
* @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
* @bInterfaceClass: Class of interface; numbers are assigned
diff --git a/include/linux/module.h b/include/linux/module.h
index 71f282a4e307..ebfb0e153c6a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -210,20 +210,6 @@ enum module_state {
MODULE_STATE_UNFORMED, /* Still setting it up. */
};
-/**
- * struct module_ref - per cpu module reference counts
- * @incs: number of module get on this cpu
- * @decs: number of module put on this cpu
- *
- * We force an alignment on 8 or 16 bytes, so that alloc_percpu()
- * put @incs/@decs in same cache line, with no extra memory cost,
- * since alloc_percpu() is fine grained.
- */
-struct module_ref {
- unsigned long incs;
- unsigned long decs;
-} __attribute((aligned(2 * sizeof(unsigned long))));
-
struct module {
enum module_state state;
@@ -367,7 +353,7 @@ struct module {
/* Destruction function. */
void (*exit)(void);
- struct module_ref __percpu *refptr;
+ atomic_t refcnt;
#endif
#ifdef CONFIG_CONSTRUCTORS
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 494f99e852da..1c9effa25e26 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -42,7 +42,7 @@ struct kernel_param;
* NOARG - the parameter allows for no argument (foo instead of foo=1)
*/
enum {
- KERNEL_PARAM_FL_NOARG = (1 << 0)
+ KERNEL_PARAM_OPS_FL_NOARG = (1 << 0)
};
struct kernel_param_ops {
@@ -56,11 +56,21 @@ struct kernel_param_ops {
void (*free)(void *arg);
};
+/*
+ * Flags available for kernel_param
+ *
+ * UNSAFE - the parameter is dangerous and setting it will taint the kernel
+ */
+enum {
+ KERNEL_PARAM_FL_UNSAFE = (1 << 0)
+};
+
struct kernel_param {
const char *name;
const struct kernel_param_ops *ops;
u16 perm;
- s16 level;
+ s8 level;
+ u8 flags;
union {
void *arg;
const struct kparam_string *str;
@@ -68,6 +78,8 @@ struct kernel_param {
};
};
+extern const struct kernel_param __start___param[], __stop___param[];
+
/* Special one for strings we want to copy into */
struct kparam_string {
unsigned int maxlen;
@@ -113,6 +125,12 @@ struct kparam_array
module_param_named(name, name, type, perm)
/**
+ * module_param_unsafe - same as module_param but taints kernel
+ */
+#define module_param_unsafe(name, type, perm) \
+ module_param_named_unsafe(name, name, type, perm)
+
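Usage is identical to module_param(); the only difference is the taint on write. For example (the parameter itself is hypothetical):

#include <linux/moduleparam.h>

static bool use_experimental_path;
module_param_unsafe(use_experimental_path, bool, 0644);
MODULE_PARM_DESC(use_experimental_path,
		 "Enable the experimental path (taints the kernel when set)");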
+/**
* module_param_named - typesafe helper for a renamed module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @value: the actual lvalue to alter.
@@ -129,6 +147,14 @@ struct kparam_array
__MODULE_PARM_TYPE(name, #type)
/**
+ * module_param_named_unsafe - same as module_param_named but taints kernel
+ */
+#define module_param_named_unsafe(name, value, type, perm) \
+ param_check_##type(name, &(value)); \
+ module_param_cb_unsafe(name, &param_ops_##type, &value, perm); \
+ __MODULE_PARM_TYPE(name, #type)
+
+/**
* module_param_cb - general callback for a module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
@@ -137,7 +163,11 @@ struct kparam_array
* The ops can have NULL set or get functions.
*/
#define module_param_cb(name, ops, arg, perm) \
- __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, 0)
+
+#define module_param_cb_unsafe(name, ops, arg, perm) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
+ KERNEL_PARAM_FL_UNSAFE)
/**
* <level>_param_cb - general callback for a module/cmdline parameter
@@ -149,7 +179,7 @@ struct kparam_array
* The ops can have NULL set or get functions.
*/
#define __level_param_cb(name, ops, arg, perm, level) \
- __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level)
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
#define core_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 1)
@@ -184,22 +214,22 @@ struct kparam_array
/* This is the fundamental function for registering boot/module
parameters. */
-#define __module_param_call(prefix, name, ops, arg, perm, level) \
+#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
/* Default value instead of permissions? */ \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
= { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \
- level, { arg } }
+ level, flags, { arg } }
/* Obsolete - use module_param_cb() */
#define module_param_call(name, set, get, arg, perm) \
static struct kernel_param_ops __param_ops_##name = \
- { 0, (void *)set, (void *)get }; \
+ { .flags = 0, (void *)set, (void *)get }; \
__module_param_call(MODULE_PARAM_PREFIX, \
name, &__param_ops_##name, arg, \
- (perm) + sizeof(__check_old_set_param(set))*0, -1)
+ (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
/* We don't get oldget: it's often a new-style param_get_uint, etc. */
static inline int
@@ -279,7 +309,7 @@ static inline void __kernel_param_unlock(void)
*/
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
- __module_param_call("", name, &param_ops_##type, &var, perm, -1)
+ __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
#endif /* !MODULE */
/**
@@ -297,7 +327,7 @@ static inline void __kernel_param_unlock(void)
= { len, string }; \
__module_param_call(MODULE_PARAM_PREFIX, name, \
&param_ops_string, \
- .str = &__param_string_##name, perm, -1); \
+ .str = &__param_string_##name, perm, -1, 0);\
__MODULE_PARM_TYPE(name, "string")
/**
@@ -444,7 +474,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
__module_param_call(MODULE_PARAM_PREFIX, name, \
&param_array_ops, \
.arr = &__param_arr_##name, \
- perm, -1); \
+ perm, -1, 0); \
__MODULE_PARM_TYPE(name, "array of " #type)
extern struct kernel_param_ops param_array_ops;
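For illustration, a minimal sketch (not taken from this patch) of how a module might use the new helper; the force_probe parameter and its description are invented:

/* Hypothetical driver parameter: setting force_probe=1 on the kernel command
 * line or at modprobe time taints the kernel, because the parameter is
 * declared with the new _unsafe variant.
 */
static bool force_probe;
module_param_unsafe(force_probe, bool, 0444);
MODULE_PARM_DESC(force_probe, "Force probing of unsupported hardware (tainting)");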
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 9262e4bf0cc3..c2c561dc0114 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -81,6 +81,9 @@ extern struct vfsmount *mntget(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(struct path *path);
extern int __mnt_is_readonly(struct vfsmount *mnt);
+struct path;
+extern struct vfsmount *clone_private_mount(struct path *path);
+
struct file_system_type;
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
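For illustration, a hedged sketch (not taken from this patch) of how a stacking filesystem might use the newly exported helper; the function and variable names are invented:

/* Hypothetical mount-time fragment: take a private, detached clone of an
 * underlying mount so later lookups on it are unaffected by umounts in the
 * original mount namespace.
 */
static int example_clone_lower(struct path *lower_path, struct vfsmount **pmnt)
{
	struct vfsmount *mnt = clone_private_mount(lower_path);

	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	*pmnt = mnt;
	return 0;
}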
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8103f32f6d87..8ac4a68ffae2 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,17 +10,12 @@ struct msi_msg {
u32 data; /* 16 bits of msi message data */
};
+extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
-void mask_msi_irq(struct irq_data *data);
-void unmask_msi_irq(struct irq_data *data);
-void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void read_msi_msg(unsigned int irq, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
-void write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct msi_desc {
struct {
@@ -29,7 +24,6 @@ struct msi_desc {
__u8 multi_cap : 3; /* log2 num of messages supported */
__u8 maskbit : 1; /* mask-pending bit supported ? */
__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
__u16 entry_nr; /* specific enabled entry */
unsigned default_irq; /* default pre-assigned irq */
} msi_attrib;
@@ -47,10 +41,54 @@ struct msi_desc {
/* Last set MSI message */
struct msi_msg msg;
-
- struct kobject kobj;
};
+/* Helpers to hide struct msi_desc implementation details */
+#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
+#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define first_msi_entry(dev) \
+ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
+#define for_each_msi_entry(desc, dev) \
+ list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+
+#ifdef CONFIG_PCI_MSI
+#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
+#define for_each_pci_msi_entry(desc, pdev) \
+ for_each_msi_entry((desc), &(pdev)->dev)
+
+static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+ return desc->dev;
+}
+#endif /* CONFIG_PCI_MSI */
+
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+
+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
+
+/* Conversion helpers. Should be removed after merging */
+static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+ __pci_write_msi_msg(entry, msg);
+}
+static inline void write_msi_msg(int irq, struct msi_msg *msg)
+{
+ pci_write_msi_msg(irq, msg);
+}
+static inline void mask_msi_irq(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+}
+static inline void unmask_msi_irq(struct irq_data *data)
+{
+ pci_msi_unmask_irq(data);
+}
+
/*
* The arch hooks to set up MSI irqs. Those functions are
* implemented as weak symbols so that they /can/ be overridden by
@@ -60,25 +98,146 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
-u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
-u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
-struct msi_chip {
+struct msi_controller {
struct module *owner;
struct device *dev;
struct device_node *of_node;
struct list_head list;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *domain;
+#endif
- int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
+ int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
- void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
- int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
- int nvec, int type);
+ void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+
+#include <linux/irqhandler.h>
+#include <asm/msi.h>
+
+struct irq_domain;
+struct irq_chip;
+struct device_node;
+struct msi_domain_info;
+
+/**
+ * struct msi_domain_ops - MSI interrupt domain callbacks
+ * @get_hwirq: Retrieve the resulting hw irq number
+ * @msi_init: Domain specific init function for MSI interrupts
+ * @msi_free: Domain specific function to free an MSI interrupt
+ * @msi_check: Callback for verification of the domain/info/dev data
+ * @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @msi_finish: Optional callback to finalize the allocation
+ * @set_desc: Set the msi descriptor for an interrupt
+ * @handle_error: Optional error handler if the allocation fails
+ *
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by
+ * msi_create_irq_domain() and related interfaces
+ *
+ * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
+ * are callbacks used by msi_irq_domain_alloc_irqs() and related
+ * interfaces which are based on msi_desc.
+ */
+struct msi_domain_ops {
+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
+ msi_alloc_info_t *arg);
+ int (*msi_init)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg);
+ void (*msi_free)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq);
+ int (*msi_check)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ struct device *dev);
+ int (*msi_prepare)(struct irq_domain *domain,
+ struct device *dev, int nvec,
+ msi_alloc_info_t *arg);
+ void (*msi_finish)(msi_alloc_info_t *arg, int retval);
+ void (*set_desc)(msi_alloc_info_t *arg,
+ struct msi_desc *desc);
+ int (*handle_error)(struct irq_domain *domain,
+ struct msi_desc *desc, int error);
+};
+
+/**
+ * struct msi_domain_info - MSI interrupt domain data
+ * @flags: Flags to describe features and capabilities
+ * @ops: The callback data structure
+ * @chip: Optional: associated interrupt chip
+ * @chip_data: Optional: associated interrupt chip data
+ * @handler: Optional: associated interrupt flow handler
+ * @handler_data: Optional: associated interrupt flow handler data
+ * @handler_name: Optional: associated interrupt flow handler name
+ * @data: Optional: domain specific data
+ */
+struct msi_domain_info {
+ u32 flags;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
+};
+
+/* Flags for msi_domain_info */
+enum {
+ /*
+ * Init non-implemented ops callbacks with the default MSI domain
+ * callbacks.
+ */
+ MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0),
+ /*
+ * Init non-implemented chip callbacks with the default MSI chip
+ * callbacks.
+ */
+ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
+ /* Build identity map between hwirq and irq */
+ MSI_FLAG_IDENTITY_MAP = (1 << 2),
+ /* Support multiple PCI MSI interrupts */
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
+ /* Support PCI MSIX interrupts */
+ MSI_FLAG_PCI_MSIX = (1 << 4),
+};
+
+int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force);
+
+struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ int nvec);
+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+
+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
+ int nvec, int type);
+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+ struct msi_domain_info *info, struct irq_domain *parent);
+
+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
+ struct msi_desc *desc);
+int pci_msi_domain_check_cap(struct irq_domain *domain,
+ struct msi_domain_info *info, struct device *dev);
+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+
#endif /* LINUX_MSI_H */
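For illustration, a hedged sketch (not taken from this patch) of a driver walking its MSI descriptors through the new accessors instead of reaching into the msi_list directly; the function name is invented:

/* Hypothetical debug helper: print every MSI descriptor of a PCI device. */
static void example_dump_msi(struct pci_dev *pdev)
{
	struct msi_desc *desc;

	for_each_pci_msi_entry(desc, pdev)
		dev_info(&pdev->dev, "MSI irq %u (up to %d messages)\n",
			 desc->irq, 1 << desc->msi_attrib.multi_cap);
}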
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 37ef6b194089..299d7d31fe53 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -153,7 +153,7 @@ struct cfi_ident {
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
uint32_t EraseRegionInfo[0]; /* Not host ordered */
-} __attribute__((packed));
+} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -161,7 +161,7 @@ struct cfi_extquery {
uint8_t pri[3];
uint8_t MajorVersion;
uint8_t MinorVersion;
-} __attribute__((packed));
+} __packed;
/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
@@ -180,7 +180,7 @@ struct cfi_pri_intelext {
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
uint8_t extra[0];
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_otpinfo {
uint32_t ProtRegAddr;
@@ -188,7 +188,7 @@ struct cfi_intelext_otpinfo {
uint8_t FactProtRegSize;
uint16_t UserGroups;
uint8_t UserProtRegSize;
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_blockinfo {
uint16_t NumIdentBlocks;
@@ -196,7 +196,7 @@ struct cfi_intelext_blockinfo {
uint16_t MinBlockEraseCycles;
uint8_t BitsPerCell;
uint8_t BlockCap;
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_regioninfo {
uint16_t NumIdentPartitions;
@@ -205,7 +205,7 @@ struct cfi_intelext_regioninfo {
uint8_t NumOpAllowedSimEraMode;
uint8_t NumBlockTypes;
struct cfi_intelext_blockinfo BlockTypes[1];
-} __attribute__((packed));
+} __packed;
struct cfi_intelext_programming_regioninfo {
uint8_t ProgRegShift;
@@ -214,7 +214,7 @@ struct cfi_intelext_programming_regioninfo {
uint8_t Reserved2;
uint8_t ControlInvalid;
uint8_t Reserved3;
-} __attribute__((packed));
+} __packed;
/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
@@ -233,7 +233,7 @@ struct cfi_pri_amdstd {
uint8_t VppMin;
uint8_t VppMax;
uint8_t TopBottom;
-} __attribute__((packed));
+} __packed;
/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
@@ -245,18 +245,18 @@ struct cfi_pri_atmel {
uint8_t BottomBoot;
uint8_t BurstMode;
uint8_t PageMode;
-} __attribute__((packed));
+} __packed;
struct cfi_pri_query {
uint8_t NumFields;
uint32_t ProtField[1]; /* Not host ordered */
-} __attribute__((packed));
+} __packed;
struct cfi_bri_query {
uint8_t PageModeReadCap;
uint8_t NumFields;
uint32_t ConfField[1]; /* Not host ordered */
-} __attribute__((packed));
+} __packed;
#define P_ID_NONE 0x0000
#define P_ID_INTEL_EXT 0x0001
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c300db3ae285..3d4ea7eb2b68 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -455,8 +455,21 @@ struct nand_hw_control {
* be provided if a hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
* @correct: function for ECC correction, matching to ECC generator (sw/hw)
- * @read_page_raw: function to read a raw page without ECC
- * @write_page_raw: function to write a raw page without ECC
+ * @read_page_raw: function to read a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and always return contiguous in-band and
+ * out-of-band data even if they're not stored
+ * contiguously on the NAND chip (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @write_page_raw: function to write a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and consider the passed data as contiguous
+ * in-band and out-of-band data. The ECC controller is
+ * responsible for doing the appropriate transformations
+ * to adapt to its specific layout (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
* any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
@@ -587,6 +600,11 @@ struct nand_buffers {
* @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
* also from the datasheet. It is the recommended ECC step
* size, if known; if unknown, set to zero.
+ * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
+ * either deduced from the datasheet if the NAND
+ * chip is not ONFI compliant or set to 0 if it is
+ * (an ONFI chip is always configured in mode 0
+ * after a NAND reset)
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
@@ -671,6 +689,7 @@ struct nand_chip {
uint8_t bits_per_cell;
uint16_t ecc_strength_ds;
uint16_t ecc_step_ds;
+ int onfi_timing_mode_default;
int badblockpos;
int badblockbits;
@@ -717,6 +736,7 @@ struct nand_chip {
#define NAND_MFR_EON 0x92
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_ATO 0x9b
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
@@ -766,12 +786,17 @@ struct nand_chip {
* @options: stores various chip bit options
* @id_len: The valid length of the @id.
* @oobsize: OOB size
+ * @ecc: ECC correctability and step information from the datasheet.
* @ecc.strength_ds: The ECC correctability from the datasheet, same as the
* @ecc_strength_ds in nand_chip{}.
* @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
* @ecc_step_ds in nand_chip{}, also from the datasheet.
* For example, "4-bit ECC for each 512 bytes" can be set with
* NAND_ECC_INFO(4, 512).
+ * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
+ * reset. Should be deduced from timings described
+ * in the datasheet.
+ *
*/
struct nand_flash_dev {
char *name;
@@ -792,6 +817,7 @@ struct nand_flash_dev {
uint16_t strength_ds;
uint16_t step_ds;
} ecc;
+ int onfi_timing_mode_default;
};
/**
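For illustration, a sketch (not taken from this patch) of a full-ID nand_flash_dev table entry that fills in the new field; the part name, ID bytes and timing mode value are invented:

/* Hypothetical nand_ids.c-style entry: a chip needing 4-bit ECC per 512
 * bytes whose datasheet timings correspond to ONFI timing mode 3.
 */
{ .name = "Example NAND 128MiB 3.3V 8-bit",
  .id = {0xad, 0xd3}, .pagesize = SZ_2K, .chipsize = 128,
  .erasesize = SZ_128K, .id_len = 2, .oobsize = 64,
  .ecc = NAND_ECC_INFO(4, SZ_512),
  .onfi_timing_mode_default = 3 },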
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9e6294f32ba8..63aeccf9ddc8 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -116,6 +116,10 @@ enum spi_nor_ops {
SPI_NOR_OPS_UNLOCK,
};
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+};
+
/**
* struct spi_nor - Structure for defining the SPI NOR layer
* @mtd: pointer to an mtd_info structure
@@ -129,6 +133,7 @@ enum spi_nor_ops {
* @program_opcode: the program opcode
* @flash_read: the mode of the read
* @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI-NOR (SNOR_F_*)
* @cfg: used by the read_xfer/write_xfer
* @cmd_buf: used by the write_reg
* @prepare: [OPTIONAL] do some preparations for the
@@ -139,9 +144,6 @@ enum spi_nor_ops {
* @write_xfer: [OPTIONAL] the write fundamental primitive
* @read_reg: [DRIVER-SPECIFIC] read out the register
* @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read_id: [REPLACEABLE] read out the ID data, and find
- * the proper spi_device_id
- * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready
* @read: [DRIVER-SPECIFIC] read data from the SPI NOR
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
@@ -160,6 +162,7 @@ struct spi_nor {
u8 program_opcode;
enum read_mode flash_read;
bool sst_write_second;
+ u32 flags;
struct spi_nor_xfer_cfg cfg;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
@@ -172,8 +175,6 @@ struct spi_nor {
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
int write_enable);
- const struct spi_device_id *(*read_id)(struct spi_nor *nor);
- int (*wait_till_ready)(struct spi_nor *nor);
int (*read)(struct spi_nor *nor, loff_t from,
size_t len, size_t *retlen, u_char *read_buf);
@@ -187,32 +188,17 @@ struct spi_nor {
/**
* spi_nor_scan() - scan the SPI NOR
* @nor: the spi_nor structure
- * @id: the spi_device_id provided by the driver
+ * @name: the chip type name
* @mode: the read mode supported by the driver
*
* The drivers can use this function to scan the SPI NOR.
* In the scanning, it will try to get all the necessary information to
* fill the mtd_info{} and the spi_nor{}.
*
- * The board may assigns a spi_device_id with @id which be used to compared with
- * the spi_device_id detected by the scanning.
+ * The chip type name can be provided through the @name parameter.
*
* Return: 0 for success, others for failure.
*/
-int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
- enum read_mode mode);
-extern const struct spi_device_id spi_nor_ids[];
-
-/**
- * spi_nor_match_id() - find the spi_device_id by the name
- * @name: the name of the spi_device_id
- *
- * The drivers use this function to find the spi_device_id
- * specified by the @name.
- *
- * Return: returns the right spi_device_id pointer on success,
- * and returns NULL on failure.
- */
-const struct spi_device_id *spi_nor_match_id(char *name);
+int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
#endif
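For illustration, a hedged sketch (not taken from this patch) of a controller driver calling the reworked scan entry point; the function name and the way the chip name reaches the driver are assumptions:

/* Hypothetical probe fragment: the flash is now identified by a chip type
 * name string rather than by a spi_device_id passed in from board code.
 */
static int example_nor_setup(struct spi_nor *nor, const char *flash_name)
{
	int ret;

	ret = spi_nor_scan(nor, flash_name, SPI_NOR_QUAD);
	if (ret)
		return ret;

	/* nor->mtd has been filled in by the scan */
	return mtd_device_register(nor->mtd, NULL, 0);
}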
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8d5535c58cc2..cc31498fc526 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -52,7 +52,7 @@ struct mutex {
atomic_t count;
spinlock_t wait_lock;
struct list_head wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
struct task_struct *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock)
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.txt.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 492de72560fa..c8990779f0c3 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,21 +7,10 @@
#include <linux/path.h>
struct vfsmount;
+struct nameidata;
enum { MAX_NESTED_LINKS = 8 };
-struct nameidata {
- struct path path;
- struct qstr last;
- struct path root;
- struct inode *inode; /* path.dentry.d_inode */
- unsigned int flags;
- unsigned seq, m_seq;
- int last_type;
- unsigned depth;
- char *saved_names[MAX_NESTED_LINKS + 1];
-};
-
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -82,16 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
extern void nd_jump_link(struct nameidata *nd, struct path *path);
-
-static inline void nd_set_link(struct nameidata *nd, char *path)
-{
- nd->saved_names[nd->depth] = path;
-}
-
-static inline char *nd_get_link(struct nameidata *nd)
-{
- return nd->saved_names[nd->depth];
-}
+extern void nd_set_link(struct nameidata *nd, char *path);
+extern char *nd_get_link(struct nameidata *nd);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dcfdecbfa0b7..8e30685affeb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -47,9 +47,9 @@ enum {
NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
- NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_MPLS_BIT,
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
@@ -118,7 +118,7 @@ enum {
#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
-#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
+#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -181,7 +181,6 @@ enum {
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_UDP_TUNNEL | \
- NETIF_F_GSO_UDP_TUNNEL_CSUM | \
- NETIF_F_GSO_MPLS)
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c8e388e5fccc..679e6e90aa4c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -30,6 +30,7 @@
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
@@ -56,6 +57,8 @@ struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
+/* 802.15.4 specific */
+struct wpan_dev;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -313,6 +316,7 @@ struct napi_struct {
struct net_device *dev;
struct sk_buff *gro_list;
struct sk_buff *skb;
+ struct hrtimer timer;
struct list_head dev_list;
struct hlist_node napi_hash_node;
unsigned int napi_id;
@@ -385,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
static inline bool napi_disable_pending(struct napi_struct *n)
{
@@ -419,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n)
__napi_schedule(n);
}
+/**
+ * napi_schedule_irqoff - schedule NAPI poll
+ * @n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+ if (napi_schedule_prep(n))
+ __napi_schedule_irqoff(n);
+}
+
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
@@ -429,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
+void __napi_complete(struct napi_struct *n);
+void napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
* @n: napi context
*
* Mark NAPI processing as complete.
+ * Consider using napi_complete_done() instead.
*/
-void __napi_complete(struct napi_struct *n);
-void napi_complete(struct napi_struct *n);
+static inline void napi_complete(struct napi_struct *n)
+{
+ return napi_complete_done(n, 0);
+}
/**
* napi_by_id - lookup a NAPI by napi_id
@@ -471,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi);
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
*/
-static inline void napi_disable(struct napi_struct *n)
-{
- might_sleep();
- set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- clear_bit(NAPI_STATE_DISABLE, &n->state);
-}
+void napi_disable(struct napi_struct *n);
/**
* napi_enable - enable NAPI scheduling
@@ -543,7 +558,7 @@ struct netdev_queue {
* read mostly part
*/
struct net_device *dev;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
@@ -739,13 +754,13 @@ struct netdev_fcoe_hbainfo {
};
#endif
-#define MAX_PHYS_PORT_ID_LEN 32
+#define MAX_PHYS_ITEM_ID_LEN 32
-/* This structure holds a unique identifier to identify the
- * physical port used by a netdevice.
+/* This structure holds a unique identifier to identify some
+ * physical item (port for example) used by a netdevice.
*/
-struct netdev_phys_port_id {
- unsigned char id[MAX_PHYS_PORT_ID_LEN];
+struct netdev_phys_item_id {
+ unsigned char id[MAX_PHYS_ITEM_ID_LEN];
unsigned char id_len;
};
@@ -936,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 flags)
+ * const unsigned char *addr, u16 vid, u16 flags)
* Adds an FDB entry to dev for addr.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr)
+ * const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev corresponding to addr.
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
@@ -961,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
*
* int (*ndo_get_phys_port_id)(struct net_device *dev,
- * struct netdev_phys_port_id *ppid);
+ * struct netdev_phys_item_id *ppid);
* Called to get ID of physical port of this device. If driver does
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
@@ -997,6 +1012,24 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ * struct net_device *dev,
+ * netdev_features_t features);
+ * Called by core transmit path to determine if device is capable of
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
+ *
+ * int (*ndo_switch_parent_id_get)(struct net_device *dev,
+ * struct netdev_phys_item_id *psid);
+ * Called to get an ID of the switch chip this port is part of.
+ * If driver implements this, it indicates that it represents a port
+ * of a switch chip.
+ * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
+ * Called to notify switch device port of bridge port STP
+ * state change.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1107,11 +1140,13 @@ struct net_device_ops {
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
+ u16 vid,
u16 flags);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr);
+ const unsigned char *addr,
+ u16 vid);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1129,7 +1164,7 @@ struct net_device_ops {
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
- struct netdev_phys_port_id *ppid);
+ struct netdev_phys_item_id *ppid);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
@@ -1146,6 +1181,15 @@ struct net_device_ops {
struct net_device *dev,
void *priv);
int (*ndo_get_lock_subclass)(struct net_device *dev);
+ netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+#ifdef CONFIG_NET_SWITCHDEV
+ int (*ndo_switch_parent_id_get)(struct net_device *dev,
+ struct netdev_phys_item_id *psid);
+ int (*ndo_switch_port_stp_update)(struct net_device *dev,
+ u8 state);
+#endif
};
/**
@@ -1206,6 +1250,9 @@ enum netdev_priv_flags {
IFF_SUPP_NOFCS = 1<<19,
IFF_LIVE_ADDR_CHANGE = 1<<20,
IFF_MACVLAN = 1<<21,
+ IFF_XMIT_DST_RELEASE_PERM = 1<<22,
+ IFF_IPVLAN_MASTER = 1<<23,
+ IFF_IPVLAN_SLAVE = 1<<24,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1230,6 +1277,9 @@ enum netdev_priv_flags {
#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN IFF_MACVLAN
+#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
+#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
+#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
/**
* struct net_device - The DEVICE structure.
@@ -1416,6 +1466,8 @@ enum netdev_priv_flags {
* @gso_max_size: Maximum size of generic segmentation offload
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
+ * @gso_min_segs: Minimum number of segments that can be passed to the
+ * NIC for GSO
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1559,6 +1611,7 @@ struct net_device {
struct inet6_dev __rcu *ip6_ptr;
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
+ struct wpan_dev *ieee802154_ptr;
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -1577,6 +1630,7 @@ struct net_device {
#endif
+ unsigned long gro_flush_timeout;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -1666,7 +1720,7 @@ struct net_device {
unsigned int gso_max_size;
#define GSO_MAX_SEGS 65535
u16 gso_max_segs;
-
+ u16 gso_min_segs;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
@@ -1747,6 +1801,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
return &dev->_tx[index];
}
+static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+}
+
static inline void netdev_for_each_tx_queue(struct net_device *dev,
void (*f)(struct net_device *,
struct netdev_queue *,
@@ -1781,24 +1841,13 @@ void dev_net_set(struct net_device *dev, struct net *net)
#endif
}
-static inline bool netdev_uses_dsa_tags(struct net_device *dev)
-{
-#ifdef CONFIG_NET_DSA_TAG_DSA
- if (dev->dsa_ptr != NULL)
- return dsa_uses_dsa_tags(dev->dsa_ptr);
-#endif
-
- return 0;
-}
-
-static inline bool netdev_uses_trailer_tags(struct net_device *dev)
+static inline bool netdev_uses_dsa(struct net_device *dev)
{
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
+#if IS_ENABLED(CONFIG_NET_DSA)
if (dev->dsa_ptr != NULL)
- return dsa_uses_trailer_tags(dev->dsa_ptr);
+ return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
-
- return 0;
+ return false;
}
/**
@@ -1879,11 +1928,20 @@ struct napi_gro_cb {
/* jiffies when first packet was created/queued */
unsigned long age;
- /* Used in ipv6_gro_receive() */
+ /* Used in ipv6_gro_receive() and foo-over-udp */
u16 proto;
/* Used in udp_gro_receive */
- u16 udp_mark;
+ u8 udp_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -1910,7 +1968,6 @@ struct packet_type {
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
- int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
@@ -1924,6 +1981,7 @@ struct packet_offload {
struct udp_offload {
__be16 port;
+ u8 ipproto;
struct offload_callbacks callbacks;
};
@@ -1982,6 +2040,7 @@ struct pcpu_sw_netstats {
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016
#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
+#define NETDEV_CHANGEINFODATA 0x0018
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2074,8 +2133,8 @@ void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
-struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
- unsigned short mask);
+struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
+ unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
@@ -2153,11 +2212,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- if (skb->ip_summed == CHECKSUM_COMPLETE)
+ if (NAPI_GRO_CB(skb)->csum_valid)
NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
csum_partial(start, len, 0));
}
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return (skb->ip_summed != CHECKSUM_PARTIAL &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+ /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+ * verified a new top level checksum or an encapsulated one
+ * during GRO. This saves work if we fall back to the normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (__ret) \
+ __skb_mark_checksum_bad(skb); \
+ else \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __sum16 check, __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, check, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
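For illustration, a hedged sketch (not taken from this patch) of how a tunnel's gro_receive callback might use the simple validator added here; example_gro_receive is hypothetical and error handling is reduced to a flush:

/* Hypothetical gro_receive fragment: validate the outer checksum before
 * aggregating; on failure mark the skb for flush so the regular receive
 * path re-verifies it.
 */
static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	if (skb_gro_checksum_simple_validate(skb))
		goto flush;

	/* ... look up a matching flow on 'head' and aggregate ... */
	return NULL;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}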
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -2212,10 +2357,7 @@ extern int netdev_flow_limit_table_len;
* Incoming packets are placed on per-cpu queues
*/
struct softnet_data {
- struct Qdisc *output_queue;
- struct Qdisc **output_queue_tailp;
struct list_head poll_list;
- struct sk_buff *completion_queue;
struct sk_buff_head process_queue;
/* stats */
@@ -2223,10 +2365,17 @@ struct softnet_data {
unsigned int time_squeeze;
unsigned int cpu_collision;
unsigned int received_rps;
-
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
+#endif
+#ifdef CONFIG_NET_FLOW_LIMIT
+ struct sd_flow_limit __rcu *flow_limit;
+#endif
+ struct Qdisc *output_queue;
+ struct Qdisc **output_queue_tailp;
+ struct sk_buff *completion_queue;
+#ifdef CONFIG_RPS
/* Elements below can be accessed between CPUs for RPS */
struct call_single_data csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
@@ -2238,9 +2387,6 @@ struct softnet_data {
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
-#ifdef CONFIG_NET_FLOW_LIMIT
- struct sd_flow_limit __rcu *flow_limit;
-#endif
};
static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -2261,12 +2407,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
void __netif_schedule(struct Qdisc *q);
-
-static inline void netif_schedule_queue(struct netdev_queue *txq)
-{
- if (!(txq->state & QUEUE_STATE_ANY_XOFF))
- __netif_schedule(txq->qdisc);
-}
+void netif_schedule_queue(struct netdev_queue *txq);
static inline void netif_tx_schedule_all(struct net_device *dev)
{
@@ -2302,11 +2443,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
}
}
-static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
-{
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
- __netif_schedule(dev_queue->qdisc);
-}
+void netif_tx_wake_queue(struct netdev_queue *dev_queue);
/**
* netif_wake_queue - restart transmit
@@ -2394,6 +2531,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
+/**
+ * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
+ * @dev_queue: pointer to transmit queue
+ *
+ * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
+ * to give an appropriate hint to the CPU.
+ */
+static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+ prefetchw(&dev_queue->dql.num_queued);
+#endif
+}
+
+/**
+ * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
+ * @dev_queue: pointer to transmit queue
+ *
+ * BQL-enabled drivers might use this helper in their TX completion path,
+ * to give an appropriate hint to the CPU.
+ */
+static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+ prefetchw(&dev_queue->dql.limit);
+#endif
+}
+
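For illustration, a minimal sketch (not taken from this patch) of where a BQL-aware driver might call the new prefetch helper; example_start_xmit and the elided ring handling are invented:

/* Hypothetical ndo_start_xmit fragment: prefetch the BQL counters for write
 * before the descriptor setup path touches them via netdev_tx_sent_queue().
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

	netdev_txq_bql_enqueue_prefetchw(txq);

	/* ... post the skb to the hardware ring ... */

	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}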
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
@@ -2578,19 +2743,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
-/**
- * netif_wake_subqueue - allow sending packets on subqueue
- * @dev: network device
- * @queue_index: sub queue index
- *
- * Resume individual transmit queue of a device with multiple transmit queues.
- */
-static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
-{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
- __netif_schedule(txq->qdisc);
-}
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -2637,23 +2790,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
}
#endif
-static inline int netif_copy_real_num_queues(struct net_device *to_dev,
- const struct net_device *from_dev)
-{
- int err;
-
- err = netif_set_real_num_tx_queues(to_dev,
- from_dev->real_num_tx_queues);
- if (err)
- return err;
-#ifdef CONFIG_SYSFS
- return netif_set_real_num_rx_queues(to_dev,
- from_dev->real_num_rx_queues);
-#else
- return 0;
-#endif
-}
-
#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
struct netdev_rx_queue *queue)
@@ -2753,9 +2889,10 @@ void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid);
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq);
+ struct netdev_phys_item_id *ppid);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@ -3313,6 +3450,12 @@ void netdev_upper_dev_unlink(struct net_device *dev,
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
+
+/* RSS keys are 40 or 52 bytes long */
+#define NETDEV_RSS_KEY_LEN 52
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+void netdev_rss_key_fill(void *buffer, size_t len);
+
int dev_get_nest_level(struct net_device *dev,
bool (*type_check)(struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
@@ -3357,6 +3500,27 @@ int __init dev_proc_init(void);
#define dev_proc_init() 0
#endif
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev,
+ bool more)
+{
+ skb->xmit_more = more ? 1 : 0;
+ return ops->ndo_start_xmit(skb, dev);
+}
+
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int rc;
+
+ rc = __netdev_start_xmit(ops, skb, dev, more);
+ if (rc == NETDEV_TX_OK)
+ txq_trans_update(txq);
+
+ return rc;
+}
+
int netdev_class_create_file_ns(struct class_attribute *class_attr,
const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
@@ -3436,7 +3600,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -3447,7 +3611,7 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline bool netif_needs_gso(struct sk_buff *skb,
+static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
@@ -3479,6 +3643,21 @@ static inline bool netif_is_macvlan(struct net_device *dev)
return dev->priv_flags & IFF_MACVLAN;
}
+static inline bool netif_is_macvlan_port(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACVLAN_PORT;
+}
+
+static inline bool netif_is_ipvlan(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_IPVLAN_SLAVE;
+}
+
+static inline bool netif_is_ipvlan_port(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_IPVLAN_MASTER;
+}
+
static inline bool netif_is_bond_master(struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
@@ -3494,6 +3673,12 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
+static inline void netif_keep_dst(struct net_device *dev)
+{
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
+}
+
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */
@@ -3523,22 +3708,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
}
__printf(3, 4)
-int netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...);
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
__printf(2, 3)
-int netdev_emerg(const struct net_device *dev, const char *format, ...);
+void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_alert(const struct net_device *dev, const char *format, ...);
+void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_crit(const struct net_device *dev, const char *format, ...);
+void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_err(const struct net_device *dev, const char *format, ...);
+void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_warn(const struct net_device *dev, const char *format, ...);
+void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_notice(const struct net_device *dev, const char *format, ...);
+void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_info(const struct net_device *dev, const char *format, ...);
+void netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
@@ -3556,7 +3741,6 @@ do { \
({ \
if (0) \
netdev_printk(KERN_DEBUG, __dev, format, ##args); \
- 0; \
})
#endif
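For illustration, a hedged sketch (not taken from this patch) of a driver adopting the reworked NAPI helpers; example_poll and example_isr are invented, and the elided RX cleanup is driver specific:

/* Hypothetical poll routine: report the real amount of work done so the core
 * (and the new gro_flush_timeout logic) gets more information than a plain
 * napi_complete() would give it.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' received packets, counting them ... */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* ... re-enable device interrupts here ... */
	}
	return work_done;
}

/* Hypothetical hard interrupt handler: hard irqs are already masked here,
 * so the cheaper napi_schedule_irqoff() variant can be used.
 */
static irqreturn_t example_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);
	return IRQ_HANDLED;
}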
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 96afc29184be..f1606fa6132d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -57,6 +57,8 @@ enum ip_set_extension {
IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
IPSET_EXT_BIT_COMMENT = 2,
IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+ IPSET_EXT_BIT_SKBINFO = 3,
+ IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
/* Mark set with an extension which needs to call destroy */
IPSET_EXT_BIT_DESTROY = 7,
IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
@@ -65,12 +67,14 @@ enum ip_set_extension {
#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
+#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO)
#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD)
/* Extension id, in size order */
enum ip_set_ext_id {
IPSET_EXT_ID_COUNTER = 0,
IPSET_EXT_ID_TIMEOUT,
+ IPSET_EXT_ID_SKBINFO,
IPSET_EXT_ID_COMMENT,
IPSET_EXT_ID_MAX,
};
@@ -92,6 +96,10 @@ struct ip_set_ext {
u64 packets;
u64 bytes;
u32 timeout;
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
char *comment;
};
@@ -104,6 +112,13 @@ struct ip_set_comment {
char *str;
};
+struct ip_set_skbinfo {
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
+};
+
struct ip_set;
#define ext_timeout(e, s) \
@@ -112,7 +127,8 @@ struct ip_set;
(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
#define ext_comment(e, s) \
(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
-
+#define ext_skbinfo(e, s) \
+(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
@@ -256,6 +272,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
if (SET_WITH_COMMENT(set))
cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+ if (SET_WITH_SKBINFO(set))
+ cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
if (SET_WITH_FORCEADD(set))
cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
@@ -304,6 +322,43 @@ ip_set_update_counter(struct ip_set_counter *counter,
}
}
+static inline void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ mext->skbmark = skbinfo->skbmark;
+ mext->skbmarkmask = skbinfo->skbmarkmask;
+ mext->skbprio = skbinfo->skbprio;
+ mext->skbqueue = skbinfo->skbqueue;
+}
+static inline bool
+ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
+{
+ /* Send nonzero parameters only */
+ return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+ nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+ cpu_to_be64((u64)skbinfo->skbmark << 32 |
+ skbinfo->skbmarkmask))) ||
+ (skbinfo->skbprio &&
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ cpu_to_be32(skbinfo->skbprio))) ||
+ (skbinfo->skbqueue &&
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ cpu_to_be16(skbinfo->skbqueue)));
+
+}
+
+static inline void
+ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext)
+{
+ skbinfo->skbmark = ext->skbmark;
+ skbinfo->skbmarkmask = ext->skbmarkmask;
+ skbinfo->skbprio = ext->skbprio;
+ skbinfo->skbqueue = ext->skbqueue;
+}
+
static inline bool
ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
{
@@ -497,6 +552,9 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
if (SET_WITH_COMMENT(set) &&
ip_set_put_comment(skb, ext_comment(e, set)))
return -EMSGSIZE;
+ if (SET_WITH_SKBINFO(set) &&
+ ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
+ return -EMSGSIZE;
return 0;
}
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
index 68c2aea897f5..fe2622a00151 100644
--- a/include/linux/netfilter/ipset/ip_set_list.h
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -6,5 +6,6 @@
#define IP_SET_LIST_DEFAULT_SIZE 8
#define IP_SET_LIST_MIN_SIZE 4
+#define IP_SET_LIST_MAX_SIZE 65536
#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 8ab1c278b66d..c755e4971fa3 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -15,7 +15,7 @@ enum nf_br_hook_priorities {
NF_BR_PRI_LAST = INT_MAX,
};
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
@@ -24,16 +24,6 @@ enum nf_br_hook_priorities {
#define BRNF_8021Q 0x10
#define BRNF_PPPoE 0x20
-/* Only used in br_forward.c */
-int nf_bridge_copy_header(struct sk_buff *skb);
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
-{
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
- return nf_bridge_copy_header(skb);
- return 0;
-}
-
static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
switch (skb->protocol) {
@@ -46,6 +36,44 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
}
}
+static inline void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+ if (skb->nf_bridge->mask & BRNF_8021Q)
+ skb->protocol = htons(ETH_P_8021Q);
+ else if (skb->nf_bridge->mask & BRNF_PPPoE)
+ skb->protocol = htons(ETH_P_PPP_SES);
+}
+
+/* Fill in the header for fragmented IP packets handled by
+ * the IPv4 connection tracking code.
+ *
+ * Only used in br_forward.c
+ */
+static inline int nf_bridge_copy_header(struct sk_buff *skb)
+{
+ int err;
+ unsigned int header_size;
+
+ nf_bridge_update_protocol(skb);
+ header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+ err = skb_cow_head(skb, header_size);
+ if (err)
+ return err;
+
+ skb_copy_to_linear_data_offset(skb, -header_size,
+ skb->nf_bridge->data, header_size);
+ __skb_push(skb, nf_bridge_encap_header_len(skb));
+ return 0;
+}
+
+static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
+{
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
+ return nf_bridge_copy_header(skb);
+ return 0;
+}
+
static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9e572daa15d5..02fc86d2348e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,8 +46,8 @@ struct netlink_kernel_cfg {
unsigned int flags;
void (*input)(struct sk_buff *skb);
struct mutex *cb_mutex;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sk);
};
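For illustration, a hedged sketch (not taken from this patch) of a kernel-side netlink user adapting to the per-namespace bind()/unbind() prototypes; the names and the namespace policy are invented:

/* Hypothetical netlink family setup: the bind callback now also receives the
 * network namespace of the socket joining the multicast group.
 */
static int example_nl_bind(struct net *net, int group)
{
	/* Example policy: only allow group joins from the initial namespace. */
	return net_eq(net, &init_net) ? 0 : -EPERM;
}

static void example_nl_unbind(struct net *net, int group)
{
}

static struct netlink_kernel_cfg example_nl_cfg = {
	.groups	= 1,
	.bind	= example_nl_bind,
	.unbind	= example_nl_unbind,
};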
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index a1e3064a8d99..022b761dbf0a 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -110,6 +110,20 @@ enum nfs_opnum4 {
OP_DESTROY_CLIENTID = 57,
OP_RECLAIM_COMPLETE = 58,
+ /* nfs42 */
+ OP_ALLOCATE = 59,
+ OP_COPY = 60,
+ OP_COPY_NOTIFY = 61,
+ OP_DEALLOCATE = 62,
+ OP_IO_ADVISE = 63,
+ OP_LAYOUTERROR = 64,
+ OP_LAYOUTSTATS = 65,
+ OP_OFFLOAD_CANCEL = 66,
+ OP_OFFLOAD_STATUS = 67,
+ OP_READ_PLUS = 68,
+ OP_SEEK = 69,
+ OP_WRITE_SAME = 70,
+
OP_ILLEGAL = 10044,
};
@@ -117,10 +131,10 @@ enum nfs_opnum4 {
Needs to be updated if more operations are defined in the future. */
#define FIRST_NFS4_OP OP_ACCESS
-#define LAST_NFS4_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS4_OP OP_WRITE_SAME
#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
-#define LAST_NFS42_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS42_OP OP_WRITE_SAME
enum nfsstat4 {
NFS4_OK = 0,
@@ -235,10 +249,11 @@ enum nfsstat4 {
/* nfs42 */
NFS4ERR_PARTNER_NOTSUPP = 10088,
NFS4ERR_PARTNER_NO_AUTH = 10089,
- NFS4ERR_METADATA_NOTSUPP = 10090,
+ NFS4ERR_UNION_NOTSUPP = 10090,
NFS4ERR_OFFLOAD_DENIED = 10091,
NFS4ERR_WRONG_LFS = 10092,
NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
};
static inline bool seqid_mutating_err(u32 err)
@@ -472,6 +487,11 @@ enum {
NFSPROC4_CLNT_GETDEVICELIST,
NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
NFSPROC4_CLNT_DESTROY_CLIENTID,
+
+ /* nfs42 */
+ NFSPROC4_CLNT_SEEK,
+ NFSPROC4_CLNT_ALLOCATE,
+ NFSPROC4_CLNT_DEALLOCATE,
};
/* nfs41 types */
@@ -535,4 +555,9 @@ struct nfs4_deviceid {
char data[NFS4_DEVICEID4_SIZE];
};
+enum data_content4 {
+ NFS4_CONTENT_DATA = 0,
+ NFS4_CONTENT_HOLE = 1,
+};
+
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 5180a7ededec..6d627b92df53 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -163,7 +163,7 @@ struct nfs_inode {
*/
__be32 cookieverf[2];
- unsigned long npages;
+ unsigned long nrequests;
struct nfs_mds_commit_info commit_info;
/* Open contexts for shared mmap writes */
@@ -443,31 +443,15 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
}
/*
- * linux/fs/nfs/xattr.c
- */
-#ifdef CONFIG_NFS_V3_ACL
-extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
-extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs3_setxattr(struct dentry *, const char *,
- const void *, size_t, int);
-extern int nfs3_removexattr (struct dentry *, const char *name);
-#else
-# define nfs3_listxattr NULL
-# define nfs3_getxattr NULL
-# define nfs3_setxattr NULL
-# define nfs3_removexattr NULL
-#endif
-
-/*
* linux/fs/nfs/direct.c
*/
extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
struct iov_iter *iter,
- loff_t pos, bool uio);
+ loff_t pos);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
struct iov_iter *iter,
- loff_t pos, bool uio);
+ loff_t pos);
/*
* linux/fs/nfs/dir.c
@@ -529,22 +513,14 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
-#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
-#else
-static inline int
-nfs_commit_inode(struct inode *inode, int how)
-{
- return 0;
-}
-#endif
static inline int
nfs_have_writebacks(struct inode *inode)
{
- return NFS_I(inode)->npages != 0;
+ return NFS_I(inode)->nrequests != 0;
}
/*
@@ -557,23 +533,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
/*
- * linux/fs/nfs3proc.c
- */
-#ifdef CONFIG_NFS_V3_ACL
-extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type);
-extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
- struct posix_acl *dfacl);
-extern const struct xattr_handler *nfs3_xattr_handlers[];
-#else
-static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
- struct posix_acl *dfacl)
-{
- return 0;
-}
-#endif /* CONFIG_NFS_V3_ACL */
-
-/*
* inline functions
*/
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 922be2e050f5..1e37fbb78f7a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -230,5 +230,8 @@ struct nfs_server {
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17)
#define NFS_CAP_SECURITY_LABEL (1U << 18)
+#define NFS_CAP_SEEK (1U << 19)
+#define NFS_CAP_ALLOCATE (1U << 20)
+#define NFS_CAP_DEALLOCATE (1U << 21)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 0040629894df..467c84efb596 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -252,17 +252,6 @@ struct nfs4_layoutget {
gfp_t gfp_flags;
};
-struct nfs4_getdevicelist_args {
- struct nfs4_sequence_args seq_args;
- const struct nfs_fh *fh;
- u32 layoutclass;
-};
-
-struct nfs4_getdevicelist_res {
- struct nfs4_sequence_res seq_res;
- struct pnfs_devicelist *devlist;
-};
-
struct nfs4_getdeviceinfo_args {
struct nfs4_sequence_args seq_args;
struct pnfs_device *pdev;
@@ -279,6 +268,9 @@ struct nfs4_layoutcommit_args {
__u64 lastbytewritten;
struct inode *inode;
const u32 *bitmask;
+ size_t layoutupdate_len;
+ struct page *layoutupdate_page;
+ struct page **layoutupdate_pages;
};
struct nfs4_layoutcommit_res {
@@ -1232,13 +1224,57 @@ struct nfs41_free_stateid_res {
unsigned int status;
};
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+ kfree(cinfo->buckets);
+}
+
#else
struct pnfs_ds_commit_info {
};
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
+
#endif /* CONFIG_NFS_V4_1 */
+#ifdef CONFIG_NFS_V4_2
+struct nfs42_falloc_args {
+ struct nfs4_sequence_args seq_args;
+
+ struct nfs_fh *falloc_fh;
+ nfs4_stateid falloc_stateid;
+ u64 falloc_offset;
+ u64 falloc_length;
+};
+
+struct nfs42_falloc_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int status;
+};
+
+struct nfs42_seek_args {
+ struct nfs4_sequence_args seq_args;
+
+ struct nfs_fh *sa_fh;
+ nfs4_stateid sa_stateid;
+ u64 sa_offset;
+ u32 sa_what;
+};
+
+struct nfs42_seek_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int status;
+
+ u32 sr_eof;
+ u64 sr_offset;
+};
+#endif
+
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
@@ -1328,6 +1364,7 @@ struct nfs_commit_data {
struct pnfs_layout_segment *lseg;
struct nfs_client *ds_clp; /* pNFS data server */
int ds_commit_index;
+ loff_t lwb;
const struct rpc_call_ops *mds_ops;
const struct nfs_commit_completion_ops *completion_ops;
int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
@@ -1346,6 +1383,7 @@ struct nfs_unlinkdata {
struct inode *dir;
struct rpc_cred *cred;
struct nfs_fattr dir_attr;
+ long timeout;
};
struct nfs_renamedata {
@@ -1359,6 +1397,7 @@ struct nfs_renamedata {
struct dentry *new_dentry;
struct nfs_fattr new_fattr;
void (*complete)(struct rpc_task *, struct nfs_renamedata *);
+ long timeout;
};
struct nfs_access_entry;
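
The nfs42_seek_args/res pair added above carries the NFSv4.2 SEEK operation; sa_what selects between the data_content4 values (NFS4_CONTENT_DATA / NFS4_CONTENT_HOLE) introduced in nfs4.h. A hedged sketch of how a client-side llseek path might fill the arguments (not the actual fs/nfs/nfs42proc.c code; stateid handling and RPC setup omitted):

	static void fill_seek_args(struct nfs42_seek_args *args,
				   struct nfs_fh *fh, loff_t offset, int whence)
	{
		args->sa_fh = fh;
		args->sa_offset = offset;
		args->sa_what = (whence == SEEK_HOLE) ? NFS4_CONTENT_HOLE
						      : NFS4_CONTENT_DATA;
	}

On completion, sr_eof and sr_offset in nfs42_seek_res carry the server's answer back to the caller.
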
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 20163b9a0eae..167342c2ce6b 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef NL802154_H
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 1d2a6ab6b8bb..9b2022ab4d85 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -24,6 +24,19 @@ static inline void touch_nmi_watchdog(void)
}
#endif
+#if defined(CONFIG_HARDLOCKUP_DETECTOR)
+extern void watchdog_enable_hardlockup_detector(bool val);
+extern bool watchdog_hardlockup_detector_is_enabled(void);
+#else
+static inline void watchdog_enable_hardlockup_detector(bool val)
+{
+}
+static inline bool watchdog_hardlockup_detector_is_enabled(void)
+{
+ return true;
+}
+#endif
+
/*
* Create trigger_all_cpu_backtrace() out of the arch-provided
* base function. Return whether such support was available,
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
new file mode 100644
index 000000000000..85a5c8c16be9
--- /dev/null
+++ b/include/linux/ns_common.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_NS_COMMON_H
+#define _LINUX_NS_COMMON_H
+
+struct proc_ns_operations;
+
+struct ns_common {
+ atomic_long_t stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+};
+
+#endif
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2bf403195c09..258945fcabf1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -19,6 +19,7 @@
#include <linux/pci.h>
#include <linux/miscdevice.h>
#include <linux/kref.h>
+#include <linux/blk-mq.h>
struct nvme_bar {
__u64 cap; /* Controller Capabilities */
@@ -38,6 +39,7 @@ struct nvme_bar {
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
enum {
NVME_CC_ENABLE = 1 << 0,
@@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout;
*/
struct nvme_dev {
struct list_head node;
- struct nvme_queue __rcu **queues;
- unsigned short __percpu *io_queue;
+ struct nvme_queue **queues;
+ struct request_queue *admin_q;
+ struct blk_mq_tag_set tagset;
+ struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct pci_dev *pci_dev;
struct dma_pool *prp_page_pool;
@@ -90,15 +94,16 @@ struct nvme_dev {
struct miscdevice miscdev;
work_func_t reset_workfn;
struct work_struct reset_work;
- struct work_struct cpu_work;
char name[12];
char serial[20];
char model[40];
char firmware_rev[8];
u32 max_hw_sectors;
u32 stripe_size;
+ u32 page_size;
u16 oncs;
u16 abort_limit;
+ u8 event_limit;
u8 vwc;
u8 initialized;
};
@@ -132,7 +137,6 @@ struct nvme_iod {
int offset; /* Of PRP list */
int nents; /* Used in scatterlist */
int length; /* Of data, in bytes */
- unsigned long start_time;
dma_addr_t first_dma;
struct list_head node;
struct scatterlist sg[0];
@@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
*/
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
unsigned long addr, unsigned length);
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
+ struct nvme_command *, u32 *);
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
u32 *result);
int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
diff --git a/include/linux/of.h b/include/linux/of.h
index 6c4363b8ddc3..dfde07e77a63 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -23,6 +23,8 @@
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <linux/notifier.h>
+#include <linux/property.h>
+#include <linux/list.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -49,14 +51,13 @@ struct device_node {
const char *type;
phandle phandle;
const char *full_name;
+ struct fwnode_handle fwnode;
struct property *properties;
struct property *deadprops; /* removed properties */
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
struct kobject kobj;
unsigned long _flags;
void *data;
@@ -74,11 +75,18 @@ struct of_phandle_args {
uint32_t args[MAX_PHANDLE_ARGS];
};
+struct of_reconfig_data {
+ struct device_node *dn;
+ struct property *prop;
+ struct property *old_prop;
+};
+
/* initialize a node */
extern struct kobj_type of_node_ktype;
static inline void of_node_init(struct device_node *node)
{
kobject_init(&node->kobj, &of_node_ktype);
+ node->fwnode.type = FWNODE_OF;
}
/* true when node is initialized */
@@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node)
static inline void of_node_put(struct device_node *node) { }
#endif /* !CONFIG_OF_DYNAMIC */
-#ifdef CONFIG_OF
-
/* Pointer for first entry in chain of all nodes. */
-extern struct device_node *of_allnodes;
+extern struct device_node *of_root;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
+#ifdef CONFIG_OF
+static inline bool is_of_node(struct fwnode_handle *fwnode)
+{
+ return fwnode && fwnode->type == FWNODE_OF;
+}
+
+static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+{
+ return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+}
+
static inline bool of_have_populated_dt(void)
{
- return of_allnodes != NULL;
+ return of_root != NULL;
}
static inline bool of_node_is_root(const struct device_node *node)
@@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
clear_bit(flag, &p->_flags);
}
+extern struct device_node *__of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_find_all_nodes(struct device_node *prev);
/*
@@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np)
return np ? np->full_name : "<no-node>";
}
-#define for_each_of_allnodes(dn) \
- for (dn = of_allnodes; dn; dn = dn->allnext)
+#define for_each_of_allnodes_from(from, dn) \
+ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn))
+#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
extern struct device_node *of_find_node_by_type(struct device_node *from,
@@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match(
const struct of_device_id *matches,
const struct of_device_id **match);
-extern struct device_node *of_find_node_by_path(const char *path);
+extern struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts);
+static inline struct device_node *of_find_node_by_path(const char *path)
+{
+ return of_find_node_opts_by_path(path, NULL);
+}
+
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_parent(struct device_node *node);
@@ -263,21 +288,23 @@ extern int of_property_read_u32_array(const struct device_node *np,
size_t sz);
extern int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value);
+extern int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values,
+ size_t sz);
extern int of_property_read_string(struct device_node *np,
const char *propname,
const char **out_string);
-extern int of_property_read_string_index(struct device_node *np,
- const char *propname,
- int index, const char **output);
extern int of_property_match_string(struct device_node *np,
const char *propname,
const char *string);
-extern int of_property_count_strings(struct device_node *np,
- const char *propname);
+extern int of_property_read_string_helper(struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_is_available(const struct device_node *device);
+extern bool of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
@@ -319,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
-struct of_prop_reconfig {
- struct device_node *dn;
- struct property *prop;
- struct property *old_prop;
-};
-
-extern int of_reconfig_notifier_register(struct notifier_block *);
-extern int of_reconfig_notifier_unregister(struct notifier_block *);
-extern int of_reconfig_notify(unsigned long, void *);
-
extern int of_attach_node(struct device_node *);
extern int of_detach_node(struct device_node *);
@@ -357,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index);
#else /* CONFIG_OF */
+static inline bool is_of_node(struct fwnode_handle *fwnode)
+{
+ return false;
+}
+
+static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
static inline const char* of_node_full_name(const struct device_node *np)
{
return "<no-node>";
@@ -387,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path)
return NULL;
}
+static inline struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_parent(const struct device_node *node)
{
return NULL;
@@ -428,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_is_available(const struct device_node *device)
+static inline bool of_device_is_available(const struct device_node *device)
{
- return 0;
+ return false;
}
static inline struct property *of_find_property(const struct device_node *np,
@@ -479,22 +512,23 @@ static inline int of_property_read_u32_array(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_string(struct device_node *np,
- const char *propname,
- const char **out_string)
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
{
return -ENOSYS;
}
-static inline int of_property_read_string_index(struct device_node *np,
- const char *propname, int index,
- const char **out_string)
+static inline int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string)
{
return -ENOSYS;
}
-static inline int of_property_count_strings(struct device_node *np,
- const char *propname)
+static inline int of_property_read_string_helper(struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index)
{
return -ENOSYS;
}
@@ -668,6 +702,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
}
/**
+ * of_property_read_string_array() - Read an array of strings from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_strs: output array of string pointers.
+ * @sz: number of array elements to read.
+ *
+ * Search for a property in a device tree node and retrieve a list of
+ * null-terminated string values (pointer to data, not a copy) in that property.
+ *
+ * If @out_strs is NULL, the number of strings in the property is returned.
+ */
+static inline int of_property_read_string_array(struct device_node *np,
+ const char *propname, const char **out_strs,
+ size_t sz)
+{
+ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
+}
+
+/**
+ * of_property_count_strings() - Find and return the number of strings from a
+ * multiple strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device tree node and retrieve the number of
+ * null-terminated strings contained in it. Returns the number of strings on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ */
+static inline int of_property_count_strings(struct device_node *np,
+ const char *propname)
+{
+ return of_property_read_string_helper(np, propname, NULL, 0, 0);
+}
+
+/**
+ * of_property_read_string_index() - Find and read a string from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the string in the list of strings
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy) in the list of strings
+ * contained in that property.
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+static inline int of_property_read_string_index(struct device_node *np,
+ const char *propname,
+ int index, const char **output)
+{
+ int rc = of_property_read_string_helper(np, propname, output, 1, index);
+ return rc < 0 ? rc : 0;
+}
+
+/**
 * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
@@ -704,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np,
return of_property_read_u32_array(np, propname, out_value, 1);
}
+static inline int of_property_read_s32(const struct device_node *np,
+ const char *propname,
+ s32 *out_value)
+{
+ return of_property_read_u32(np, propname, (u32*) out_value);
+}
+
#define of_property_for_each_u32(np, propname, prop, p, u) \
for (prop = of_find_property(np, propname, NULL), \
p = of_prop_next_u32(prop, NULL, &u); \
@@ -772,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
-#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__attribute__((unused)) \
= { .compatible = compat, \
@@ -823,7 +928,19 @@ struct of_changeset {
struct list_head entries;
};
+enum of_reconfig_change {
+ OF_RECONFIG_NO_CHANGE = 0,
+ OF_RECONFIG_CHANGE_ADD,
+ OF_RECONFIG_CHANGE_REMOVE,
+};
+
#ifdef CONFIG_OF_DYNAMIC
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
+extern int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg);
+
extern void of_changeset_init(struct of_changeset *ocs);
extern void of_changeset_destroy(struct of_changeset *ocs);
extern int of_changeset_apply(struct of_changeset *ocs);
@@ -861,6 +978,69 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notify(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
+/**
+ * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
+ * @np: Pointer to the given device_node
+ *
+ * Returns true if present, false otherwise
+ */
+static inline bool of_device_is_system_power_controller(const struct device_node *np)
+{
+ return of_property_read_bool(np, "system-power-controller");
+}
+
+/**
+ * Overlay support
+ */
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* ID based overlays; the API for external users */
+int of_overlay_create(struct device_node *tree);
+int of_overlay_destroy(int id);
+int of_overlay_destroy_all(void);
+
+#else
+
+static inline int of_overlay_create(struct device_node *tree)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy(int id)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy_all(void)
+{
+ return -ENOTSUPP;
+}
+
#endif
#endif /* _LINUX_OF_H */
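
The reworked string accessors above (of_property_read_string_array, of_property_count_strings, of_property_read_string_index) are now thin wrappers around of_property_read_string_helper(). A minimal usage sketch, assuming a node carrying a hypothetical "labels" string-list property:

	static int show_labels(struct device_node *np)
	{
		const char *label;
		int count, i, ret;

		count = of_property_count_strings(np, "labels");
		if (count < 0)
			return count;	/* -EINVAL, -ENODATA or -EILSEQ */

		for (i = 0; i < count; i++) {
			ret = of_property_read_string_index(np, "labels", i, &label);
			if (ret)
				return ret;
			pr_info("label[%d] = %s\n", i, label);
		}
		return 0;
	}

The same property could instead be fetched in one call with of_property_read_string_array() if the caller supplies an array of count string pointers.
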
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index fb7b7221e063..d88e81be6368 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -23,17 +23,6 @@ struct of_pci_range {
#define for_each_of_pci_range(parser, range) \
for (; of_pci_range_parser_one(parser, range);)
-static inline void of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
-{
- res->flags = range->flags;
- res->start = range->cpu_addr;
- res->end = range->cpu_addr + range->size - 1;
- res->parent = res->child = res->sibling = NULL;
- res->name = np->full_name;
-}
-
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
@@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
+extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
extern unsigned long pci_address_to_pio(phys_addr_t addr);
+extern phys_addr_t pci_pio_to_address(unsigned long pio);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
return NULL;
}
+static inline phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+ return 0;
+}
+
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
@@ -110,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
void __iomem *of_iomap(struct device_node *node, int index);
void __iomem *of_io_request_and_map(struct device_node *device,
- int index, char *name);
+ int index, const char *name);
#else
#include <linux/io.h>
@@ -127,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
}
static inline void __iomem *of_io_request_and_map(struct device_node *device,
- int index, char *name)
+ int index, const char *name)
{
return IOMEM_ERR_PTR(-EINVAL);
}
@@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
u64 *size, unsigned int *flags);
extern int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r)
@@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
{
return NULL;
}
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 51a560f34bca..16c75547d725 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -1,12 +1,19 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+
#ifdef CONFIG_OF_IOMMU
extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size);
+extern void of_iommu_init(void);
+extern struct iommu_ops *of_iommu_configure(struct device *dev);
+
#else
static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
return -EINVAL;
}
+static inline void of_iommu_init(void) { }
+static inline struct iommu_ops *of_iommu_configure(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_OF_IOMMU */
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
+struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+
+extern struct of_device_id __iommu_of_table;
+
+typedef int (*of_iommu_init_fn)(struct device_node *);
+
+#define IOMMU_OF_DECLARE(name, compat, fn) \
+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+
#endif /* __OF_IOMMU_H */
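
IOMMU_OF_DECLARE() expands to _OF_DECLARE() from of.h, so an IOMMU driver registers an init hook keyed on its compatible string and matching the of_iommu_init_fn signature. A sketch with hypothetical names:

	static int __init my_iommu_of_init(struct device_node *np)
	{
		/* parse np and register the IOMMU instance */
		return 0;
	}
	IOMMU_OF_DECLARE(my_iommu, "vendor,my-iommu", my_iommu_of_init);

of_iommu_init() is the early-boot hook that walks the generated __iommu_of_table and invokes each init function whose compatible matches a node in the device tree.
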
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index dde3a4a0fa5d..ce0e5abeb454 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -43,16 +44,28 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
return -EINVAL;
}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+#endif
+
+#if defined(CONFIG_OF_ADDRESS)
+int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
#endif
#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
-int of_pci_msi_chip_add(struct msi_chip *chip);
-void of_pci_msi_chip_remove(struct msi_chip *chip);
-struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node);
+int of_pci_msi_chip_add(struct msi_controller *chip);
+void of_pci_msi_chip_remove(struct msi_controller *chip);
+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
#else
-static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; }
-static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { }
-static inline struct msi_chip *
+static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
+static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
+static inline struct msi_controller *
of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
#endif
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index c65a18a0cfdf..7e09244bb679 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size);
/* for building the device tree */
extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
-extern void (*of_pdt_build_more)(struct device_node *dp,
- struct device_node ***nextp);
+extern void (*of_pdt_build_more)(struct device_node *dp);
#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index c2b0627a2317..8a860f096c35 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root,
static inline void of_platform_depopulate(struct device *parent) { }
#endif
+#ifdef CONFIG_OF_DYNAMIC
+extern void of_platform_register_reconfig_notifier(void);
+#else
+static inline void of_platform_register_reconfig_notifier(void) { }
+#endif
+
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 5b5efae09135..ad2f67054372 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
};
struct reserved_mem_ops {
- void (*device_init)(struct reserved_mem *rmem,
+ int (*device_init)(struct reserved_mem *rmem,
struct device *dev);
void (*device_release)(struct reserved_mem *rmem,
struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
#ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
+int of_reserved_mem_device_init(struct device *dev);
void of_reserved_mem_device_release(struct device *dev);
void fdt_init_reserved_mem(void);
void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size);
#else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+ return -ENOSYS;
+}
static inline void of_reserved_mem_device_release(struct device *pdev) { }
static inline void fdt_init_reserved_mem(void) { }
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 6f06f8bc612c..e5a70132a240 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -306,15 +306,12 @@ extern void omap_set_dma_transfer_params(int lch, int data_type,
int elem_count, int frame_count,
int sync_mode,
int dma_trigger, int src_or_dst_synch);
-extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
- u32 color);
extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
unsigned long src_start,
int src_ei, int src_fi);
-extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
extern void omap_set_dma_src_data_pack(int lch, int enable);
extern void omap_set_dma_src_burst_mode(int lch,
enum omap_dma_burst_mode burst_mode);
@@ -322,7 +319,6 @@ extern void omap_set_dma_src_burst_mode(int lch,
extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
unsigned long dest_start,
int dst_ei, int dst_fi);
-extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
extern void omap_set_dma_dest_data_pack(int lch, int enable);
extern void omap_set_dma_dest_burst_mode(int lch,
enum omap_dma_burst_mode burst_mode);
@@ -331,52 +327,19 @@ extern void omap_set_dma_params(int lch,
struct omap_dma_channel_params *params);
extern void omap_dma_link_lch(int lch_head, int lch_queue);
-extern void omap_dma_unlink_lch(int lch_head, int lch_queue);
extern int omap_set_dma_callback(int lch,
void (*callback)(int lch, u16 ch_status, void *data),
void *data);
extern dma_addr_t omap_get_dma_src_pos(int lch);
extern dma_addr_t omap_get_dma_dst_pos(int lch);
-extern void omap_clear_dma(int lch);
extern int omap_get_dma_active_status(int lch);
extern int omap_dma_running(void);
extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
int tparams);
-extern int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
- unsigned char write_prio);
-extern void omap_set_dma_dst_endian_type(int lch, enum end_type etype);
-extern void omap_set_dma_src_endian_type(int lch, enum end_type etype);
-extern int omap_get_dma_index(int lch, int *ei, int *fi);
-
void omap_dma_global_context_save(void);
void omap_dma_global_context_restore(void);
-extern void omap_dma_disable_irq(int lch);
-
-/* Chaining APIs */
-#ifndef CONFIG_ARCH_OMAP1
-extern int omap_request_dma_chain(int dev_id, const char *dev_name,
- void (*callback) (int lch, u16 ch_status,
- void *data),
- int *chain_id, int no_of_chans,
- int chain_mode,
- struct omap_dma_channel_params params);
-extern int omap_free_dma_chain(int chain_id);
-extern int omap_dma_chain_a_transfer(int chain_id, int src_start,
- int dest_start, int elem_count,
- int frame_count, void *callbk_data);
-extern int omap_start_dma_chain_transfers(int chain_id);
-extern int omap_stop_dma_chain_transfers(int chain_id);
-extern int omap_get_dma_chain_index(int chain_id, int *ei, int *fi);
-extern int omap_get_dma_chain_dst_pos(int chain_id);
-extern int omap_get_dma_chain_src_pos(int chain_id);
-
-extern int omap_modify_dma_chain_params(int chain_id,
- struct omap_dma_channel_params params);
-extern int omap_dma_chain_status(int chain_id);
-#endif
-
#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
#include <mach/lcd_dma.h>
#else
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
new file mode 100644
index 000000000000..c2080eebbb47
--- /dev/null
+++ b/include/linux/omap-gpmc.h
@@ -0,0 +1,199 @@
+/*
+ * OMAP GPMC (General Purpose Memory Controller) defines
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* Maximum Number of Chip Selects */
+#define GPMC_CS_NUM 8
+
+#define GPMC_CONFIG_WP 0x00000005
+
+#define GPMC_IRQ_FIFOEVENTENABLE 0x01
+#define GPMC_IRQ_COUNT_EVENT 0x02
+
+#define GPMC_BURST_4 4 /* 4 word burst */
+#define GPMC_BURST_8 8 /* 8 word burst */
+#define GPMC_BURST_16 16 /* 16 word burst */
+#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */
+#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */
+#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
+#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+
+/* bool type time settings */
+struct gpmc_bool_timings {
+ bool cycle2cyclediffcsen;
+ bool cycle2cyclesamecsen;
+ bool we_extra_delay;
+ bool oe_extra_delay;
+ bool adv_extra_delay;
+ bool cs_extra_delay;
+ bool time_para_granularity;
+};
+
+/*
+ * Note that all values in this struct are in nanoseconds except sync_clk
+ * (which is in picoseconds), while the register values are in gpmc_fck cycles.
+ */
+struct gpmc_timings {
+ /* Minimum clock period for synchronous mode (in picoseconds) */
+ u32 sync_clk;
+
+ /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
+ u32 cs_on; /* Assertion time */
+ u32 cs_rd_off; /* Read deassertion time */
+ u32 cs_wr_off; /* Write deassertion time */
+
+ /* ADV signal timings corresponding to GPMC_CONFIG3 */
+ u32 adv_on; /* Assertion time */
+ u32 adv_rd_off; /* Read deassertion time */
+ u32 adv_wr_off; /* Write deassertion time */
+
+ /* WE signals timings corresponding to GPMC_CONFIG4 */
+ u32 we_on; /* WE assertion time */
+ u32 we_off; /* WE deassertion time */
+
+ /* OE signals timings corresponding to GPMC_CONFIG4 */
+ u32 oe_on; /* OE assertion time */
+ u32 oe_off; /* OE deassertion time */
+
+ /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
+ u32 page_burst_access; /* Multiple access word delay */
+ u32 access; /* Start-cycle to first data valid delay */
+ u32 rd_cycle; /* Total read cycle time */
+ u32 wr_cycle; /* Total write cycle time */
+
+ u32 bus_turnaround;
+ u32 cycle2cycle_delay;
+
+ u32 wait_monitoring;
+ u32 clk_activation;
+
+ /* The following are only on OMAP3430 */
+ u32 wr_access; /* WRACCESSTIME */
+ u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+
+ struct gpmc_bool_timings bool_timings;
+};
+
+/* Device timings in picoseconds */
+struct gpmc_device_timings {
+ u32 t_ceasu; /* address setup to CS valid */
+ u32 t_avdasu; /* address setup to ADV valid */
+ /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
+ * of tusb using these timings even for sync whilst
+ * ideally for adv_rd/(wr)_off it should have considered
+ * t_avdh instead. This indirectly necessitates r/w
+ * variations of t_avdp as it is possible to have one
+ * sync & other async
+ */
+ u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
+ u32 t_avdp_w;
+ u32 t_aavdh; /* address hold time */
+ u32 t_oeasu; /* address setup to OE valid */
+ u32 t_aa; /* access time from ADV assertion */
+ u32 t_iaa; /* initial access time */
+ u32 t_oe; /* access time from OE assertion */
+ u32 t_ce; /* access time from CS assertion */
+ u32 t_rd_cycle; /* read cycle time */
+ u32 t_cez_r; /* read CS deassertion to high Z */
+ u32 t_cez_w; /* write CS deassertion to high Z */
+ u32 t_oez; /* OE deassertion to high Z */
+ u32 t_weasu; /* address setup to WE valid */
+ u32 t_wpl; /* write assertion time */
+ u32 t_wph; /* write deassertion time */
+ u32 t_wr_cycle; /* write cycle time */
+
+ u32 clk;
+ u32 t_bacc; /* burst access valid clock to output delay */
+ u32 t_ces; /* CS setup time to clk */
+ u32 t_avds; /* ADV setup time to clk */
+ u32 t_avdh; /* ADV hold time from clk */
+ u32 t_ach; /* address hold time from clk */
+ u32 t_rdyo; /* clk to ready valid */
+
+ u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
+ u32 t_ce_avd; /* CS on to ADV on delay */
+
+ /* XXX: check the possibility of combining
+ * cyc_aavhd_oe & cyc_aavdh_we
+ */
+ u8 cyc_aavdh_oe;/* read address hold time in cycles */
+ u8 cyc_aavdh_we;/* write address hold time in cycles */
+ u8 cyc_oe; /* access time from OE assertion in cycles */
+ u8 cyc_wpl; /* write deassertion time in cycles */
+ u32 cyc_iaa; /* initial access time in cycles */
+
+ /* extra delays */
+ bool ce_xdelay;
+ bool avd_xdelay;
+ bool oe_xdelay;
+ bool we_xdelay;
+};
+
+struct gpmc_settings {
+ bool burst_wrap; /* enables wrap bursting */
+ bool burst_read; /* enables read page/burst mode */
+ bool burst_write; /* enables write page/burst mode */
+ bool device_nand; /* device is NAND */
+ bool sync_read; /* enables synchronous reads */
+ bool sync_write; /* enables synchronous writes */
+ bool wait_on_read; /* monitor wait on reads */
+ bool wait_on_write; /* monitor wait on writes */
+ u32 burst_len; /* page/burst length */
+ u32 device_width; /* device bus width (8 or 16 bit) */
+ u32 mux_add_data; /* multiplex address & data */
+ u32 wait_pin; /* wait-pin to be used */
+};
+
+extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_settings *gpmc_s,
+ struct gpmc_device_timings *dev_t);
+
+struct gpmc_nand_regs;
+struct device_node;
+
+extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
+extern int gpmc_get_client_irq(unsigned irq_config);
+
+extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
+
+extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
+extern int gpmc_calc_divider(unsigned int sync_clk);
+extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
+extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
+extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
+extern void gpmc_cs_free(int cs);
+extern int gpmc_configure(int cmd, int wval);
+extern void gpmc_read_settings_dt(struct device_node *np,
+ struct gpmc_settings *p);
+
+extern void omap3_gpmc_save_context(void);
+extern void omap3_gpmc_restore_context(void);
+
+struct gpmc_timings;
+struct omap_nand_platform_data;
+struct omap_onenand_platform_data;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
+extern int gpmc_nand_init(struct omap_nand_platform_data *d,
+ struct gpmc_timings *gpmc_t);
+#else
+static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
+ struct gpmc_timings *gpmc_t)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
+extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+#else
+#define board_onenand_data NULL
+static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+{
+}
+#endif
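
Taken together, the exported helpers in this new header suggest the usual flow for attaching a peripheral to a GPMC chip select: request the CS, program its settings, then apply timings derived via gpmc_calc_timings(). A rough sketch (chip-select number, mapping size and timings are hypothetical):

	static int my_gpmc_attach(struct gpmc_settings *s,
				  struct gpmc_device_timings *dev_t)
	{
		struct gpmc_timings t;
		unsigned long base;
		int cs = 1, ret;

		ret = gpmc_cs_request(cs, SZ_16M, &base);
		if (ret)
			return ret;

		ret = gpmc_cs_program_settings(cs, s);
		if (!ret)
			ret = gpmc_calc_timings(&t, s, dev_t);
		if (!ret)
			ret = gpmc_cs_set_timings(cs, &t);
		if (ret)
			gpmc_cs_free(cs);
		return ret;
	}
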
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index f8322d9cd235..587bbdd31f5a 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -10,20 +10,20 @@
#define OMAP_MAILBOX_H
typedef u32 mbox_msg_t;
-struct omap_mbox;
typedef int __bitwise omap_mbox_irq_t;
#define IRQ_TX ((__force omap_mbox_irq_t) 1)
#define IRQ_RX ((__force omap_mbox_irq_t) 2)
-int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg);
+struct mbox_chan;
+struct mbox_client;
-struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb);
-void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb);
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name);
-void omap_mbox_save_ctx(struct omap_mbox *mbox);
-void omap_mbox_restore_ctx(struct omap_mbox *mbox);
-void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq);
-void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq);
+void omap_mbox_save_ctx(struct mbox_chan *chan);
+void omap_mbox_restore_ctx(struct mbox_chan *chan);
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
#endif /* OMAP_MAILBOX_H */
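
The omap_mbox_get()/omap_mbox_put() pair is replaced by a request on top of the generic mailbox framework, so a user now supplies a struct mbox_client and receives a struct mbox_chan. A hedged sketch (channel name hypothetical):

	#include <linux/mailbox_client.h>

	static struct mbox_chan *get_ipc_channel(struct device *dev)
	{
		static struct mbox_client client;

		client.dev = dev;
		client.tx_block = true;

		return omap_mbox_request_channel(&client, "wkup_m3");
	}

Messages are then sent with the framework's mbox_send_message(), while the remaining omap_mbox_* helpers above keep handling the OMAP-specific IRQ enable/disable and context save/restore.
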
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 647395a1a550..853698c721f7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
+
+extern int oom_kills_count(void);
+extern void note_oom_kill(void);
extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
unsigned int points, unsigned long totalpages,
struct mem_cgroup *memcg, nodemask_t *nodemask,
@@ -89,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask)
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
+static inline bool task_will_free_mem(struct task_struct *task)
+{
+ /*
+ * A coredumping process may sleep for an extended period in exit_mm(),
+ * so the oom killer cannot assume that the process will promptly exit
+ * and release memory.
+ */
+ return (task->flags & PF_EXITING) &&
+ !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
+}
+
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
deleted file mode 100644
index 22691f614043..000000000000
--- a/include/linux/page-debug-flags.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef LINUX_PAGE_DEBUG_FLAGS_H
-#define LINUX_PAGE_DEBUG_FLAGS_H
-
-/*
- * page->debug_flags bits:
- *
- * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
- * implement generic debug pagealloc feature. The pages are filled with
- * poison patterns and set this flag after free_pages(). The poisoned
- * pages are verified whether the patterns are not corrupted and clear
- * the flag before alloc_pages().
- */
-
-enum page_debug_flags {
- PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
- PAGE_DEBUG_FLAG_GUARD,
-};
-
-/*
- * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
- * gets turned off when no debug features are enabling it!
- */
-
-#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) && \
- !defined(CONFIG_PAGE_GUARD) \
-/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
-#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
-#endif
-#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
-
-#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3fff8e774067..2dc1e1697b45 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,6 +2,10 @@
#define __LINUX_PAGEISOLATION_H
#ifdef CONFIG_MEMORY_ISOLATION
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+ return zone->nr_isolate_pageblock;
+}
static inline bool is_migrate_isolate_page(struct page *page)
{
return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
return migratetype == MIGRATE_ISOLATE;
}
#else
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+ return false;
+}
static inline bool is_migrate_isolate_page(struct page *page)
{
return false;
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
deleted file mode 100644
index 5c831f1eca79..000000000000
--- a/include/linux/page_cgroup.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef __LINUX_PAGE_CGROUP_H
-#define __LINUX_PAGE_CGROUP_H
-
-enum {
- /* flags for mem_cgroup */
- PCG_USED = 0x01, /* This page is charged to a memcg */
- PCG_MEM = 0x02, /* This page holds a memory charge */
- PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */
-};
-
-struct pglist_data;
-
-#ifdef CONFIG_MEMCG
-struct mem_cgroup;
-
-/*
- * Page Cgroup can be considered as an extended mem_map.
- * A page_cgroup page is associated with every page descriptor. The
- * page_cgroup helps us identify information about the cgroup
- * All page cgroups are allocated at boot or memory hotplug event,
- * then the page cgroup for pfn always exists.
- */
-struct page_cgroup {
- unsigned long flags;
- struct mem_cgroup *mem_cgroup;
-};
-
-extern void pgdat_page_cgroup_init(struct pglist_data *pgdat);
-
-#ifdef CONFIG_SPARSEMEM
-static inline void page_cgroup_init_flatmem(void)
-{
-}
-extern void page_cgroup_init(void);
-#else
-extern void page_cgroup_init_flatmem(void);
-static inline void page_cgroup_init(void)
-{
-}
-#endif
-
-struct page_cgroup *lookup_page_cgroup(struct page *page);
-
-static inline int PageCgroupUsed(struct page_cgroup *pc)
-{
- return !!(pc->flags & PCG_USED);
-}
-#else /* !CONFIG_MEMCG */
-struct page_cgroup;
-
-static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
-{
-}
-
-static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
-{
- return NULL;
-}
-
-static inline void page_cgroup_init(void)
-{
-}
-
-static inline void page_cgroup_init_flatmem(void)
-{
-}
-#endif /* CONFIG_MEMCG */
-
-#include <linux/swap.h>
-
-#ifdef CONFIG_MEMCG_SWAP
-extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
- unsigned short old, unsigned short new);
-extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
-extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
-extern int swap_cgroup_swapon(int type, unsigned long max_pages);
-extern void swap_cgroup_swapoff(int type);
-#else
-
-static inline
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
-{
- return 0;
-}
-
-static inline
-unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
-{
- return 0;
-}
-
-static inline int
-swap_cgroup_swapon(int type, unsigned long max_pages)
-{
- return 0;
-}
-
-static inline void swap_cgroup_swapoff(int type)
-{
- return;
-}
-
-#endif /* CONFIG_MEMCG_SWAP */
-
-#endif /* __LINUX_PAGE_CGROUP_H */
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
new file mode 100644
index 000000000000..955421575d16
--- /dev/null
+++ b/include/linux/page_counter.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_PAGE_COUNTER_H
+#define _LINUX_PAGE_COUNTER_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+struct page_counter {
+ atomic_long_t count;
+ unsigned long limit;
+ struct page_counter *parent;
+
+ /* legacy */
+ unsigned long watermark;
+ unsigned long failcnt;
+};
+
+#if BITS_PER_LONG == 32
+#define PAGE_COUNTER_MAX LONG_MAX
+#else
+#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
+#endif
+
+static inline void page_counter_init(struct page_counter *counter,
+ struct page_counter *parent)
+{
+ atomic_long_set(&counter->count, 0);
+ counter->limit = PAGE_COUNTER_MAX;
+ counter->parent = parent;
+}
+
+static inline unsigned long page_counter_read(struct page_counter *counter)
+{
+ return atomic_long_read(&counter->count);
+}
+
+void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_try_charge(struct page_counter *counter,
+ unsigned long nr_pages,
+ struct page_counter **fail);
+void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_limit(struct page_counter *counter, unsigned long limit);
+int page_counter_memparse(const char *buf, unsigned long *nr_pages);
+
+static inline void page_counter_reset_watermark(struct page_counter *counter)
+{
+ counter->watermark = page_counter_read(counter);
+}
+
+#endif /* _LINUX_PAGE_COUNTER_H */
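
The counters form a hierarchy through ->parent, and page_counter_try_charge() reports, via its @fail argument, which counter in the chain ran into its limit. A hedged usage sketch (limit and error handling simplified; the real charge semantics live in mm/page_counter.c):

	static struct page_counter parent_cnt, child_cnt;

	static int charge_example(unsigned long nr_pages)
	{
		struct page_counter *fail;

		page_counter_init(&parent_cnt, NULL);
		page_counter_init(&child_cnt, &parent_cnt);
		page_counter_limit(&parent_cnt, 1024);	/* hypothetical 1024-page cap */

		if (page_counter_try_charge(&child_cnt, nr_pages, &fail))
			return -ENOMEM;		/* @fail is the counter over its limit */

		/* ... nr_pages are now accounted in both child and parent ... */
		page_counter_uncharge(&child_cnt, nr_pages);
		return 0;
	}
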
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
new file mode 100644
index 000000000000..d2a2c84c72d0
--- /dev/null
+++ b/include/linux/page_ext.h
@@ -0,0 +1,84 @@
+#ifndef __LINUX_PAGE_EXT_H
+#define __LINUX_PAGE_EXT_H
+
+#include <linux/types.h>
+#include <linux/stacktrace.h>
+
+struct pglist_data;
+struct page_ext_operations {
+ bool (*need)(void);
+ void (*init)(void);
+};
+
+#ifdef CONFIG_PAGE_EXTENSION
+
+/*
+ * page_ext->flags bits:
+ *
+ * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
+ * implement the generic debug pagealloc feature. Pages are filled with
+ * poison patterns and this flag is set after free_pages(); before
+ * alloc_pages() the patterns are verified to still be intact and the
+ * flag is cleared.
+ */
+
+enum page_ext_flags {
+ PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
+ PAGE_EXT_DEBUG_GUARD,
+ PAGE_EXT_OWNER,
+};
+
+/*
+ * Page Extension can be considered as an extended mem_map.
+ * A page_ext page is associated with every page descriptor. The
+ * page_ext helps us add more information about the page.
+ * All page_ext are allocated at boot or memory hotplug event,
+ * then the page_ext for pfn always exists.
+ */
+struct page_ext {
+ unsigned long flags;
+#ifdef CONFIG_PAGE_OWNER
+ unsigned int order;
+ gfp_t gfp_mask;
+ struct stack_trace trace;
+ unsigned long trace_entries[8];
+#endif
+};
+
+extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void page_ext_init_flatmem(void)
+{
+}
+extern void page_ext_init(void);
+#else
+extern void page_ext_init_flatmem(void);
+static inline void page_ext_init(void)
+{
+}
+#endif
+
+struct page_ext *lookup_page_ext(struct page *page);
+
+#else /* !CONFIG_PAGE_EXTENSION */
+struct page_ext;
+
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+{
+}
+
+static inline struct page_ext *lookup_page_ext(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_init(void)
+{
+}
+
+static inline void page_ext_init_flatmem(void)
+{
+}
+#endif /* CONFIG_PAGE_EXTENSION */
+#endif /* __LINUX_PAGE_EXT_H */
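
A page_ext client hands the core a struct page_ext_operations so the extended descriptors are only allocated when at least one client reports that it needs them, and per-page state is then reached through lookup_page_ext(). A sketch of a hypothetical client (page_owner_ops in page_owner.h below is a real one):

	static bool my_debug_need(void)
	{
		/* only pay the memory cost when the feature is enabled */
		return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC);
	}

	static void my_debug_init(void)
	{
		/* one-time setup after the page_ext array is allocated */
	}

	struct page_ext_operations my_debug_ops = {
		.need = my_debug_need,
		.init = my_debug_init,
	};

	static bool my_page_is_poisoned(struct page *page)
	{
		struct page_ext *ext = lookup_page_ext(page);

		return ext && test_bit(PAGE_EXT_DEBUG_POISON, &ext->flags);
	}
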
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
new file mode 100644
index 000000000000..b48c3471c254
--- /dev/null
+++ b/include/linux/page_owner.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_PAGE_OWNER_H
+#define __LINUX_PAGE_OWNER_H
+
+#ifdef CONFIG_PAGE_OWNER
+extern bool page_owner_inited;
+extern struct page_ext_operations page_owner_ops;
+
+extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask);
+
+static inline void reset_page_owner(struct page *page, unsigned int order)
+{
+ if (likely(!page_owner_inited))
+ return;
+
+ __reset_page_owner(page, order);
+}
+
+static inline void set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask)
+{
+ if (likely(!page_owner_inited))
+ return;
+
+ __set_page_owner(page, order, gfp_mask);
+}
+#else
+static inline void reset_page_owner(struct page *page, unsigned int order)
+{
+}
+static inline void set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask)
+{
+}
+
+#endif /* CONFIG_PAGE_OWNER */
+#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3df8c7db7a4e..4b3736f7065c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,8 +24,7 @@ enum mapping_flags {
AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
- AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */
- AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
+ AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};
static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
return !!mapping;
}
-static inline void mapping_set_balloon(struct address_space *mapping)
-{
- set_bit(AS_BALLOON_MAP, &mapping->flags);
-}
-
-static inline void mapping_clear_balloon(struct address_space *mapping)
-{
- clear_bit(AS_BALLOON_MAP, &mapping->flags);
-}
-
-static inline int mapping_balloon(struct address_space *mapping)
-{
- return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
-}
-
static inline void mapping_set_exiting(struct address_space *mapping)
{
set_bit(AS_EXITING, &mapping->flags);
@@ -96,7 +80,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
}
/*
- * The page cache can done in larger chunks than
+ * The page cache can be done in larger chunks than
* one page, because it allows for more efficient
* throughput (it can then be mapped into user
* space in smaller chunks for same flexibility).
@@ -267,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
#define FGP_NOWAIT 0x00000020
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+ int fgp_flags, gfp_t cache_gfp_mask);
/**
* find_get_page - find and get a page reference
@@ -282,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, 0, 0, 0);
+ return pagecache_get_page(mapping, offset, 0, 0);
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
{
- return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
@@ -308,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
/**
@@ -335,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
{
return pagecache_get_page(mapping, offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
- gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+ gfp_mask);
}
/**
@@ -356,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
- mapping_gfp_mask(mapping),
- GFP_NOFS);
+ mapping_gfp_mask(mapping));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -496,12 +479,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback.
- * Never use this directly!
+ * This is exported only for wait_on_page_locked/wait_on_page_writeback,
+ * and for filesystems which need to wait on PG_private.
*/
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+extern int wait_on_page_bit_killable_timeout(struct page *page,
+ int bit_nr, unsigned long timeout);
static inline int wait_on_page_locked_killable(struct page *page)
{
@@ -510,6 +495,12 @@ static inline int wait_on_page_locked_killable(struct page *page)
return 0;
}
+extern wait_queue_head_t *page_waitqueue(struct page *page);
+static inline void wake_up_page(struct page *page, int bit)
+{
+ __wake_up_bit(page_waitqueue(page), &page->flags, bit);
+}
+
/*
* Wait for a page to be unlocked.
*
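A hedged sketch of how a filesystem might pair the newly exported wake_up_page() with wait_on_page_bit() for PG_private; the fs_* function names are illustrative, not part of this patch (the declarations involved live in linux/pagemap.h and linux/page-flags.h):

    /* Waiting side: block until PG_private is clear (sketch only). */
    static void fs_wait_private(struct page *page)
    {
            if (PagePrivate(page))
                    wait_on_page_bit(page, PG_private);
    }

    /* Releasing side: clear the bit, then wake any waiters. */
    static void fs_release_private(struct page *page)
    {
            ClearPagePrivate(page);
            smp_mb__after_atomic();
            wake_up_page(page, PG_private);
    }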
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 64dacb7288a6..24c7728ca681 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -41,8 +41,13 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
if (pci_is_root_bus(pbus))
dev = pbus->bridge;
- else
+ else {
+ /* If pbus is a virtual bus, there is no bridge to it */
+ if (!pbus->self)
+ return NULL;
+
dev = &pbus->self->dev;
+ }
return ACPI_HANDLE(dev);
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 96453f9bc8ba..360a966a97a5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
-#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
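Parenthesising the macro arguments matters once callers pass expressions; a hedged illustration of the failure mode the old definition allowed (the values are made up):

    /*
     * Old macro body: ((((u16)bus) << 8) | devfn)
     *
     *   PCI_DEVID(bus, is_fn1 ? 0x09 : 0x08)
     *
     * expanded to
     *
     *   ((((u16)bus) << 8) | is_fn1 ? 0x09 : 0x08)
     *
     * and because '|' binds tighter than '?:', this evaluates as
     * ((bus << 8) | is_fn1) ? 0x09 : 0x08, silently yielding 0x09 or
     * 0x08 instead of a devid.  The parenthesised form keeps each
     * argument as a single operand.
     */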
@@ -331,6 +331,7 @@ struct pci_dev {
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
+ unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
@@ -348,6 +349,7 @@ struct pci_dev {
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int irq_managed:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -449,7 +451,7 @@ struct pci_bus {
struct resource busn_res; /* bus numbers routed to this bus */
struct pci_ops *ops; /* configuration access functions */
- struct msi_chip *msi; /* MSI controller */
+ struct msi_controller *msi; /* MSI controller */
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
@@ -457,6 +459,9 @@ struct pci_bus {
unsigned char primary; /* number of primary bridge */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ int domain_nr;
+#endif
char name[48];
@@ -1000,6 +1005,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
int pci_save_state(struct pci_dev *dev);
void pci_restore_state(struct pci_dev *dev);
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
+int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
@@ -1103,6 +1110,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
resource_size_t),
void *alignf_data);
+
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
struct pci_bus_region region;
@@ -1288,12 +1298,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
+int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
+/*
+ * Generic implementation for PCI domain support. If your
+ * architecture does not need custom management of PCI
+ * domains then this implementation will be used
+ */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ return bus->domain_nr;
+}
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#else
+static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
+ struct device *parent)
+{
+}
+#endif
+
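A rough, hypothetical sketch of the consumer side when CONFIG_PCI_DOMAINS_GENERIC is enabled: the core fills in bus->domain_nr via pci_bus_assign_domain_nr(), and drivers keep calling pci_domain_nr() unchanged (the helper below is illustrative only):

    static void example_report_domain(struct pci_dev *pdev)
    {
            /* Reads bus->domain_nr with the generic implementation. */
            dev_info(&pdev->dev, "device in PCI domain %04x\n",
                     pci_domain_nr(pdev->bus));
    }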
/* some architectures require additional setup to direct VGA traffic */
typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
@@ -1402,6 +1432,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
@@ -1563,16 +1594,11 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
void pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
-static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
-{
- return pci_dev_get(dev);
-}
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
{
@@ -1707,7 +1733,7 @@ bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
-#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
+#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
@@ -1834,15 +1860,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
-/**
- * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
- * @pdev: the PCI device
- *
- * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIe bridge (that is its parent is a
- * legacy PCI bridge and the bridge is directly connected to bus 0), return its
- * parent
- */
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
+/* helper functions for operation of device flag */
+static inline void pci_set_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
+{
+ return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
+}
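A minimal sketch of a device-assignment path (VFIO/KVM style) using the new accessors instead of open-coding dev_flags; the example_* names are hypothetical:

    static void example_claim_for_guest(struct pci_dev *pdev)
    {
            pci_set_dev_assigned(pdev);
    }

    static bool example_usable_by_host(struct pci_dev *pdev)
    {
            return !pci_is_dev_assigned(pdev);
    }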
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559af6b0..8c7895061121 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -109,7 +109,6 @@ struct hotplug_slot {
struct list_head slot_list;
struct pci_slot *pci_slot;
};
-#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
{
@@ -187,6 +186,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
return -ENODEV;
}
#endif
-
-void pci_configure_slot(struct pci_dev *dev);
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6ed0bb73a864..e63c02a93f6b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -522,6 +522,8 @@
#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574
#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
@@ -562,6 +564,7 @@
#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_NL_USB 0x7912
#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
@@ -2245,6 +2248,8 @@
#define PCI_VENDOR_ID_MORETON 0x15aa
#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
+#define PCI_VENDOR_ID_VMWARE 0x15ad
+
#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
@@ -2536,6 +2541,7 @@
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
+#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
@@ -2557,6 +2563,7 @@
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
@@ -2818,7 +2825,22 @@
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
@@ -2860,6 +2882,7 @@
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index cfd56046ecec..57f3a1c550dc 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -254,11 +254,6 @@ do { \
#endif /* CONFIG_SMP */
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
-#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-
-/* keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
/*
* Must be an lvalue. Since @var must be a simple identifier,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index ef5894ca8e50..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
*
* The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
* than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
*
* Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
* refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -29,7 +29,7 @@
* calls io_destroy() or the process exits.
*
* In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
- * calls percpu_ref_kill(), then hlist_del_rcu() and sychronize_rcu() to remove
+ * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
* any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
* the initial ref with percpu_ref_put().
@@ -49,29 +49,60 @@
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/gfp.h>
struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+ __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
+ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
+ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+ __PERCPU_REF_FLAG_BITS = 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+ /*
+ * Start w/ ref == 1 in atomic mode. Can be switched to percpu
+ * operation using percpu_ref_switch_to_percpu(). If initialized
+ * with this flag, the ref will stay in atomic mode until
+ * percpu_ref_switch_to_percpu() is invoked on it.
+ */
+ PERCPU_REF_INIT_ATOMIC = 1 << 0,
+
+ /*
+ * Start dead w/ ref == 0 in atomic mode. Must be revived with
+ * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+ */
+ PERCPU_REF_INIT_DEAD = 1 << 1,
+};
+
struct percpu_ref {
- atomic_t count;
+ atomic_long_t count;
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
- unsigned long pcpu_count_ptr;
+ unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
- percpu_ref_func_t *confirm_kill;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic:1;
struct rcu_head rcu;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
- percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+ percpu_ref_func_t *release, unsigned int flags,
+ gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
-void __percpu_ref_kill_expedited(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
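A hedged usage sketch of the reworked init API: start in atomic mode, switch to percpu mode once setup is done, and free the object from the release callback (struct my_obj and the my_* names are made up):

    struct my_obj {
            struct percpu_ref ref;
    };

    static void my_release(struct percpu_ref *ref)
    {
            kfree(container_of(ref, struct my_obj, ref));
    }

    static int my_obj_setup(struct my_obj *obj)
    {
            int ret;

            ret = percpu_ref_init(&obj->ref, my_release,
                                  PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
            if (ret)
                    return ret;

            /* ... setup that may still fail while the ref is atomic ... */

            percpu_ref_switch_to_percpu(&obj->ref);
            return 0;
    }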
/**
* percpu_ref_kill - drop the initial ref
@@ -88,70 +119,88 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
return percpu_ref_kill_and_confirm(ref, NULL);
}
-#define PCPU_REF_DEAD 1
-
/*
* Internal helper. Don't use outside percpu-refcount proper. The
* function doesn't return the pointer and let the caller test it for NULL
* because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
*/
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
- unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+ unsigned long __percpu **percpu_countp)
{
- unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
-
/* paired with smp_store_release() in percpu_ref_reinit() */
- smp_read_barrier_depends();
+ unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
- if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+ /*
+ * Theoretically, the following could test just ATOMIC; however,
+ * then we'd have to mask off DEAD separately as DEAD may be
+ * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
+ * implies ATOMIC anyway. Test them together.
+ */
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
return false;
- *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+ *percpu_countp = (unsigned long __percpu *)percpu_ptr;
return true;
}
/**
- * percpu_ref_get - increment a percpu refcount
+ * percpu_ref_get_many - increment a percpu refcount
* @ref: percpu_ref to get
+ * @nr: number of references to get
*
- * Analagous to atomic_inc().
- */
-static inline void percpu_ref_get(struct percpu_ref *ref)
+ * Analogous to atomic_long_add().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count))
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_add(*percpu_count, nr);
else
- atomic_inc(&ref->count);
+ atomic_long_add(nr, &ref->count);
rcu_read_unlock_sched();
}
/**
+ * percpu_ref_get - increment a percpu refcount
+ * @ref: percpu_ref to get
+ *
+ * Analogous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get(struct percpu_ref *ref)
+{
+ percpu_ref_get_many(ref, 1);
+}
+
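A short sketch of the batched variants for callers that pin one reference per element up front and drop them together later (obj and nr_pages are hypothetical):

    /* take nr_pages references in one go ... */
    percpu_ref_get_many(&obj->ref, nr_pages);

    /* ... and drop them together when the batch completes */
    percpu_ref_put_many(&obj->ref, nr_pages);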
+/**
* percpu_ref_tryget - try to increment a percpu refcount
* @ref: percpu_ref to try-get
*
* Increment a percpu refcount unless its count already reached zero.
* Returns %true on success; %false on failure.
*
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
- int ret = false;
+ unsigned long __percpu *percpu_count;
+ int ret;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count)) {
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
ret = true;
} else {
- ret = atomic_inc_not_zero(&ref->count);
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
@@ -166,23 +215,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
* Increment a percpu refcount unless it has already been killed. Returns
* %true on success; %false on failure.
*
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
- * used. After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
*
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
int ret = false;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count)) {
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
ret = true;
+ } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
@@ -191,39 +243,58 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
}
/**
- * percpu_ref_put - decrement a percpu refcount
+ * percpu_ref_put_many - decrement a percpu refcount
* @ref: percpu_ref to put
+ * @nr: number of references to put
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
*/
-static inline void percpu_ref_put(struct percpu_ref *ref)
+static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count))
- this_cpu_dec(*pcpu_count);
- else if (unlikely(atomic_dec_and_test(&ref->count)))
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_sub(*percpu_count, nr);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
ref->release(ref);
rcu_read_unlock_sched();
}
/**
+ * percpu_ref_put - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_put(struct percpu_ref *ref)
+{
+ percpu_ref_put_many(ref, 1);
+}
+
+/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*
* Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
- if (__pcpu_ref_alive(ref, &pcpu_count))
+ if (__ref_is_percpu(ref, &percpu_count))
return false;
- return !atomic_read(&ref->count);
+ return !atomic_long_read(&ref->count);
}
#endif
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..caebf2a758dc 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
+#include <linux/printk.h>
#include <linux/pfn.h>
#include <linux/init.h>
@@ -48,9 +49,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << 10)
#else
-#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
#endif
extern void *pcpu_base_addr;
@@ -122,11 +123,19 @@ extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
-#define alloc_percpu(type) \
- (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
+#define alloc_percpu_gfp(type, gfp) \
+ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
+ __alignof__(type), gfp)
+#define alloc_percpu(type) \
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
+
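A hedged sketch of the new allocator: alloc_percpu() still implies GFP_KERNEL, while the _gfp variant lets atomic-context callers pass a non-blocking mask (struct my_stats is made up):

    struct my_stats {
            u64 packets;
            u64 bytes;
    };

    static struct my_stats __percpu *my_stats_alloc_atomic(void)
    {
            /* Safe in atomic context; plain alloc_percpu() may sleep. */
            return alloc_percpu_gfp(struct my_stats, GFP_NOWAIT);
    }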
+/* To avoid include hell, as printk cannot declare this, we declare it here */
+DECLARE_PER_CPU(printk_func_t, printk_func);
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
+#include <linux/gfp.h>
#ifdef CONFIG_SMP
@@ -26,14 +27,14 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key);
-#define percpu_counter_init(fbc, value) \
+#define percpu_counter_init(fbc, value, gfp) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, &__key); \
+ __percpu_counter_init(fbc, value, gfp, &__key); \
})
void percpu_counter_destroy(struct percpu_counter *fbc);
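A minimal before/after sketch for callers of the changed initializer (the counter is illustrative):

    static struct percpu_counter nr_things;

    static int __init things_init(void)
    {
            /* was: percpu_counter_init(&nr_things, 0);
             * the new gfp argument controls the per-cpu allocation
             */
            return percpu_counter_init(&nr_things, 0, GFP_KERNEL);
    }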
@@ -89,7 +90,8 @@ struct percpu_counter {
s64 count;
};
-static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp)
{
fbc->count = amount;
return 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 707617a8c0f6..486e84ccb1f9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -52,6 +52,7 @@ struct perf_guest_info_callbacks {
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
+#include <linux/workqueue.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -78,7 +79,7 @@ struct perf_branch_stack {
struct perf_branch_entry entries[0];
};
-struct perf_regs_user {
+struct perf_regs {
__u64 abi;
struct pt_regs *regs;
};
@@ -268,6 +269,7 @@ struct pmu {
* enum perf_event_active_state - the states of a event
*/
enum perf_event_active_state {
+ PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
@@ -507,6 +509,9 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
int nr_branch_stack; /* branch_stack evt */
struct rcu_head rcu_head;
+
+ struct delayed_work orphans_remove;
+ bool orphans_remove_sched;
};
/*
@@ -575,34 +580,47 @@ extern u64 perf_event_read_value(struct perf_event *event,
struct perf_sample_data {
- u64 type;
+ /*
+ * Fields set by perf_sample_data_init(), group so as to
+ * minimize the cachelines touched.
+ */
+ u64 addr;
+ struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
+ u64 period;
+ u64 weight;
+ u64 txn;
+ union perf_mem_data_src data_src;
+ /*
+ * The other fields, optionally {set,used} by
+ * perf_{prepare,output}_sample().
+ */
+ u64 type;
u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
- u64 addr;
u64 id;
u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
- u64 period;
- union perf_mem_data_src data_src;
struct perf_callchain_entry *callchain;
- struct perf_raw_record *raw;
- struct perf_branch_stack *br_stack;
- struct perf_regs_user regs_user;
+ struct perf_regs regs_user;
+ struct perf_regs regs_intr;
u64 stack_user_size;
- u64 weight;
- /*
- * Transaction flags for abort events:
- */
- u64 txn;
-};
+} ____cacheline_aligned;
+
+/* default value for data source */
+#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA))
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
@@ -612,11 +630,8 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->raw = NULL;
data->br_stack = NULL;
data->period = period;
- data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
- data->regs_user.regs = NULL;
- data->stack_user_size = 0;
data->weight = 0;
- data->data_src.val = 0;
+ data->data_src.val = PERF_MEM_NA;
data->txn = 0;
}
diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h
deleted file mode 100644
index 4269de99e320..000000000000
--- a/include/linux/phonedev.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __LINUX_PHONEDEV_H
-#define __LINUX_PHONEDEV_H
-
-#include <linux/types.h>
-
-#ifdef __KERNEL__
-
-#include <linux/poll.h>
-
-struct phone_device {
- struct phone_device *next;
- const struct file_operations *f_op;
- int (*open) (struct phone_device *, struct file *);
- int board; /* Device private index */
- int minor;
-};
-
-extern int phonedev_init(void);
-#define PHONE_MAJOR 100
-extern int phone_register_device(struct phone_device *, int unit);
-#define PHONE_UNIT_ANY -1
-extern void phone_unregister_device(struct phone_device *);
-
-#endif
-#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ed39956b5613..22af8f8f5802 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -433,6 +433,7 @@ struct phy_device {
* by this PHY
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * driver_data: static driver data
*
* The drivers must implement config_aneg and read_status. All
* other functions are optional. Note that none of these
@@ -448,6 +449,7 @@ struct phy_driver {
unsigned int phy_id_mask;
u32 features;
u32 flags;
+ const void *driver_data;
/*
* Called to issue a PHY software reset
@@ -598,6 +600,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
}
/**
+ * phy_read_mmd_indirect - reads data from the MMD registers
+ * @phydev: The PHY device bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ *
+ * Description: reads data from the MMD registers (clause 22 indirect access
+ * to clause 45 registers) of the specified phy address.
+ */
+int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
+ int devad, int addr);
+
+/**
* phy_read - Convenience function for reading a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to read
@@ -668,6 +683,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
}
+/**
+ * phy_write_mmd_indirect - writes data to the MMD registers
+ * @phydev: The PHY device
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ * @data: data to write in the MMD register
+ *
+ * Description: writes data to the MMD registers of the specified
+ * phy address.
+ */
+void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
+ int devad, int addr, u32 data);
+
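A hedged sketch of indirect MMD access; the MDIO_* constants come from linux/mdio.h, the surrounding helper is hypothetical, and the argument order follows the kernel-doc above (register, MMD, PHY address):

    static void example_eee_adv_1000t(struct phy_device *phydev)
    {
            int adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
                                            MDIO_MMD_AN, phydev->addr);

            if (adv < 0)
                    return;

            phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
                                   phydev->addr, adv | MDIO_EEE_1000T);
    }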
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
@@ -745,4 +774,28 @@ int __init mdio_bus_init(void);
void mdio_bus_exit(void);
extern struct bus_type mdio_bus_type;
+
+/**
+ * module_phy_driver() - Helper macro for registering PHY drivers
+ * @__phy_drivers: array of PHY drivers to register
+ *
+ * Helper macro for PHY drivers which do not do anything special in module
+ * init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init() and module_exit().
+ */
+#define phy_module_driver(__phy_drivers, __count) \
+static int __init phy_module_init(void) \
+{ \
+ return phy_drivers_register(__phy_drivers, __count); \
+} \
+module_init(phy_module_init); \
+static void __exit phy_module_exit(void) \
+{ \
+ phy_drivers_unregister(__phy_drivers, __count); \
+} \
+module_exit(phy_module_exit)
+
+#define module_phy_driver(__phy_drivers) \
+ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
+
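A hedged sketch of a trivial driver using the new helper; the IDs and name are invented, the genphy_* callbacks are the generic implementations phylib already exports:

    static struct phy_driver example_phy_drivers[] = {
    {
            .phy_id         = 0x01234560,
            .phy_id_mask    = 0xfffffff0,
            .name           = "Example Gigabit PHY",
            .features       = PHY_GBIT_FEATURES,
            .config_aneg    = genphy_config_aneg,
            .read_status    = genphy_read_status,
    } };

    module_phy_driver(example_phy_drivers);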
#endif /* __PHY_H */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 8cb6f815475b..a0197fa1b116 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -61,7 +61,6 @@ struct phy {
struct device dev;
int id;
const struct phy_ops *ops;
- struct phy_init_data *init_data;
struct mutex mutex;
int init_count;
int power_count;
@@ -84,33 +83,14 @@ struct phy_provider {
struct of_phandle_args *args);
};
-/**
- * struct phy_consumer - represents the phy consumer
- * @dev_name: the device name of the controller that will use this PHY device
- * @port: name given to the consumer port
- */
-struct phy_consumer {
- const char *dev_name;
- const char *port;
-};
-
-/**
- * struct phy_init_data - contains the list of PHY consumers
- * @num_consumers: number of consumers for this PHY device
- * @consumers: list of PHY consumers
- */
-struct phy_init_data {
- unsigned int num_consumers;
- struct phy_consumer *consumers;
+struct phy_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct phy *phy;
};
-#define PHY_CONSUMER(_dev_name, _port) \
-{ \
- .dev_name = _dev_name, \
- .port = _port, \
-}
-
-#define to_phy(dev) (container_of((dev), struct phy, dev))
+#define to_phy(a) (container_of((a), struct phy, dev))
#define of_phy_provider_register(dev, xlate) \
__of_phy_provider_register((dev), THIS_MODULE, (xlate))
@@ -159,10 +139,9 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id);
struct phy *of_phy_simple_xlate(struct device *dev,
struct of_phandle_args *args);
struct phy *phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data);
+ const struct phy_ops *ops);
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops, struct phy_init_data *init_data);
+ const struct phy_ops *ops);
void phy_destroy(struct phy *phy);
void devm_phy_destroy(struct device *dev, struct phy *phy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
@@ -174,6 +153,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
void of_phy_provider_unregister(struct phy_provider *phy_provider);
void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider);
+int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id);
+void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id);
#else
static inline int phy_pm_runtime_get(struct phy *phy)
{
@@ -301,16 +282,14 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev,
static inline struct phy *phy_create(struct device *dev,
struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data)
+ const struct phy_ops *ops)
{
return ERR_PTR(-ENOSYS);
}
static inline struct phy *devm_phy_create(struct device *dev,
struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data)
+ const struct phy_ops *ops)
{
return ERR_PTR(-ENOSYS);
}
@@ -345,6 +324,13 @@ static inline void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider)
{
}
+static inline int
+phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
+{
+ return 0;
+}
+static inline void phy_remove_lookup(struct phy *phy, const char *con_id,
+ const char *dev_id) { }
#endif
#endif /* __DRIVERS_PHY_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ae612acebb53..7e75bfe37cc7 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -11,37 +11,38 @@ struct fixed_phy_status {
struct device_node;
-#ifdef CONFIG_FIXED_PHY
+#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status);
-extern int fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np);
+extern struct phy_device *fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np);
extern void fixed_phy_del(int phy_addr);
+extern int fixed_phy_set_link_update(struct phy_device *phydev,
+ int (*link_update)(struct net_device *,
+ struct fixed_phy_status *));
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status)
{
return -ENODEV;
}
-static inline int fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
+static inline struct phy_device *fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np)
{
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
static inline int fixed_phy_del(int phy_addr)
{
return -ENODEV;
}
-#endif /* CONFIG_FIXED_PHY */
-
-/*
- * This function issued only by fixed_phy-aware drivers, no need
- * protect it with #ifdef
- */
-extern int fixed_phy_set_link_update(struct phy_device *phydev,
+static inline int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
- struct fixed_phy_status *));
+ struct fixed_phy_status *))
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
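A hedged sketch of a caller adapting to the pointer return: error checking moves from a negative int to IS_ERR()/PTR_ERR(); the link/speed/duplex values and the use of PHY_POLL are illustrative:

    static int example_register_fixed_phy(void)
    {
            struct fixed_phy_status status = {
                    .link   = 1,
                    .speed  = 1000,
                    .duplex = 1,
            };
            struct phy_device *phydev;

            phydev = fixed_phy_register(PHY_POLL, &status, NULL);
            if (IS_ERR(phydev))
                    return PTR_ERR(phydev);

            return 0;
    }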
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 1997ffc295a7..b9cf6c51b181 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,6 +8,7 @@
#include <linux/threads.h>
#include <linux/nsproxy.h>
#include <linux/kref.h>
+#include <linux/ns_common.h>
struct pidmap {
atomic_t nr_free;
@@ -43,7 +44,7 @@ struct pid_namespace {
kgid_t pid_gid;
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a15f10727eb8..d578a60eff23 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -57,7 +57,7 @@
* which are then pulled up with an external resistor. Setting this
* config will enable open drain mode, the argument is ignored.
* @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
- * (open emitter). Setting this config will enable open drain mode, the
+ * (open emitter). Setting this config will enable open source mode, the
* argument is ignored.
* @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
* passed as argument. The argument is in mA.
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 3097aafbeb24..511bda9ed4bf 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -39,13 +39,12 @@ struct pinctrl_dev;
* name can be used with the generic @pinctrl_ops to retrieve the
* actual pins affected. The applicable groups will be returned in
* @groups and the number of groups in @num_groups
- * @enable: enable a certain muxing function with a certain pin group. The
+ * @set_mux: enable a certain muxing function with a certain pin group. The
* driver does not need to figure out whether enabling this function
* conflicts some other use of the pins in that group, such collisions
* are handled by the pinmux subsystem. The @func_selector selects a
* certain function whereas @group_selector selects a certain set of pins
* to be used. On simple controllers the latter argument may be ignored
- * @disable: disable a certain muxing selector with a certain pin group
* @gpio_request_enable: requests and enables GPIO on a certain pin.
* Implement this only if you can mux every pin individually as GPIO. The
* affected GPIO range is passed along with an offset(pin number) into that
@@ -68,8 +67,8 @@ struct pinmux_ops {
unsigned selector,
const char * const **groups,
unsigned * const num_groups);
- int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector,
- unsigned group_selector);
+ int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
+ unsigned group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset);
diff --git a/include/linux/mailbox.h b/include/linux/pl320-ipc.h
index 5161f63ec1c8..5161f63ec1c8 100644
--- a/include/linux/mailbox.h
+++ b/include/linux/pl320-ipc.h
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index a6591c693ebb..5e0bc779e6c5 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -27,6 +27,7 @@ struct samsung_i2s {
#define QUIRK_NO_MUXPSR (1 << 2)
#define QUIRK_NEED_RSTCLR (1 << 3)
#define QUIRK_SUPPORTS_TDM (1 << 4)
+#define QUIRK_SUPPORTS_IDMA (1 << 5)
/* Quirks of the I2S controller */
u32 quirks;
dma_addr_t idma_addr;
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
new file mode 100644
index 000000000000..26af54321958
--- /dev/null
+++ b/include/linux/platform_data/bcmgenet.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
+#define __LINUX_PLATFORM_DATA_BCMGENET_H__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
+struct bcmgenet_platform_data {
+ bool mdio_enabled;
+ phy_interface_t phy_interface;
+ int phy_address;
+ int phy_speed;
+ int phy_duplex;
+ u8 mac_address[ETH_ALEN];
+ int genet_version;
+};
+
+#endif
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
new file mode 100644
index 000000000000..d8155c005242
--- /dev/null
+++ b/include/linux/platform_data/dma-dw.h
@@ -0,0 +1,59 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _PLATFORM_DATA_DMA_DW_H
+#define _PLATFORM_DATA_DMA_DW_H
+
+#include <linux/device.h>
+
+/**
+ * struct dw_dma_slave - Controller-specific information about a slave
+ *
+ * @dma_dev: required DMA master device. Deprecated.
+ * @src_id: src request line
+ * @dst_id: dst request line
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ */
+struct dw_dma_slave {
+ struct device *dma_dev;
+ u8 src_id;
+ u8 dst_id;
+ u8 src_master;
+ u8 dst_master;
+};
+
+/**
+ * struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @is_private: The device channels should be marked as private and not for
+ * use by the general purpose DMA channel allocator.
+ * @chan_allocation_order: Allocate channels starting from 0 or 7
+ * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
+ * @block_size: Maximum block size supported by the controller
+ * @nr_masters: Number of AHB masters supported by the controller
+ * @data_width: Maximum data width supported by hardware per AHB master
+ * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ */
+struct dw_dma_platform_data {
+ unsigned int nr_channels;
+ bool is_private;
+#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
+#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
+ unsigned char chan_allocation_order;
+#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
+#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
+ unsigned char chan_priority;
+ unsigned short block_size;
+ unsigned char nr_masters;
+ unsigned char data_width[4];
+};
+
+#endif /* _PLATFORM_DATA_DMA_DW_H */
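A hedged sketch of board code describing one slave channel with the new header; the request-line numbers and master indices are placeholders that depend on the SoC integration:

    static struct dw_dma_slave example_uart_tx_dma = {
            /* .dma_dev must point at the DesignWare controller's device */
            .src_id     = 0,        /* memory side */
            .dst_id     = 5,        /* peripheral TX request line (placeholder) */
            .src_master = 0,
            .dst_master = 1,
    };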
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 6a1357d31871..7d964e787299 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -41,6 +41,7 @@ enum sdma_peripheral_type {
IMX_DMATYPE_ESAI, /* ESAI */
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
+ IMX_DMATYPE_SAI, /* SAI */
};
enum imx_dma_prio {
diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h
deleted file mode 100644
index 5eb7da9b3772..000000000000
--- a/include/linux/platform_data/dwc3-exynos.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _DWC3_EXYNOS_H_
-#define _DWC3_EXYNOS_H_
-
-struct dwc3_exynos_data {
- int phy_type;
- int (*phy_init)(struct platform_device *pdev, int type);
- int (*phy_exit)(struct platform_device *pdev, int type);
-};
-
-#endif /* _DWC3_EXYNOS_H_ */
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index 780d1e97f620..b8686c00f15f 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -42,8 +42,24 @@ struct elm_errorvec {
int error_loc[16];
};
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
struct elm_errorvec *err_vec);
int elm_config(struct device *dev, enum bch_ecc bch_type,
int ecc_steps, int ecc_step_size, int ecc_syndrome_size);
+#else
+static inline void
+elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+}
+
+static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size,
+ int ecc_syndrome_size)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
#endif /* __ELM_H */
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
new file mode 100644
index 000000000000..28702c849af1
--- /dev/null
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef GPIO_DW_APB_H
+#define GPIO_DW_APB_H
+
+struct dwapb_port_property {
+ struct device_node *node;
+ const char *name;
+ unsigned int idx;
+ unsigned int ngpio;
+ unsigned int gpio_base;
+ unsigned int irq;
+ bool irq_shared;
+};
+
+struct dwapb_platform_data {
+ struct dwapb_port_property *properties;
+ unsigned int nports;
+};
+
+#endif
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
new file mode 100644
index 000000000000..67bbcf0785f6
--- /dev/null
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -0,0 +1,90 @@
+/*
+ * MMC definitions for OMAP2
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * struct omap_hsmmc_dev_attr.flags possibilities
+ *
+ * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
+ * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
+ * should be set if this is the case. See for example Section 22.5.3
+ * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
+ * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
+ *
+ * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
+ * don't work correctly on some MMC controller instances on some
+ * OMAP3 SoCs; this flag should be set if this is the case. See
+ * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
+ * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
+ * Revision F (October 2010) (SPRZ278F).
+ */
+#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
+#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
+#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
+
+struct omap_hsmmc_dev_attr {
+ u8 flags;
+};
+
+struct mmc_card;
+
+struct omap_hsmmc_platform_data {
+ /* back-link to device */
+ struct device *dev;
+
+ /* set if your board has components or wiring that limits the
+ * maximum frequency on the MMC bus */
+ unsigned int max_freq;
+
+ /* Integrating attributes from the omap_hwmod layer */
+ u8 controller_flags;
+
+ /* Register offset deviation */
+ u16 reg_offset;
+
+ /*
+ * 4/8 wires and any additional host capabilities:
+ * all capabilities need to be OR'd together (ref. linux/mmc/host.h)
+ */
+ u32 caps; /* Used for the MMC driver on 2430 and later */
+ u32 pm_caps; /* PM capabilities of the mmc */
+
+ /* switch pin can be for card detect (default) or card cover */
+ unsigned cover:1;
+
+ /* use the internal clock */
+ unsigned internal_clock:1;
+
+ /* nonremovable e.g. eMMC */
+ unsigned nonremovable:1;
+
+ /* eMMC does not handle power off when not in sleep state */
+ unsigned no_regulator_off_init:1;
+
+ /* we can put the features above into this variable */
+#define HSMMC_HAS_PBIAS (1 << 0)
+#define HSMMC_HAS_UPDATED_RESET (1 << 1)
+#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
+ unsigned features;
+
+ int switch_pin; /* gpio (card detect) */
+ int gpio_wp; /* gpio (write protect) */
+
+ int (*set_power)(struct device *dev, int power_on, int vdd);
+ void (*remux)(struct device *dev, int power_on);
+ /* Call back before enabling / disabling regulators */
+ void (*before_set_reg)(struct device *dev, int power_on, int vdd);
+ /* Call back after enabling / disabling regulators */
+ void (*after_set_reg)(struct device *dev, int power_on, int vdd);
+ /* if we have special card, init it using this callback */
+ void (*init_card)(struct mmc_card *card);
+
+ const char *name;
+ u32 ocr_mask;
+};
diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h
new file mode 100644
index 000000000000..7a61fb27c25b
--- /dev/null
+++ b/include/linux/platform_data/i2c-designware.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef I2C_DESIGNWARE_H
+#define I2C_DESIGNWARE_H
+
+struct dw_i2c_platform_data {
+ unsigned int i2c_scl_freq;
+};
+
+#endif
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
new file mode 100644
index 000000000000..1419133fa69e
--- /dev/null
+++ b/include/linux/platform_data/isl9305.h
@@ -0,0 +1,30 @@
+/*
+ * isl9305 - Intersil ISL9305 DCDC regulator
+ *
+ * Copyright 2014 Linaro Ltd
+ *
+ * Author: Mark Brown <broonie@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ISL9305_H
+#define __ISL9305_H
+
+#define ISL9305_DCD1 0
+#define ISL9305_DCD2 1
+#define ISL9305_LDO1 2
+#define ISL9305_LDO2 3
+
+#define ISL9305_MAX_REGULATOR ISL9305_LDO2
+
+struct regulator_init_data;
+
+struct isl9305_pdata {
+ struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
+};
+
+#endif
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 1b2ba24e4e03..9c7fd1efe495 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,6 +136,7 @@ struct lp855x_rom_data {
Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
+ * @supply : regulator that supplies 3V input
*/
struct lp855x_platform_data {
const char *name;
@@ -144,6 +145,7 @@ struct lp855x_platform_data {
unsigned int period_ns;
int size_program;
struct lp855x_rom_data *rom_data;
+ struct regulator *supply;
};
#endif
diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h
new file mode 100644
index 000000000000..399a2d5a14bd
--- /dev/null
+++ b/include/linux/platform_data/mmc-atmel-mci.h
@@ -0,0 +1,22 @@
+#ifndef __MMC_ATMEL_MCI_H
+#define __MMC_ATMEL_MCI_H
+
+#include <linux/platform_data/dma-atmel.h>
+#include <linux/platform_data/dma-dw.h>
+
+/**
+ * struct mci_dma_data - DMA data for MCI interface
+ */
+struct mci_dma_data {
+#ifdef CONFIG_ARM
+ struct at_dma_slave sdata;
+#else
+ struct dw_dma_slave sdata;
+#endif
+};
+
+/* accessor macros */
+#define slave_data_ptr(s) (&(s)->sdata)
+#define find_slave_dev(s) ((s)->sdata.dma_dev)
+
+#endif /* __MMC_ATMEL_MCI_H */
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 51e70cf25cbc..5c188f4e9bec 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -10,32 +10,8 @@
#define OMAP_MMC_MAX_SLOTS 2
-/*
- * struct omap_mmc_dev_attr.flags possibilities
- *
- * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
- * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
- * should be set if this is the case. See for example Section 22.5.3
- * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
- * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
- *
- * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
- * don't work correctly on some MMC controller instances on some
- * OMAP3 SoCs; this flag should be set if this is the case. See
- * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
- * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
- * Revision F (October 2010) (SPRZ278F).
- */
-#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
-#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
-#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
-
struct mmc_card;
-struct omap_mmc_dev_attr {
- u8 flags;
-};
-
struct omap_mmc_platform_data {
/* back-link to device */
struct device *dev;
@@ -106,9 +82,6 @@ struct omap_mmc_platform_data {
unsigned vcc_aux_disable_is_sleep:1;
/* we can put the features above into this variable */
-#define HSMMC_HAS_PBIAS (1 << 0)
-#define HSMMC_HAS_UPDATED_RESET (1 << 1)
-#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
#define MMC_OMAP7XX (1 << 3)
#define MMC_OMAP15XX (1 << 4)
#define MMC_OMAP16XX (1 << 5)
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 16ec262dfcc8..090bbab0130a 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -71,6 +71,7 @@ struct omap_nand_platform_data {
struct mtd_partition *parts;
int nr_parts;
bool dev_ready;
+ bool flash_bbt;
enum nand_io xfer_type;
int devsize;
enum omap_ecc ecc_opt;
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 27d3156d093a..9e20c2fb4ffd 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -55,9 +55,4 @@ struct sdhci_pxa_platdata {
unsigned int quirks2;
unsigned int pm_caps;
};
-
-struct sdhci_pxa {
- u8 clk_enable;
- u8 power_mode;
-};
#endif /* _PXA_SDHCI_H_ */
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
deleted file mode 100644
index 1a2e9901a22e..000000000000
--- a/include/linux/platform_data/rcar-du.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * rcar_du.h -- R-Car Display Unit DRM driver
- *
- * Copyright (C) 2013 Renesas Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_H__
-#define __RCAR_DU_H__
-
-#include <drm/drm_mode.h>
-
-enum rcar_du_output {
- RCAR_DU_OUTPUT_DPAD0,
- RCAR_DU_OUTPUT_DPAD1,
- RCAR_DU_OUTPUT_LVDS0,
- RCAR_DU_OUTPUT_LVDS1,
- RCAR_DU_OUTPUT_TCON,
- RCAR_DU_OUTPUT_MAX,
-};
-
-enum rcar_du_encoder_type {
- RCAR_DU_ENCODER_UNUSED = 0,
- RCAR_DU_ENCODER_NONE,
- RCAR_DU_ENCODER_VGA,
- RCAR_DU_ENCODER_LVDS,
-};
-
-struct rcar_du_panel_data {
- unsigned int width_mm; /* Panel width in mm */
- unsigned int height_mm; /* Panel height in mm */
- struct drm_mode_modeinfo mode;
-};
-
-struct rcar_du_connector_lvds_data {
- struct rcar_du_panel_data panel;
-};
-
-struct rcar_du_connector_vga_data {
- /* TODO: Add DDC information for EDID retrieval */
-};
-
-/*
- * struct rcar_du_encoder_data - Encoder platform data
- * @type: the encoder type (RCAR_DU_ENCODER_*)
- * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
- * @connector.lvds: platform data for LVDS connectors
- * @connector.vga: platform data for VGA connectors
- *
- * Encoder platform data describes an on-board encoder, its associated DU SoC
- * output, and the connector.
- */
-struct rcar_du_encoder_data {
- enum rcar_du_encoder_type type;
- enum rcar_du_output output;
-
- union {
- struct rcar_du_connector_lvds_data lvds;
- struct rcar_du_connector_vga_data vga;
- } connector;
-};
-
-struct rcar_du_platform_data {
- struct rcar_du_encoder_data *encoders;
- unsigned int num_encoders;
-};
-
-#endif /* __RCAR_DU_H__ */
diff --git a/include/linux/platform_data/samsung-usbphy.h b/include/linux/platform_data/samsung-usbphy.h
deleted file mode 100644
index 1bd24cba982b..000000000000
--- a/include/linux/platform_data/samsung-usbphy.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * http://www.samsung.com/
- * Author: Praveen Paneri <p.paneri@samsung.com>
- *
- * Defines platform data for samsung usb phy driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __SAMSUNG_USBPHY_PLATFORM_H
-#define __SAMSUNG_USBPHY_PLATFORM_H
-
-/**
- * samsung_usbphy_data - Platform data for USB PHY driver.
- * @pmu_isolation: Function to control usb phy isolation in PMU.
- */
-struct samsung_usbphy_data {
- void (*pmu_isolation)(int on);
-};
-
-extern void samsung_usbphy_set_pdata(struct samsung_usbphy_data *pd);
-
-#endif /* __SAMSUNG_USBPHY_PLATFORM_H */
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index c860c1b314c0..d09275f3cde3 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -38,9 +38,6 @@ struct omap_uart_port_info {
unsigned int dma_rx_timeout;
unsigned int autosuspend_timeout;
unsigned int dma_rx_poll_rate;
- int DTR_gpio;
- int DTR_inverted;
- int DTR_present;
int (*get_context_loss_count)(struct device *);
void (*enable_wakeup)(struct device *, bool);
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 1730312398ff..5087fff96d86 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -24,7 +24,6 @@
#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
struct st21nfca_nfc_platform_data {
- unsigned int gpio_irq;
unsigned int gpio_ena;
unsigned int irq_polarity;
};
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index 2d11f1f5efab..c3b432f5b63e 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -24,7 +24,6 @@
#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
struct st21nfcb_nfc_platform_data {
- unsigned int gpio_irq;
unsigned int gpio_reset;
unsigned int irq_polarity;
};
diff --git a/include/linux/platform_data/tegra_emc.h b/include/linux/platform_data/tegra_emc.h
deleted file mode 100644
index df67505e98f8..000000000000
--- a/include/linux/platform_data/tegra_emc.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2011 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@android.com>
- * Olof Johansson <olof@lixom.net>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __TEGRA_EMC_H_
-#define __TEGRA_EMC_H_
-
-#define TEGRA_EMC_NUM_REGS 46
-
-struct tegra_emc_table {
- unsigned long rate;
- u32 regs[TEGRA_EMC_NUM_REGS];
-};
-
-struct tegra_emc_pdata {
- int num_tables;
- struct tegra_emc_table *tables;
-};
-
-#endif
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 153d303af7eb..ae4882ca4a64 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -197,8 +197,10 @@ extern void platform_driver_unregister(struct platform_driver *);
/* non-hotpluggable platform devices may use this so that probe() and
* its support may live in __init sections, conserving runtime memory.
*/
-extern int platform_driver_probe(struct platform_driver *driver,
- int (*probe)(struct platform_device *));
+#define platform_driver_probe(drv, probe) \
+ __platform_driver_probe(drv, probe, THIS_MODULE)
+extern int __platform_driver_probe(struct platform_driver *driver,
+ int (*probe)(struct platform_device *), struct module *module);
static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
@@ -238,10 +240,12 @@ static void __exit __platform_driver##_exit(void) \
} \
module_exit(__platform_driver##_exit);
-extern struct platform_device *platform_create_bundle(
+#define platform_create_bundle(driver, probe, res, n_res, data, size) \
+ __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
+extern struct platform_device *__platform_create_bundle(
struct platform_driver *driver, int (*probe)(struct platform_device *),
struct resource *res, unsigned int n_res,
- const void *data, size_t size);
+ const void *data, size_t size, struct module *module);
/* early platform driver interface */
struct early_platform_driver {
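
Callers of platform_driver_probe() are unaffected by the hunk above; THIS_MODULE is now captured by the wrapper macro instead of being supplied by core code. Below is a minimal, hedged sketch of the usual pattern with a hypothetical "foo" driver (names and probe body are illustrative only):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/* probe() may live in __init memory because the device is not hot-pluggable */
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
};

static int __init foo_init(void)
{
	/* expands to __platform_driver_probe(&foo_driver, foo_probe, THIS_MODULE) */
	return platform_driver_probe(&foo_driver, foo_probe);
}
module_init(foo_init);

MODULE_LICENSE("GPL");
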
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 8b6c970cff6c..97883604a3c5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
* plist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter
* @head: the head for your list
- * @mem: the name of the list_struct within the struct
+ * @mem: the name of the list_head within the struct
*/
#define plist_for_each_entry(pos, head, mem) \
list_for_each_entry(pos, &(head)->node_list, mem.node_list)
@@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
* plist_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor
* @head: the head for your list
- * @m: the name of the list_struct within the struct
+ * @m: the name of the list_head within the struct
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
* @head: the head for your list
- * @m: the name of the list_struct within the struct
+ * @m: the name of the list_head within the struct
*
* Iterate over list of given type, safe against removal of list entry.
*/
@@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node)
* plist_first_entry - get the struct for the first entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
- * @member: the name of the list_struct within the struct
+ * @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PI_LIST
# define plist_first_entry(head, type, member) \
@@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node)
* plist_last_entry - get the struct for the last entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
- * @member: the name of the list_struct within the struct
+ * @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PI_LIST
# define plist_last_entry(head, type, member) \
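
The plist.h hunks are kernel-doc fixes only (@member names the plist_node/list_head inside the entry, not a nonexistent list_struct). For reference, a small sketch of the iteration these macros provide, using a hypothetical item type:

#include <linux/plist.h>

struct foo_item {
	struct plist_node node;	/* keyed by node.prio */
	int payload;
};

static int foo_sum(struct plist_head *head)
{
	struct foo_item *item;
	int sum = 0;

	/* walks entries in ascending priority order */
	plist_for_each_entry(item, head, node)
		sum += item->payload;

	return sum;
}
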
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 72c0fe098a27..8b5976364619 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,7 +342,7 @@ struct dev_pm_ops {
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
.runtime_resume = resume_fn, \
@@ -351,15 +351,6 @@ struct dev_pm_ops {
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
-#ifdef CONFIG_PM
-#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
- .runtime_suspend = suspend_fn, \
- .runtime_resume = resume_fn, \
- .runtime_idle = idle_fn,
-#else
-#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
-#endif
-
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
@@ -538,11 +529,7 @@ enum rpm_request {
};
struct wakeup_source;
-
-struct pm_domain_data {
- struct list_head list_node;
- struct device *dev;
-};
+struct pm_domain_data;
struct pm_subsys_data {
spinlock_t lock;
@@ -576,7 +563,7 @@ struct dev_pm_info {
#else
unsigned int should_wakeup:1;
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
struct timer_list suspend_timer;
unsigned long timer_expires;
struct work_struct work;
@@ -619,6 +606,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ void (*detach)(struct device *dev, bool power_off);
};
/*
@@ -679,12 +667,16 @@ struct dev_pm_domain {
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
+extern void dpm_resume_noirq(pm_message_t state);
+extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
+extern int dpm_suspend_noirq(pm_message_t state);
+extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 8348866e7b05..0b0039634410 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -18,6 +18,8 @@ struct pm_clk_notifier_block {
char *con_ids[];
};
+struct clk;
+
#ifdef CONFIG_PM_CLK
static inline bool pm_clk_no_clocks(struct device *dev)
{
@@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev);
extern int pm_clk_create(struct device *dev);
extern void pm_clk_destroy(struct device *dev);
extern int pm_clk_add(struct device *dev, const char *con_id);
+extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
extern void pm_clk_remove(struct device *dev, const char *con_id);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
@@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id)
{
return -EINVAL;
}
+
+static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
+{
+ return -EINVAL;
+}
static inline void pm_clk_remove(struct device *dev, const char *con_id)
{
}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ebc4c76ffb73..a9edab2c787a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,6 +17,9 @@
#include <linux/notifier.h>
#include <linux/cpuidle.h>
+/* Defines used for the flags field in the struct generic_pm_domain */
+#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
+
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
@@ -35,18 +38,10 @@ struct gpd_dev_ops {
int (*stop)(struct device *dev);
int (*save_state)(struct device *dev);
int (*restore_state)(struct device *dev);
- int (*suspend)(struct device *dev);
- int (*suspend_late)(struct device *dev);
- int (*resume_early)(struct device *dev);
- int (*resume)(struct device *dev);
- int (*freeze)(struct device *dev);
- int (*freeze_late)(struct device *dev);
- int (*thaw_early)(struct device *dev);
- int (*thaw)(struct device *dev);
bool (*active_wakeup)(struct device *dev);
};
-struct gpd_cpu_data {
+struct gpd_cpuidle_data {
unsigned int saved_exit_latency;
struct cpuidle_state *idle_state;
};
@@ -71,7 +66,6 @@ struct generic_pm_domain {
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
bool suspend_power_off; /* Power status before system suspend */
- bool dev_irq_safe; /* Device callbacks are IRQ-safe */
int (*power_off)(struct generic_pm_domain *domain);
s64 power_off_latency_ns;
int (*power_on)(struct generic_pm_domain *domain);
@@ -80,8 +74,12 @@ struct generic_pm_domain {
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
bool cached_power_down_ok;
- struct device_node *of_node; /* Node in device tree */
- struct gpd_cpu_data *cpu_data;
+ struct gpd_cpuidle_data *cpuidle_data;
+ int (*attach_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
+ void (*detach_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
+ unsigned int flags; /* Bit field of configs for genpd */
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -106,14 +104,18 @@ struct gpd_timing_data {
bool cached_stop_ok;
};
+struct pm_domain_data {
+ struct list_head list_node;
+ struct device *dev;
+};
+
struct generic_pm_domain_data {
struct pm_domain_data base;
- struct gpd_dev_ops ops;
struct gpd_timing_data td;
struct notifier_block nb;
struct mutex lock;
unsigned int refcount;
- bool need_restore;
+ int need_restore;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -127,17 +129,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct dev_power_governor simple_qos_governor;
-
extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
-extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
- struct device *dev,
- struct gpd_timing_data *td);
-
extern int __pm_genpd_name_add_device(const char *domain_name,
struct device *dev,
struct gpd_timing_data *td);
@@ -151,10 +147,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name,
const char *subdomain_name);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
-extern int pm_genpd_add_callbacks(struct device *dev,
- struct gpd_dev_ops *ops,
- struct gpd_timing_data *td);
-extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
@@ -164,9 +156,9 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
extern int pm_genpd_name_poweron(const char *domain_name);
+extern void pm_genpd_poweroff_unused(void);
-extern bool default_stop_ok(struct device *dev);
-
+extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
#else
@@ -184,12 +176,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
- struct device *dev,
- struct gpd_timing_data *td)
-{
- return -ENOSYS;
-}
static inline int __pm_genpd_name_add_device(const char *domain_name,
struct device *dev,
struct gpd_timing_data *td)
@@ -217,16 +203,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline int pm_genpd_add_callbacks(struct device *dev,
- struct gpd_dev_ops *ops,
- struct gpd_timing_data *td)
-{
- return -ENOSYS;
-}
-static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
-{
- return -ENOSYS;
-}
static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
{
return -ENOSYS;
@@ -255,10 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
{
return -ENOSYS;
}
-static inline bool default_stop_ok(struct device *dev)
-{
- return false;
-}
+static inline void pm_genpd_poweroff_unused(void) {}
#define simple_qos_governor NULL
#define pm_domain_always_on_gov NULL
#endif
@@ -269,45 +242,89 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
return __pm_genpd_add_device(genpd, dev, NULL);
}
-static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
- struct device *dev)
-{
- return __pm_genpd_of_add_device(genpd_node, dev, NULL);
-}
-
static inline int pm_genpd_name_add_device(const char *domain_name,
struct device *dev)
{
return __pm_genpd_name_add_device(domain_name, dev, NULL);
}
-static inline int pm_genpd_remove_callbacks(struct device *dev)
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
+extern void pm_genpd_syscore_poweroff(struct device *dev);
+extern void pm_genpd_syscore_poweron(struct device *dev);
+#else
+static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
+static inline void pm_genpd_syscore_poweron(struct device *dev) {}
+#endif
+
+/* OF PM domain providers */
+struct of_device_id;
+
+struct genpd_onecell_data {
+ struct generic_pm_domain **domains;
+ unsigned int num_domains;
+};
+
+typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+ void *data);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data);
+void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec);
+
+struct generic_pm_domain *__of_genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data);
+struct generic_pm_domain *__of_genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data);
+
+int genpd_dev_pm_attach(struct device *dev);
+#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
+static inline int __of_genpd_add_provider(struct device_node *np,
+ genpd_xlate_t xlate, void *data)
{
- return __pm_genpd_remove_callbacks(dev, true);
+ return 0;
}
+static inline void of_genpd_del_provider(struct device_node *np) {}
-#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
-extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
-extern void pm_genpd_poweroff_unused(void);
-#else
-static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
-static inline void pm_genpd_poweroff_unused(void) {}
-#endif
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ return NULL;
+}
-#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
-extern void pm_genpd_syscore_switch(struct device *dev, bool suspend);
-#else
-static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {}
-#endif
+#define __of_genpd_xlate_simple NULL
+#define __of_genpd_xlate_onecell NULL
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
-static inline void pm_genpd_syscore_poweroff(struct device *dev)
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
+{
+ return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+}
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
{
- pm_genpd_syscore_switch(dev, true);
+ return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
}
-static inline void pm_genpd_syscore_poweron(struct device *dev)
+#ifdef CONFIG_PM
+extern int dev_pm_domain_attach(struct device *dev, bool power_on);
+extern void dev_pm_domain_detach(struct device *dev, bool power_off);
+#else
+static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
- pm_genpd_syscore_switch(dev, false);
+ return -ENODEV;
}
+static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+#endif
#endif /* _LINUX_PM_DOMAIN_H */
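
The pm_domain.h changes add OF PM-domain providers and per-domain attach_dev()/detach_dev() hooks. A hedged sketch of a single-domain provider follows, assuming a hypothetical "foo" power controller; register accesses and error handling are elided:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int foo_pd_power_on(struct generic_pm_domain *pd)
{
	/* poke the (hypothetical) power-on register here */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *pd)
{
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
	.flags = GENPD_FLAG_PM_CLK,	/* device clocks are managed via pm_clk */
};

static int foo_pd_probe(struct platform_device *pdev)
{
	pm_genpd_init(&foo_pd, NULL, true);	/* start powered off */

	/*
	 * Consumers reference this node through a "power-domains" phandle;
	 * genpd_dev_pm_attach() then binds them when their drivers probe.
	 */
	return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
}
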
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0330217abfad..cec2d4540914 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -21,7 +21,7 @@ struct dev_pm_opp;
struct device;
enum dev_pm_opp_event {
- OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
};
#if defined(CONFIG_PM_OPP)
@@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
+void dev_pm_opp_remove(struct device *dev, unsigned long freq);
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
@@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
return -EINVAL;
}
+static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+{
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
@@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int of_init_opp_table(struct device *dev);
+void of_free_opp_table(struct device *dev);
#else
static inline int of_init_opp_table(struct device *dev)
{
return -EINVAL;
}
+
+static inline void of_free_opp_table(struct device *dev)
+{
+}
#endif
#endif /* __LINUX_OPP_H__ */
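
pm_opp.h gains dev_pm_opp_remove() and of_free_opp_table(), so the OPP table a driver creates can now be torn down again. A short sketch of the pairing, with purely illustrative frequencies and voltages:

#include <linux/pm_opp.h>

static int foo_setup_opps(struct device *dev)
{
	int ret;

	/* 500 MHz @ 1.0 V and 1 GHz @ 1.2 V -- example values only */
	ret = dev_pm_opp_add(dev, 500000000, 1000000);
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1000000000, 1200000);
}

static void foo_teardown_opps(struct device *dev)
{
	dev_pm_opp_remove(dev, 1000000000);
	dev_pm_opp_remove(dev, 500000000);
}
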
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 9ab4bf7c4646..7b3ae0cffc05 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -15,6 +15,7 @@ enum {
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_NETWORK_LATENCY,
PM_QOS_NETWORK_THROUGHPUT,
+ PM_QOS_MEMORY_BANDWIDTH,
/* insert new class ID */
PM_QOS_NUM_CLASSES,
@@ -32,6 +33,7 @@ enum pm_qos_flags_status {
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
@@ -69,7 +71,8 @@ struct dev_pm_qos_request {
enum pm_qos_type {
PM_QOS_UNITIALIZED,
PM_QOS_MAX, /* return the largest value */
- PM_QOS_MIN /* return the smallest value */
+ PM_QOS_MIN, /* return the smallest value */
+ PM_QOS_SUM /* return the sum */
};
/*
@@ -151,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value);
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
+void dev_pm_qos_hide_latency_limit(struct device *dev);
+int dev_pm_qos_expose_flags(struct device *dev, s32 value);
+void dev_pm_qos_hide_flags(struct device *dev);
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
+{
+ return dev->power.qos->resume_latency_req->data.pnode.prio;
+}
+
+static inline s32 dev_pm_qos_requested_flags(struct device *dev)
+{
+ return dev->power.qos->flags_req->data.flr.flags;
+}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
s32 mask)
@@ -197,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
enum dev_pm_qos_req_type type,
s32 value)
{ return 0; }
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
-void dev_pm_qos_hide_latency_limit(struct device *dev);
-int dev_pm_qos_expose_flags(struct device *dev, s32 value);
-void dev_pm_qos_hide_flags(struct device *dev);
-int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
-s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
-int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
-
-static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
-{
- return dev->power.qos->resume_latency_req->data.pnode.prio;
-}
-
-static inline s32 dev_pm_qos_requested_flags(struct device *dev)
-{
- return dev->power.qos->flags_req->data.flr.flags;
-}
-#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
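
pm_qos.h adds a PM_QOS_MEMORY_BANDWIDTH class alongside a new PM_QOS_SUM aggregation type, suggesting that concurrent requests of this class add up rather than being reduced to a min/max. A brief sketch of a requester; the bandwidth value and its unit are illustrative and depend on how the platform wires up this class:

#include <linux/pm_qos.h>

static struct pm_qos_request foo_bw_req;

static void foo_start_streaming(void)
{
	/* requests of a summed class accumulate across all requesters */
	pm_qos_add_request(&foo_bw_req, PM_QOS_MEMORY_BANDWIDTH, 200000);
}

static void foo_stop_streaming(void)
{
	pm_qos_remove_request(&foo_bw_req);
}
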
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 367f49b9a1c9..30e84d48bfea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);
-#else
-static inline bool queue_pm_work(struct work_struct *work) { return false; }
-
-static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
-static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
-static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
-static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
@@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev)
ACCESS_ONCE(dev->power.last_busy) = jiffies;
}
-#else /* !CONFIG_PM_RUNTIME */
+static inline bool pm_runtime_is_irq_safe(struct device *dev)
+{
+ return dev->power.irq_safe;
+}
+
+#else /* !CONFIG_PM */
+
+static inline bool queue_pm_work(struct work_struct *work) { return false; }
+
+static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
+static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
+static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
@@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
+static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
@@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
-#endif /* !CONFIG_PM_RUNTIME */
+#endif /* !CONFIG_PM */
static inline int pm_runtime_idle(struct device *dev)
{
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
index fe25876c1a5d..17d7d0d20eca 100644
--- a/include/linux/pnfs_osd_xdr.h
+++ b/include/linux/pnfs_osd_xdr.h
@@ -5,7 +5,7 @@
* All rights reserved.
*
* Benny Halevy <bhalevy@panasas.com>
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 07e7945a1ff2..e97fc656a058 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -253,9 +253,6 @@ struct charger_manager {
struct device *dev;
struct charger_desc *desc;
- struct power_supply *fuel_gauge;
- struct power_supply **charger_stat;
-
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd_batt;
#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index f3dea41dbcd2..096dbced02ac 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -18,8 +18,6 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
-struct device;
-
/*
* All voltages, currents, charges, energies, time and temperatures in uV,
* µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise
@@ -102,9 +100,11 @@ enum power_supply_property {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_BOOT,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_BOOT,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
@@ -146,6 +146,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_CALIBRATE,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -172,6 +173,7 @@ union power_supply_propval {
const char *strval;
};
+struct device;
struct device_node;
struct power_supply {
@@ -198,6 +200,12 @@ struct power_supply {
void (*external_power_changed)(struct power_supply *psy);
void (*set_charged)(struct power_supply *psy);
+ /*
+ * Set if thermal zone should not be created for this power supply.
+ * For example for virtual supplies forwarding calls to actual
+ * sensors or other supplies.
+ */
+ bool no_thermal;
/* For APM emulation, think legacy userspace. */
int use_for_apm;
@@ -291,6 +299,7 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_BOOT:
return 1;
default:
break;
@@ -315,6 +324,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ case POWER_SUPPLY_PROP_VOLTAGE_BOOT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_POWER_NOW:
diff --git a/include/linux/printk.h b/include/linux/printk.h
index d78125f73ac4..c8f170324e64 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...)
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
-void early_vprintk(const char *fmt, va_list ap);
#else
static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
+typedef int (*printk_func_t)(const char *fmt, va_list args);
+
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
int vprintk_emit(int facility, int level,
diff --git a/include/linux/prio_heap.h b/include/linux/prio_heap.h
deleted file mode 100644
index 08094350f26a..000000000000
--- a/include/linux/prio_heap.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _LINUX_PRIO_HEAP_H
-#define _LINUX_PRIO_HEAP_H
-
-/*
- * Simple insertion-only static-sized priority heap containing
- * pointers, based on CLR, chapter 7
- */
-
-#include <linux/gfp.h>
-
-/**
- * struct ptr_heap - simple static-sized priority heap
- * @ptrs - pointer to data area
- * @max - max number of elements that can be stored in @ptrs
- * @size - current number of valid elements in @ptrs (in the range 0..@size-1
- * @gt: comparison operator, which should implement "greater than"
- */
-struct ptr_heap {
- void **ptrs;
- int max;
- int size;
- int (*gt)(void *, void *);
-};
-
-/**
- * heap_init - initialize an empty heap with a given memory size
- * @heap: the heap structure to be initialized
- * @size: amount of memory to use in bytes
- * @gfp_mask: mask to pass to kmalloc()
- * @gt: comparison operator, which should implement "greater than"
- */
-extern int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask,
- int (*gt)(void *, void *));
-
-/**
- * heap_free - release a heap's storage
- * @heap: the heap structure whose data should be released
- */
-void heap_free(struct ptr_heap *heap);
-
-/**
- * heap_insert - insert a value into the heap and return any overflowed value
- * @heap: the heap to be operated on
- * @p: the pointer to be inserted
- *
- * Attempts to insert the given value into the priority heap. If the
- * heap is full prior to the insertion, then the resulting heap will
- * consist of the smallest @max elements of the original heap and the
- * new element; the greatest element will be removed from the heap and
- * returned. Note that the returned element will be the new element
- * (i.e. no change to the heap) if the new element is greater than all
- * elements currently in the heap.
- */
-extern void *heap_insert(struct ptr_heap *heap, void *p);
-
-
-
-#endif /* _LINUX_PRIO_HEAP_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 9d117f61d976..b97bf2ef996e 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -74,6 +74,8 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
#endif /* CONFIG_PROC_FS */
+struct net;
+
static inline struct proc_dir_entry *proc_net_mkdir(
struct net *net, const char *name, struct proc_dir_entry *parent)
{
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 34a1e105bef4..42dfc615dbf8 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -4,21 +4,18 @@
#ifndef _LINUX_PROC_NS_H
#define _LINUX_PROC_NS_H
+#include <linux/ns_common.h>
+
struct pid_namespace;
struct nsproxy;
+struct path;
struct proc_ns_operations {
const char *name;
int type;
- void *(*get)(struct task_struct *task);
- void (*put)(void *ns);
- int (*install)(struct nsproxy *nsproxy, void *ns);
- unsigned int (*inum)(void *ns);
-};
-
-struct proc_ns {
- void *ns;
- const struct proc_ns_operations *ns_ops;
+ struct ns_common *(*get)(struct task_struct *task);
+ void (*put)(struct ns_common *ns);
+ int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
};
extern const struct proc_ns_operations netns_operations;
@@ -43,32 +40,38 @@ enum {
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
-extern struct file *proc_ns_fget(int fd);
-extern struct proc_ns *get_proc_ns(struct inode *);
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
-extern bool proc_ns_inode(struct inode *inode);
#else /* CONFIG_PROC_FS */
static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
-static inline struct file *proc_ns_fget(int fd)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct proc_ns *get_proc_ns(struct inode *inode) { return NULL; }
-
static inline int proc_alloc_inum(unsigned int *inum)
{
*inum = 1;
return 0;
}
static inline void proc_free_inum(unsigned int inum) {}
-static inline bool proc_ns_inode(struct inode *inode) { return false; }
#endif /* CONFIG_PROC_FS */
+static inline int ns_alloc_inum(struct ns_common *ns)
+{
+ atomic_long_set(&ns->stashed, 0);
+ return proc_alloc_inum(&ns->inum);
+}
+
+#define ns_free_inum(ns) proc_free_inum((ns)->inum)
+
+extern struct file *proc_ns_fget(int fd);
+#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
+extern void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+
+extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+extern void nsfs_init(void);
+
#endif /* _LINUX_PROC_NS_H */
diff --git a/include/linux/property.h b/include/linux/property.h
new file mode 100644
index 000000000000..a6a3d98bd7e9
--- /dev/null
+++ b/include/linux/property.h
@@ -0,0 +1,143 @@
+/*
+ * property.h - Unified device property interface.
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_PROPERTY_H_
+#define _LINUX_PROPERTY_H_
+
+#include <linux/types.h>
+
+struct device;
+
+enum dev_prop_type {
+ DEV_PROP_U8,
+ DEV_PROP_U16,
+ DEV_PROP_U32,
+ DEV_PROP_U64,
+ DEV_PROP_STRING,
+ DEV_PROP_MAX,
+};
+
+bool device_property_present(struct device *dev, const char *propname);
+int device_property_read_u8_array(struct device *dev, const char *propname,
+ u8 *val, size_t nval);
+int device_property_read_u16_array(struct device *dev, const char *propname,
+ u16 *val, size_t nval);
+int device_property_read_u32_array(struct device *dev, const char *propname,
+ u32 *val, size_t nval);
+int device_property_read_u64_array(struct device *dev, const char *propname,
+ u64 *val, size_t nval);
+int device_property_read_string_array(struct device *dev, const char *propname,
+ const char **val, size_t nval);
+int device_property_read_string(struct device *dev, const char *propname,
+ const char **val);
+
+enum fwnode_type {
+ FWNODE_INVALID = 0,
+ FWNODE_OF,
+ FWNODE_ACPI,
+};
+
+struct fwnode_handle {
+ enum fwnode_type type;
+};
+
+bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
+int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+ const char *propname, u8 *val,
+ size_t nval);
+int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+ const char *propname, u16 *val,
+ size_t nval);
+int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+ const char *propname, u32 *val,
+ size_t nval);
+int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+ const char *propname, u64 *val,
+ size_t nval);
+int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval);
+int fwnode_property_read_string(struct fwnode_handle *fwnode,
+ const char *propname, const char **val);
+
+struct fwnode_handle *device_get_next_child_node(struct device *dev,
+ struct fwnode_handle *child);
+
+#define device_for_each_child_node(dev, child) \
+ for (child = device_get_next_child_node(dev, NULL); child; \
+ child = device_get_next_child_node(dev, child))
+
+void fwnode_handle_put(struct fwnode_handle *fwnode);
+
+unsigned int device_get_child_node_count(struct device *dev);
+
+static inline bool device_property_read_bool(struct device *dev,
+ const char *propname)
+{
+ return device_property_present(dev, propname);
+}
+
+static inline int device_property_read_u8(struct device *dev,
+ const char *propname, u8 *val)
+{
+ return device_property_read_u8_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u16(struct device *dev,
+ const char *propname, u16 *val)
+{
+ return device_property_read_u16_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u32(struct device *dev,
+ const char *propname, u32 *val)
+{
+ return device_property_read_u32_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u64(struct device *dev,
+ const char *propname, u64 *val)
+{
+ return device_property_read_u64_array(dev, propname, val, 1);
+}
+
+static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_present(fwnode, propname);
+}
+
+static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
+ const char *propname, u8 *val)
+{
+ return fwnode_property_read_u8_array(fwnode, propname, val, 1);
+}
+
+static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
+ const char *propname, u16 *val)
+{
+ return fwnode_property_read_u16_array(fwnode, propname, val, 1);
+}
+
+static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
+ const char *propname, u32 *val)
+{
+ return fwnode_property_read_u32_array(fwnode, propname, val, 1);
+}
+
+static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
+ const char *propname, u64 *val)
+{
+ return fwnode_property_read_u64_array(fwnode, propname, val, 1);
+}
+
+#endif /* _LINUX_PROPERTY_H_ */
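
The new property.h provides a firmware-agnostic property API: the same device_property_*() and fwnode_property_*() calls work whether the device was described by DT or by ACPI. A sketch, assuming hypothetical "clock-frequency" and "enable-on-boot" properties:

#include <linux/device.h>
#include <linux/property.h>

static int foo_parse_properties(struct device *dev)
{
	struct fwnode_handle *child;
	u32 freq;
	int ret;

	ret = device_property_read_u32(dev, "clock-frequency", &freq);
	if (ret)
		return ret;

	device_for_each_child_node(dev, child) {
		/* boolean properties map to the mere presence of the property */
		if (fwnode_property_read_bool(child, "enable-on-boot"))
			dev_info(dev, "child requests enable at boot\n");
	}

	return 0;
}
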
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/gfp.h>
struct prop_global {
/*
@@ -40,7 +41,7 @@ struct prop_descriptor {
struct mutex mutex; /* serialize the prop_global switch */
};
-int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
/*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
raw_spinlock_t lock; /* protect the snapshot state */
};
-int prop_local_init_percpu(struct prop_local_percpu *pl);
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975d40db..4af3fdc85b01 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
};
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info);
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+ unsigned int memtype);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
struct ramoops_platform_data {
unsigned long mem_size;
unsigned long mem_address;
+ unsigned int mem_type;
unsigned long record_size;
unsigned long console_size;
unsigned long ftrace_size;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index cc79eff4a1ad..987a73a40ef8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
-extern void exit_ptrace(struct task_struct *tracer);
+extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
index 18d75e795606..e1ab6e86cdb3 100644
--- a/include/linux/pxa168_eth.h
+++ b/include/linux/pxa168_eth.h
@@ -4,6 +4,8 @@
#ifndef __LINUX_PXA168_ETH_H
#define __LINUX_PXA168_ETH_H
+#include <linux/phy.h>
+
struct pxa168_eth_platform_data {
int port_number;
int phy_addr;
@@ -13,6 +15,7 @@ struct pxa168_eth_platform_data {
*/
int speed; /* 0, SPEED_10, SPEED_100 */
int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
+ phy_interface_t intf;
/*
* Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index f2b405116166..77aed9ea1d26 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -108,6 +108,25 @@
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
#endif
+/* QUARK_X1000 SSCR0 bit definition */
+#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
+#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
+
+#define RX_THRESH_QUARK_X1000_DFLT 1
+#define TX_THRESH_QUARK_X1000_DFLT 16
+
+#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
+#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
+
+#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
+#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
+#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
+#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+
/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -175,6 +194,7 @@ enum pxa_ssp_type {
PXA910_SSP,
CE4100_SSP,
LPSS_SSP,
+ QUARK_X1000_SSP,
};
struct ssp_device {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 80d345a3524c..50978b781a19 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -56,6 +56,11 @@ enum quota_type {
PRJQUOTA = 2, /* element used for project quotas */
};
+/* Masks for quota types when used as a bitmask */
+#define QTYPE_MASK_USR (1 << USRQUOTA)
+#define QTYPE_MASK_GRP (1 << GRPQUOTA)
+#define QTYPE_MASK_PRJ (1 << PRJQUOTA)
+
typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
typedef long long qsize_t; /* Type in which we store sizes */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 1d3eee594cd6..f23538a6e411 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot);
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
-int dquot_alloc_inode(const struct inode *inode);
+int dquot_alloc_inode(struct inode *inode);
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
-void dquot_free_inode(const struct inode *inode);
+void dquot_free_inode(struct inode *inode);
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
int dquot_disable(struct super_block *sb, int type, unsigned int flags);
@@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode)
{
}
-static inline int dquot_alloc_inode(const struct inode *inode)
+static inline int dquot_alloc_inode(struct inode *inode)
{
return 0;
}
-static inline void dquot_free_inode(const struct inode *inode)
+static inline void dquot_free_inode(struct inode *inode)
{
}
diff --git a/include/linux/random.h b/include/linux/random.h
index 57fbbffd77a0..b05856e16b75 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -26,7 +26,7 @@ unsigned int get_random_int(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
u32 prandom_u32(void);
-void prandom_bytes(void *buf, int nbytes);
+void prandom_bytes(void *buf, size_t nbytes);
void prandom_seed(u32 seed);
void prandom_reseed_late(void);
@@ -35,7 +35,7 @@ struct rnd_state {
};
u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
/**
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 0a260d8a18bf..18102529254e 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -17,14 +17,20 @@ struct ratelimit_state {
unsigned long begin;
};
-#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = { \
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.interval = interval_init, \
.burst = burst_init, \
}
+#define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = \
+ RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
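
RATELIMIT_STATE_INIT() lets a ratelimit_state be initialized as a member of a larger static object, which the old DEFINE_RATELIMIT_STATE() could not express. A small sketch with a hypothetical driver-private structure:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static struct foo_stats {
	unsigned long errors;
	struct ratelimit_state rs;
} foo_stats = {
	/* at most 10 messages per 5-second interval */
	.rs = RATELIMIT_STATE_INIT(foo_stats.rs, 5 * HZ, 10),
};

static void foo_report_error(void)
{
	foo_stats.errors++;
	if (__ratelimit(&foo_stats.rs))
		pr_err("foo: transfer failed (%lu total)\n", foo_stats.errors);
}
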
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index fea49b5da12a..378c5ee75f78 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -43,6 +43,16 @@ struct rb_augment_callbacks {
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+/*
+ * Fix up the rbtree and update the augmented information when rebalancing.
+ *
+ * On insertion, the user must update the augmented information on the path
+ * leading to the inserted node, then call rb_link_node() as usual and
+ * rb_augment_inserted() instead of the usual rb_insert_color() call.
+ * If rb_augment_inserted() rebalances the rbtree, it will call back into
+ * a user provided function to update the augmented information on the
+ * affected subtrees.
+ */
static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 372ad5e0dcb8..529bc946f450 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_entry_rcu - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
@@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_first_or_null_rcu - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*
@@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
@@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_for_each_entry_continue_rcu - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member))
+/**
+ * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from_rcu(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+ typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..ed4f5939a452 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,19 +47,17 @@
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
-#ifdef CONFIG_RCU_TORTURE_TEST
-extern int rcutorture_runnable; /* for sysctl */
-#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
RCU_SCHED_FLAVOR,
+ RCU_TASKS_FLAVOR,
SRCU_FLAVOR,
INVALID_RCU_FLAVOR
};
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
@@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head,
void synchronize_sched(void);
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
+
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
@@ -238,9 +258,9 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
-void rcu_check_callbacks(int cpu, int user);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
+void rcu_check_callbacks(int user);
struct notifier_block;
void rcu_idle_enter(void);
void rcu_idle_exit(void);
@@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
+#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nohz(void);
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_init_nohz(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
/**
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
@@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
rcu_irq_exit(); \
} while (0)
+/*
+ * Note a voluntary context switch for RCU-tasks benefit. This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define TASKS_RCU(x) x
+extern struct srcu_struct tasks_rcu_exit_srcu;
+#define rcu_note_voluntary_context_switch(t) \
+ do { \
+ if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+ ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ } while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define TASKS_RCU(x) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/**
+ * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+ * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ */
+#define cond_resched_rcu_qs() \
+do { \
+ if (!cond_resched()) \
+ rcu_note_voluntary_context_switch(current); \
+} while (0)
+
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
@@ -307,7 +365,7 @@ typedef void call_rcu_func_t(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
@@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
- return 1;
+ return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
-/**
- * rcu_read_lock_held() - might we be in RCU read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
- * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
- * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise. This is useful for debug checks in functions that
- * require that they be called within an RCU read-side critical section.
- *
- * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that rcu_read_lock() and the matching rcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * rcu_read_unlock() in process context if the matching rcu_read_lock()
- * was invoked from within an irq handler.
- *
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
- * offline from an RCU perspective, so check for those as well.
- */
-static inline int rcu_read_lock_held(void)
-{
- if (!debug_lockdep_rcu_enabled())
- return 1;
- if (!rcu_is_watching())
- return 0;
- if (!rcu_lockdep_current_cpu_online())
- return 0;
- return lock_is_held(&rcu_lock_map);
-}
-
-/*
- * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
- * hell.
- */
+int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
/**
@@ -593,6 +617,21 @@ static inline void rcu_preempt_sleep_check(void)
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ (_________p1); \
+})
+
+/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
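A hedged sketch of lockless_dereference() in use, assuming a pointer kept alive by reference counting rather than RCU; struct foo, its publisher, and the field read are invented for illustration.

	struct foo {
		struct kref ref;
		int value;
	};

	static struct foo *current_foo;	/* published elsewhere with store-release ordering */

	static int peek_foo_value(void)
	{
		/* Load the pointer once, with dependency ordering for later derefs. */
		struct foo *f = lockless_dereference(current_foo);

		return f ? f->value : -ENOENT;
	}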
@@ -828,7 +867,7 @@ static inline void rcu_preempt_sleep_check(void)
*
* In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
* it is illegal to block while in an RCU read-side critical section.
- * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
* kernel builds, RCU read-side critical sections may be preempted,
* but explicit blocking is illegal. Finally, in preemptible RCU
* implementations in real-time (with -rt patchset) kernel builds, RCU
@@ -863,7 +902,9 @@ static inline void rcu_read_lock(void)
* Unfortunately, this function acquires the scheduler's runqueue and
* priority-inheritance spinlocks. This means that deadlock could result
* if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
+ * any lock that is ever acquired while holding them; or any lock which
+ * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
+ * does not disable irqs while taking ->wait_lock.
*
* That said, RCU readers are never priority boosted unless they were
* preempted. Therefore, one way to avoid deadlock is to make sure
@@ -1023,6 +1064,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define RCU_INIT_POINTER(p, v) \
do { \
+ rcu_dereference_sparse(p, __rcu); \
p = RCU_INITIALIZER(v); \
} while (0)
@@ -1079,7 +1121,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
{
*delta_jiffies = ULONG_MAX;
return 0;
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d40a6a451330..0e5366200154 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -78,9 +78,9 @@ static inline void kfree_call_rcu(struct rcu_head *head,
call_rcu(head, func);
}
-static inline void rcu_note_context_switch(int cpu)
+static inline void rcu_note_context_switch(void)
{
- rcu_sched_qs(cpu);
+ rcu_sched_qs();
}
/*
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3e2f5d432743..52953790dcca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,9 +30,9 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-void rcu_note_context_switch(int cpu);
+void rcu_note_context_switch(void);
#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
+int rcu_needs_cpu(unsigned long *delta_jiffies);
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
void rcu_cpu_stall_reset(void);
@@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void);
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
- rcu_note_context_switch(cpu);
+ rcu_note_context_switch();
}
void synchronize_rcu_bh(void);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 48bf152761c7..67fc8fcdc4b0 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -38,6 +38,9 @@ extern int reboot_force;
extern int register_reboot_notifier(struct notifier_block *);
extern int unregister_reboot_notifier(struct notifier_block *);
+extern int register_restart_handler(struct notifier_block *);
+extern int unregister_restart_handler(struct notifier_block *);
+extern void do_kernel_restart(char *cmd);
/*
* Architecture-specific implementations of sys_reboot commands.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index c5ed83f49c4e..4419b99d8d6e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -27,6 +27,7 @@ struct spmi_device;
struct regmap;
struct regmap_range_cfg;
struct regmap_field;
+struct snd_ac97;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config);
+struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config);
struct regmap *devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config);
+struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config);
+
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
* regmap_init_mmio(): Initialise register map
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a8733068a7..d17e1ff7ad01 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
#ifndef __LINUX_REGULATOR_CONSUMER_H_
#define __LINUX_REGULATOR_CONSUMER_H_
+#include <linux/err.h>
+
struct device;
struct notifier_block;
struct regmap;
@@ -93,7 +95,14 @@ struct regmap;
* OVER_TEMP Regulator over temp.
* FORCE_DISABLE Regulator forcibly shut down by software.
* VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
* DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled
+ * ABORT_DISABLE Regulator disable failed for some reason
*
* NOTE: These events can be OR'ed together when passed into handler.
*/
@@ -106,6 +115,23 @@ struct regmap;
#define REGULATOR_EVENT_FORCE_DISABLE 0x20
#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+
+/**
+ * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
+ *
+ * @old_uV: Current voltage before change.
+ * @min_uV: Min voltage we'll change to.
+ * @max_uV: Max voltage we'll change to.
+ */
+struct pre_voltage_change_data {
+ unsigned long old_uV;
+ unsigned long min_uV;
+ unsigned long max_uV;
+};
struct regulator;
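As a hedged illustration of the new notifier events, a consumer handler might look roughly like this (handler name and logging are invented); it would typically be hooked up with regulator_register_notifier().

	static int my_reg_event(struct notifier_block *nb, unsigned long event,
				void *data)
	{
		if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
			struct pre_voltage_change_data *vd = data;

			pr_debug("regulator: %lu uV -> [%lu..%lu] uV\n",
				 vd->old_uV, vd->min_uV, vd->max_uV);
		} else if (event & REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE) {
			pr_debug("regulator: change aborted, still at %lu uV\n",
				 (unsigned long)data);
		}
		return NOTIFY_OK;
	}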
@@ -262,7 +288,7 @@ devm_regulator_get(struct device *dev, const char *id)
static inline struct regulator *__must_check
regulator_get_exclusive(struct device *dev, const char *id)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct regulator *__must_check
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 0981ce0e72cc..5479394fefce 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,5 +1,5 @@
/*
- * da9211.h - Regulator device driver for DA9211
+ * da9211.h - Regulator device driver for DA9211/DA9213
* Copyright (C) 2014 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -20,6 +20,11 @@
#define DA9211_MAX_REGULATORS 2
+enum da9211_chip_id {
+ DA9211,
+ DA9213,
+};
+
struct da9211_pdata {
/*
* Number of buck
@@ -27,6 +32,6 @@ struct da9211_pdata {
* 2 : 2 phase 2 buck
*/
int num_buck;
- struct regulator_init_data *init_data;
+ struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
};
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4efa1ed8a2b0..5f1e9ca47417 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -203,6 +203,8 @@ enum regulator_type {
*
* @name: Identifying name for the regulator.
* @supply_name: Identifying the regulator supply
+ * @of_match: Name used to identify regulator in DT.
+ * @regulators_node: Name of node containing regulator definitions in DT.
* @id: Numerical identifier for the regulator.
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
@@ -240,14 +242,19 @@ enum regulator_type {
* @bypass_val_off: Disabling value for control when using regmap set_bypass
*
* @enable_time: Time taken for initial enable of regulator (in uS).
+ * @off_on_delay: guard time (in uS), before re-enabling a regulator
+ *
+ * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
*/
struct regulator_desc {
const char *name;
const char *supply_name;
+ const char *of_match;
+ const char *regulators_node;
int id;
bool continuous_voltage_range;
unsigned n_voltages;
- struct regulator_ops *ops;
+ const struct regulator_ops *ops;
int irq;
enum regulator_type type;
struct module *owner;
@@ -278,6 +285,10 @@ struct regulator_desc {
unsigned int bypass_val_off;
unsigned int enable_time;
+
+ unsigned int off_on_delay;
+
+ unsigned int (*of_map_mode)(unsigned int mode);
};
/**
@@ -294,6 +305,9 @@ struct regulator_desc {
* NULL).
* @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
* insufficient.
+ * @ena_gpio_initialized: GPIO controlling regulator enable was properly
+ * initialized, meaning that >= 0 is a valid gpio
+ * identifier and < 0 is a non existent gpio.
* @ena_gpio: GPIO controlling regulator enable.
* @ena_gpio_invert: Sense for GPIO enable control.
* @ena_gpio_flags: Flags to use when calling gpio_request_one()
@@ -305,6 +319,7 @@ struct regulator_config {
struct device_node *of_node;
struct regmap *regmap;
+ bool ena_gpio_initialized;
int ena_gpio;
unsigned int ena_gpio_invert:1;
unsigned int ena_gpio_flags;
@@ -350,6 +365,9 @@ struct regulator_dev {
struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
+
+ /* time when this regulator was disabled last time */
+ unsigned long last_off_jiffy;
};
struct regulator_dev *
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
index de9a7fae20be..cedd0febe882 100644
--- a/include/linux/regulator/max1586.h
+++ b/include/linux/regulator/max1586.h
@@ -40,7 +40,7 @@
*/
struct max1586_subdev_data {
int id;
- char *name;
+ const char *name;
struct regulator_init_data *platform_data;
};
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
index f9217965aaa3..763953f7e3b8 100644
--- a/include/linux/regulator/of_regulator.h
+++ b/include/linux/regulator/of_regulator.h
@@ -6,24 +6,29 @@
#ifndef __LINUX_OF_REG_H
#define __LINUX_OF_REG_H
+struct regulator_desc;
+
struct of_regulator_match {
const char *name;
void *driver_data;
struct regulator_init_data *init_data;
struct device_node *of_node;
+ const struct regulator_desc *desc;
};
#if defined(CONFIG_OF)
extern struct regulator_init_data
*of_get_regulator_init_data(struct device *dev,
- struct device_node *node);
+ struct device_node *node,
+ const struct regulator_desc *desc);
extern int of_regulator_match(struct device *dev, struct device_node *node,
struct of_regulator_match *matches,
unsigned int num_matches);
#else
static inline struct regulator_init_data
*of_get_regulator_init_data(struct device *dev,
- struct device_node *node)
+ struct device_node *node,
+ const struct regulator_desc *desc)
{
return NULL;
}
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
deleted file mode 100644
index 56b7bc32db4f..000000000000
--- a/include/linux/res_counter.h
+++ /dev/null
@@ -1,223 +0,0 @@
-#ifndef __RES_COUNTER_H__
-#define __RES_COUNTER_H__
-
-/*
- * Resource Counters
- * Contain common data types and routines for resource accounting
- *
- * Copyright 2007 OpenVZ SWsoft Inc
- *
- * Author: Pavel Emelianov <xemul@openvz.org>
- *
- * See Documentation/cgroups/resource_counter.txt for more
- * info about what this counter is.
- */
-
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-
-/*
- * The core object. the cgroup that wishes to account for some
- * resource may include this counter into its structures and use
- * the helpers described beyond
- */
-
-struct res_counter {
- /*
- * the current resource consumption level
- */
- unsigned long long usage;
- /*
- * the maximal value of the usage from the counter creation
- */
- unsigned long long max_usage;
- /*
- * the limit that usage cannot exceed
- */
- unsigned long long limit;
- /*
- * the limit that usage can be exceed
- */
- unsigned long long soft_limit;
- /*
- * the number of unsuccessful attempts to consume the resource
- */
- unsigned long long failcnt;
- /*
- * the lock to protect all of the above.
- * the routines below consider this to be IRQ-safe
- */
- spinlock_t lock;
- /*
- * Parent counter, used for hierarchial resource accounting
- */
- struct res_counter *parent;
-};
-
-#define RES_COUNTER_MAX ULLONG_MAX
-
-/**
- * Helpers to interact with userspace
- * res_counter_read_u64() - returns the value of the specified member.
- * res_counter_read/_write - put/get the specified fields from the
- * res_counter struct to/from the user
- *
- * @counter: the counter in question
- * @member: the field to work with (see RES_xxx below)
- * @buf: the buffer to opeate on,...
- * @nbytes: its size...
- * @pos: and the offset.
- */
-
-u64 res_counter_read_u64(struct res_counter *counter, int member);
-
-ssize_t res_counter_read(struct res_counter *counter, int member,
- const char __user *buf, size_t nbytes, loff_t *pos,
- int (*read_strategy)(unsigned long long val, char *s));
-
-int res_counter_memparse_write_strategy(const char *buf,
- unsigned long long *res);
-
-/*
- * the field descriptors. one for each member of res_counter
- */
-
-enum {
- RES_USAGE,
- RES_MAX_USAGE,
- RES_LIMIT,
- RES_FAILCNT,
- RES_SOFT_LIMIT,
-};
-
-/*
- * helpers for accounting
- */
-
-void res_counter_init(struct res_counter *counter, struct res_counter *parent);
-
-/*
- * charge - try to consume more resource.
- *
- * @counter: the counter
- * @val: the amount of the resource. each controller defines its own
- * units, e.g. numbers, bytes, Kbytes, etc
- *
- * returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit
- *
- * charge_nofail works the same, except that it charges the resource
- * counter unconditionally, and returns < 0 if the after the current
- * charge we are over limit.
- */
-
-int __must_check res_counter_charge(struct res_counter *counter,
- unsigned long val, struct res_counter **limit_fail_at);
-int res_counter_charge_nofail(struct res_counter *counter,
- unsigned long val, struct res_counter **limit_fail_at);
-
-/*
- * uncharge - tell that some portion of the resource is released
- *
- * @counter: the counter
- * @val: the amount of the resource
- *
- * these calls check for usage underflow and show a warning on the console
- *
- * returns the total charges still present in @counter.
- */
-
-u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
-
-u64 res_counter_uncharge_until(struct res_counter *counter,
- struct res_counter *top,
- unsigned long val);
-/**
- * res_counter_margin - calculate chargeable space of a counter
- * @cnt: the counter
- *
- * Returns the difference between the hard limit and the current usage
- * of resource counter @cnt.
- */
-static inline unsigned long long res_counter_margin(struct res_counter *cnt)
-{
- unsigned long long margin;
- unsigned long flags;
-
- spin_lock_irqsave(&cnt->lock, flags);
- if (cnt->limit > cnt->usage)
- margin = cnt->limit - cnt->usage;
- else
- margin = 0;
- spin_unlock_irqrestore(&cnt->lock, flags);
- return margin;
-}
-
-/**
- * Get the difference between the usage and the soft limit
- * @cnt: The counter
- *
- * Returns 0 if usage is less than or equal to soft limit
- * The difference between usage and soft limit, otherwise.
- */
-static inline unsigned long long
-res_counter_soft_limit_excess(struct res_counter *cnt)
-{
- unsigned long long excess;
- unsigned long flags;
-
- spin_lock_irqsave(&cnt->lock, flags);
- if (cnt->usage <= cnt->soft_limit)
- excess = 0;
- else
- excess = cnt->usage - cnt->soft_limit;
- spin_unlock_irqrestore(&cnt->lock, flags);
- return excess;
-}
-
-static inline void res_counter_reset_max(struct res_counter *cnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnt->lock, flags);
- cnt->max_usage = cnt->usage;
- spin_unlock_irqrestore(&cnt->lock, flags);
-}
-
-static inline void res_counter_reset_failcnt(struct res_counter *cnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnt->lock, flags);
- cnt->failcnt = 0;
- spin_unlock_irqrestore(&cnt->lock, flags);
-}
-
-static inline int res_counter_set_limit(struct res_counter *cnt,
- unsigned long long limit)
-{
- unsigned long flags;
- int ret = -EBUSY;
-
- spin_lock_irqsave(&cnt->lock, flags);
- if (cnt->usage <= limit) {
- cnt->limit = limit;
- ret = 0;
- }
- spin_unlock_irqrestore(&cnt->lock, flags);
- return ret;
-}
-
-static inline int
-res_counter_set_soft_limit(struct res_counter *cnt,
- unsigned long long soft_limit)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnt->lock, flags);
- cnt->soft_limit = soft_limit;
- spin_unlock_irqrestore(&cnt->lock, flags);
- return 0;
-}
-
-#endif
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 41a4695fde08..ce6b962ffed4 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -12,11 +12,13 @@ struct reset_controller_dev;
* things to reset the device
* @assert: manually assert the reset line, if supported
* @deassert: manually deassert the reset line, if supported
+ * @status: return the status of the reset line, if supported
*/
struct reset_control_ops {
int (*reset)(struct reset_controller_dev *rcdev, unsigned long id);
int (*assert)(struct reset_controller_dev *rcdev, unsigned long id);
int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id);
+ int (*status)(struct reset_controller_dev *rcdev, unsigned long id);
};
struct module;
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 349f150ae12c..da5602bd77d7 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -10,6 +10,7 @@ struct reset_control;
int reset_control_reset(struct reset_control *rstc);
int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
+int reset_control_status(struct reset_control *rstc);
struct reset_control *reset_control_get(struct device *dev, const char *id);
void reset_control_put(struct reset_control *rstc);
@@ -57,6 +58,12 @@ static inline int reset_control_deassert(struct reset_control *rstc)
return 0;
}
+static inline int reset_control_status(struct reset_control *rstc)
+{
+ WARN_ON(1);
+ return 0;
+}
+
static inline void reset_control_put(struct reset_control *rstc)
{
WARN_ON(1);
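For context, a hedged sketch of a consumer using the new status query; the reset line name and the interpretation in the comment follow the usual provider convention rather than anything defined in this patch.

	struct reset_control *rstc = reset_control_get(dev, "ahb");	/* "ahb" is a placeholder id */
	int st;

	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	reset_control_assert(rstc);
	st = reset_control_status(rstc);	/* typically >0 asserted, 0 deasserted,
						 * <0 on error -- provider-specific */
	reset_control_put(rstc);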
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 36826c0166c5..b93fd89b2e5e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -44,6 +44,7 @@ struct rhashtable;
* @head_offset: Offset of rhash_head in struct to be hashed
* @hash_rnd: Seed to use while hashing
* @max_shift: Maximum number of shifts while expanding
+ * @min_shift: Minimum number of shifts while shrinking
* @hashfn: Function to hash key
* @obj_hashfn: Function to hash object
* @grow_decision: If defined, may return true if table should expand
@@ -57,13 +58,17 @@ struct rhashtable_params {
size_t head_offset;
u32 hash_rnd;
size_t max_shift;
+ size_t min_shift;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
bool (*grow_decision)(const struct rhashtable *ht,
size_t new_size);
bool (*shrink_decision)(const struct rhashtable *ht,
size_t new_size);
- int (*mutex_is_held)(void);
+#ifdef CONFIG_PROVE_LOCKING
+ int (*mutex_is_held)(void *parent);
+ void *parent;
+#endif
};
/**
@@ -94,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t);
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev, gfp_t flags);
+ struct rhash_head __rcu **pprev);
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags);
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags);
+int rhashtable_expand(struct rhashtable *ht);
+int rhashtable_shrink(struct rhashtable *ht);
void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 49a4d6f59108..e2c13cd863bd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index be574506e6a9..c0c2bce6b0b7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
- VM_BUG_ON(vma->anon_vma != next->anon_vma);
+ VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
unlink_anon_vmas(next);
}
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index c2c28975293c..6d6be09a2fe5 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -19,11 +19,28 @@
extern int rtc_month_days(unsigned int month, unsigned int year);
extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year);
extern int rtc_valid_tm(struct rtc_time *tm);
-extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
-extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
+extern time64_t rtc_tm_to_time64(struct rtc_time *tm);
+extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
ktime_t rtc_tm_to_ktime(struct rtc_time tm);
struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+/**
+ * Deprecated. Use rtc_time64_to_tm().
+ */
+static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
+{
+ rtc_time64_to_tm(time, tm);
+}
+
+/**
+ * Deprecated. Use rtc_tm_to_time64().
+ */
+static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
+{
+ *time = rtc_tm_to_time64(tm);
+
+ return 0;
+}
#include <linux/device.h>
#include <linux/seq_file.h>
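A small sketch of round-tripping through the new 64-bit helpers instead of the deprecated wrappers; the timestamp is an arbitrary post-2038 value chosen for illustration.

	struct rtc_time tm;
	time64_t t = 2500000000LL;	/* arbitrary value past the 32-bit limit */

	rtc_time64_to_tm(t, &tm);	/* seconds since the epoch -> broken-down time */
	t = rtc_tm_to_time64(&tm);	/* and back, without 32-bit truncation */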
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 167bae7bdfa4..5db76a32fcab 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
+struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
+ unsigned change, gfp_t flags);
+void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
+ gfp_t flags);
+
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
@@ -47,6 +52,16 @@ static inline int lockdep_rtnl_is_held(void)
rcu_dereference_check(p, lockdep_rtnl_is_held())
/**
+ * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereference
+ *
+ * Do an rcu_dereference_bh(p), but check that the caller either holds
+ * rcu_read_lock_bh() or RTNL. Note: please prefer rtnl_dereference() or
+ * rcu_dereference_bh().
+ */
+#define rcu_dereference_bh_rtnl(p) \
+ rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
+
+/**
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
@@ -84,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 flags);
+ u16 vid,
+ u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr);
+ const unsigned char *addr,
+ u16 vid);
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u16 mode);
+ struct net_device *dev, u16 mode,
+ u32 flags, u32 mask);
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 035d3c57fc8a..8f498cdde280 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* static then another method for expressing nested locking is
* the explicit definition of lock class keys and the use of
* lockdep_set_class() at lock initialization time.
- * See Documentation/lockdep-design.txt for more details.)
+ * See Documentation/locking/lockdep-design.txt for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b867a4dab38a..8db31ef98d2f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -57,6 +57,7 @@ struct sched_param {
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
+#include <linux/magic.h>
#include <asm/processor.h>
@@ -167,6 +168,7 @@ extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
+extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
@@ -241,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!(
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+
+#define __set_task_state(tsk, state_value) \
+ do { \
+ (tsk)->task_state_change = _THIS_IP_; \
+ (tsk)->state = (state_value); \
+ } while (0)
+#define set_task_state(tsk, state_value) \
+ do { \
+ (tsk)->task_state_change = _THIS_IP_; \
+ set_mb((tsk)->state, (state_value)); \
+ } while (0)
+
+/*
+ * set_current_state() includes a barrier so that the write of current->state
+ * is correctly serialised wrt the caller's subsequent test of whether to
+ * actually sleep:
+ *
+ * set_current_state(TASK_UNINTERRUPTIBLE);
+ * if (do_i_need_to_sleep())
+ * schedule();
+ *
+ * If the caller does not need such serialisation then use __set_current_state()
+ */
+#define __set_current_state(state_value) \
+ do { \
+ current->task_state_change = _THIS_IP_; \
+ current->state = (state_value); \
+ } while (0)
+#define set_current_state(state_value) \
+ do { \
+ current->task_state_change = _THIS_IP_; \
+ set_mb(current->state, (state_value)); \
+ } while (0)
+
+#else
+
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
@@ -257,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!(
*
* If the caller does not need such serialisation then use __set_current_state()
*/
-#define __set_current_state(state_value) \
+#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
-#define set_current_state(state_value) \
+#define set_current_state(state_value) \
set_mb(current->state, (state_value))
+#endif
+
/* Task command name length */
#define TASK_COMM_LEN 16
@@ -645,6 +686,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
+ seqlock_t stats_lock;
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
@@ -1023,6 +1065,7 @@ struct sched_domain_topology_level {
extern struct sched_domain_topology_level *sched_domain_topology;
extern void set_sched_topology(struct sched_domain_topology_level *tl);
+extern void wake_up_if_idle(int cpu);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
@@ -1212,6 +1255,13 @@ struct sched_dl_entity {
struct hrtimer dl_timer;
};
+union rcu_special {
+ struct {
+ bool blocked;
+ bool need_qs;
+ } b;
+ short s;
+};
struct rcu_node;
enum perf_event_task_context {
@@ -1264,12 +1314,18 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
- char rcu_read_unlock_special;
+ union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+ unsigned long rcu_tasks_nvcsw;
+ bool rcu_tasks_holdout;
+ struct list_head rcu_tasks_holdout_list;
+ int rcu_tasks_idle_cpu;
+#endif /* #ifdef CONFIG_TASKS_RCU */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
@@ -1308,6 +1364,10 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+#ifdef CONFIG_MEMCG_KMEM
+ unsigned memcg_kmem_skip_account:1;
+#endif
+
unsigned long atomic_flags; /* Flags needing atomic access. */
pid_t pid;
@@ -1541,28 +1601,23 @@ struct task_struct {
struct numa_group *numa_group;
/*
- * Exponential decaying average of faults on a per-node basis.
- * Scheduling placement decisions are made based on the these counts.
- * The values remain static for the duration of a PTE scan
+ * numa_faults is an array split into four regions:
+ * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
+ * in this precise order.
+ *
+ * faults_memory: Exponential decaying average of faults on a per-node
+ * basis. Scheduling placement decisions are made based on these
+ * counts. The values remain static for the duration of a PTE scan.
+ * faults_cpu: Track the nodes the process was running on when a NUMA
+ * hinting fault was incurred.
+ * faults_memory_buffer and faults_cpu_buffer: Record faults per node
+ * during the current scan window. When the scan completes, the counts
+ * in faults_memory and faults_cpu decay and these values are copied.
*/
- unsigned long *numa_faults_memory;
+ unsigned long *numa_faults;
unsigned long total_numa_faults;
/*
- * numa_faults_buffer records faults per node during the current
- * scan window. When the scan completes, the counts in
- * numa_faults_memory decay and these values are copied.
- */
- unsigned long *numa_faults_buffer_memory;
-
- /*
- * Track the nodes the process was running on when a NUMA hinting
- * fault was incurred.
- */
- unsigned long *numa_faults_cpu;
- unsigned long *numa_faults_buffer_cpu;
-
- /*
* numa_faults_locality tracks if faults recorded during the last
* scan window were remote/local. The task scan period is adapted
* based on the locality of the faults with different weights
@@ -1628,8 +1683,7 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
-#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
- unsigned int memcg_kmem_skip_account;
+#ifdef CONFIG_MEMCG
struct memcg_oom_info {
struct mem_cgroup *memcg;
gfp_t gfp_mask;
@@ -1644,6 +1698,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1934,11 +1991,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
+ * __GFP_FS is also cleared as it implies __GFP_IO.
+ */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~__GFP_IO;
+ flags &= ~(__GFP_IO | __GFP_FS);
return flags;
}
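To make the new masking concrete, a hedged example of the effect when PF_MEMALLOC_NOIO is set on the current task:

	/* Assuming current->flags has PF_MEMALLOC_NOIO set: */
	gfp_t gfp = memalloc_noio_flags(GFP_KERNEL);
	/*
	 * gfp is now GFP_KERNEL with both __GFP_IO and __GFP_FS cleared,
	 * i.e. effectively GFP_NOIO, so reclaim cannot recurse into I/O or
	 * filesystem paths.
	 */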
@@ -2011,29 +2070,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
unsigned int mask);
-#ifdef CONFIG_PREEMPT_RCU
-
-#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-
static inline void rcu_copy_process(struct task_struct *p)
{
+#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
- p->rcu_read_unlock_special = 0;
-#ifdef CONFIG_TREE_PREEMPT_RCU
+ p->rcu_read_unlock_special.s = 0;
p->rcu_blocked_node = NULL;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+ p->rcu_tasks_holdout = false;
+ INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+ p->rcu_tasks_idle_cpu = -1;
+#endif /* #ifdef CONFIG_TASKS_RCU */
}
-#else
-
-static inline void rcu_copy_process(struct task_struct *p)
-{
-}
-
-#endif
-
static inline void tsk_restore_flags(struct task_struct *task,
unsigned long orig_flags, unsigned long flags)
{
@@ -2041,6 +2092,10 @@ static inline void tsk_restore_flags(struct task_struct *task,
task->flags |= orig_flags & flags;
}
+extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+ const struct cpumask *trial);
+extern int task_can_attach(struct task_struct *p,
+ const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
@@ -2430,6 +2485,10 @@ extern void do_group_exit(int);
extern int do_execve(struct filename *,
const char __user * const __user *,
const char __user * const __user *);
+extern int do_execveat(int, struct filename *,
+ const char __user * const __user *,
+ const char __user * const __user *,
+ int);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -2639,6 +2698,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
}
#endif
+#define task_stack_end_corrupted(task) \
+ (*(end_of_stack(task)) != STACK_END_MAGIC)
static inline int object_is_on_stack(void *obj)
{
@@ -2661,6 +2722,7 @@ static inline unsigned long stack_not_used(struct task_struct *p)
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
+extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
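A hedged sketch of how the two stack-magic helpers above pair up; the check site and the reaction are purely illustrative.

	/* Once, when the task's stack is set up (done by core code): */
	set_task_stack_end_magic(tsk);		/* plants STACK_END_MAGIC at the stack end */

	/* Later, in some debug or fault path: */
	if (task_stack_end_corrupted(current))
		panic("kernel stack overrun\n");	/* illustrative reaction */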
@@ -2746,7 +2808,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
extern int _cond_resched(void);
#define cond_resched() ({ \
- __might_sleep(__FILE__, __LINE__, 0); \
+ ___might_sleep(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
@@ -2759,14 +2821,14 @@ extern int __cond_resched_lock(spinlock_t *lock);
#endif
#define cond_resched_lock(lock) ({ \
- __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 005bf3e38db5..f0f8bad54be9 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -5,12 +5,4 @@
extern struct screen_info screen_info;
-#define ORIG_X (screen_info.orig_x)
-#define ORIG_Y (screen_info.orig_y)
-#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
-#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
-#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
-#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
-#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
-#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 5d586a45a319..a19ddacdac30 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -27,19 +27,23 @@ struct seccomp {
struct seccomp_filter *filter;
};
-extern int __secure_computing(int);
-static inline int secure_computing(int this_syscall)
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+extern int __secure_computing(void);
+static inline int secure_computing(void)
{
if (unlikely(test_thread_flag(TIF_SECCOMP)))
- return __secure_computing(this_syscall);
+ return __secure_computing();
return 0;
}
-/* A wrapper for architectures supporting only SECCOMP_MODE_STRICT. */
-static inline void secure_computing_strict(int this_syscall)
-{
- BUG_ON(secure_computing(this_syscall) != 0);
-}
+#define SECCOMP_PHASE1_OK 0
+#define SECCOMP_PHASE1_SKIP 1
+
+extern u32 seccomp_phase1(struct seccomp_data *sd);
+int seccomp_phase2(u32 phase1_result);
+#else
+extern void secure_computing_strict(int this_syscall);
+#endif
extern long prctl_get_seccomp(void);
extern long prctl_set_seccomp(unsigned long, char __user *);
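Roughly how an arch syscall-entry path might consume the two-phase interface; this is a sketch against the declarations above, not any particular architecture's code. Phase 1 either settles the decision or returns an opaque value to feed into phase 2.

	u32 p1 = seccomp_phase1(&sd);		/* sd: a populated struct seccomp_data */

	if (p1 == SECCOMP_PHASE1_OK) {
		/* proceed with the syscall */
	} else if (p1 == SECCOMP_PHASE1_SKIP) {
		/* skip the syscall */
	} else {
		if (seccomp_phase2(p1))		/* non-zero return: skip the syscall */
			/* skip */;
	}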
@@ -56,8 +60,11 @@ static inline int seccomp_mode(struct seccomp *s)
struct seccomp { };
struct seccomp_filter { };
-static inline int secure_computing(int this_syscall) { return 0; }
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+static inline int secure_computing(void) { return 0; }
+#else
static inline void secure_computing_strict(int this_syscall) { return; }
+#endif
static inline long prctl_get_seccomp(void)
{
diff --git a/include/linux/security.h b/include/linux/security.h
index 623f90e5f38d..ba96471c11ba 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1559,7 +1559,7 @@ struct security_operations {
int (*file_lock) (struct file *file, unsigned int cmd);
int (*file_fcntl) (struct file *file, unsigned int cmd,
unsigned long arg);
- int (*file_set_fowner) (struct file *file);
+ void (*file_set_fowner) (struct file *file);
int (*file_send_sigiotask) (struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
@@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot);
int security_file_lock(struct file *file, unsigned int cmd);
int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-int security_file_set_fowner(struct file *file);
+void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
@@ -2108,7 +2108,7 @@ static inline int security_dentry_init_security(struct dentry *dentry,
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- const initxattrs initxattrs,
+ const initxattrs xattrs,
void *fs_data)
{
return 0;
@@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd,
return 0;
}
-static inline int security_file_set_fowner(struct file *file)
+static inline void security_file_set_fowner(struct file *file)
{
- return 0;
+ return;
}
static inline int security_file_send_sigiotask(struct task_struct *tsk,
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
new file mode 100644
index 000000000000..9aafe0e24c68
--- /dev/null
+++ b/include/linux/seq_buf.h
@@ -0,0 +1,136 @@
+#ifndef _LINUX_SEQ_BUF_H
+#define _LINUX_SEQ_BUF_H
+
+#include <linux/fs.h>
+
+/*
+ * A seq_buf lets a function call several other functions that each append
+ * to a single string of data.
+ */
+
+/**
+ * seq_buf - seq buffer structure
+ * @buffer: pointer to the buffer
+ * @size: size of the buffer
+ * @len: the amount of data inside the buffer
+ * @readpos: The next position to read in the buffer.
+ */
+struct seq_buf {
+ char *buffer;
+ size_t size;
+ size_t len;
+ loff_t readpos;
+};
+
+static inline void seq_buf_clear(struct seq_buf *s)
+{
+ s->len = 0;
+ s->readpos = 0;
+}
+
+static inline void
+seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
+{
+ s->buffer = buf;
+ s->size = size;
+ seq_buf_clear(s);
+}
+
+/*
+ * A seq_buf has a buffer that might overflow. When this happens,
+ * len is set to be greater than size so that seq_buf_has_overflowed()
+ * reports it.
+ */
+static inline bool
+seq_buf_has_overflowed(struct seq_buf *s)
+{
+ return s->len > s->size;
+}
+
+static inline void
+seq_buf_set_overflow(struct seq_buf *s)
+{
+ s->len = s->size + 1;
+}
+
+/*
+ * How much buffer is left on the seq_buf?
+ */
+static inline unsigned int
+seq_buf_buffer_left(struct seq_buf *s)
+{
+ if (seq_buf_has_overflowed(s))
+ return 0;
+
+ return s->size - s->len;
+}
+
+/* How much buffer was written? */
+static inline unsigned int seq_buf_used(struct seq_buf *s)
+{
+ return min(s->len, s->size);
+}
+
+/**
+ * seq_buf_get_buf - get buffer to write arbitrary data to
+ * @s: the seq_buf handle
+ * @bufp: the beginning of the buffer is stored here
+ *
+ * Return the number of bytes available in the buffer, or zero if
+ * there's no space.
+ */
+static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
+{
+ WARN_ON(s->len > s->size + 1);
+
+ if (s->len < s->size) {
+ *bufp = s->buffer + s->len;
+ return s->size - s->len;
+ }
+
+ *bufp = NULL;
+ return 0;
+}
+
+/**
+ * seq_buf_commit - commit data to the buffer
+ * @s: the seq_buf handle
+ * @num: the number of bytes to commit
+ *
+ * Commit @num bytes of data written to a buffer previously acquired
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
+ * didn't fit in the available space, pass a negative @num value.
+ */
+static inline void seq_buf_commit(struct seq_buf *s, int num)
+{
+ if (num < 0) {
+ seq_buf_set_overflow(s);
+ } else {
+ /* num must be negative on overflow */
+ BUG_ON(s->len + num > s->size);
+ s->len += num;
+ }
+}
+
+extern __printf(2, 3)
+int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
+extern __printf(2, 0)
+int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
+extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
+extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
+ int cnt);
+extern int seq_buf_puts(struct seq_buf *s, const char *str);
+extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
+extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
+extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
+ unsigned int len);
+extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
+
+extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
+ int nmaskbits);
+
+#ifdef CONFIG_BINARY_PRINTF
+extern int
+seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
+#endif
+
+#endif /* _LINUX_SEQ_BUF_H */
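A minimal usage sketch of the new seq_buf API with a caller-supplied scratch buffer; the values printed are arbitrary.

	unsigned char scratch[128];
	struct seq_buf s;

	seq_buf_init(&s, scratch, sizeof(scratch));
	seq_buf_printf(&s, "cpu=%d ", 3);
	seq_buf_puts(&s, "online");

	if (!seq_buf_has_overflowed(&s))
		pr_info("%.*s\n", (int)seq_buf_used(&s), s.buffer);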
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 52e0097f61f0..cf6a9daaaf6d 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -43,6 +43,21 @@ struct seq_operations {
#define SEQ_SKIP 1
/**
+ * seq_has_overflowed - check if the buffer has overflowed
+ * @m: the seq_file handle
+ *
+ * seq_files have a buffer which may overflow. When this happens a larger
+ * buffer is reallocated and all the data will be printed again.
+ * The overflow state is true when m->count == m->size.
+ *
+ * Returns true if the buffer received more than it can hold.
+ */
+static inline bool seq_has_overflowed(struct seq_file *m)
+{
+ return m->count == m->size;
+}
+
+/**
* seq_get_buf - get buffer to write arbitrary data to
* @m: the seq_file handle
* @bufp: the beginning of the buffer is stored here
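A hedged sketch of the intended call pattern for seq_has_overflowed() from a seq_file producer; the helper and its output are imaginary.

	static void my_print_status(struct seq_file *m)	/* imaginary helper */
	{
		seq_printf(m, "queued=%u\n", 42);

		/*
		 * If the buffer filled up, seq_file will allocate a larger one
		 * and call the show routine again, so stop producing output now.
		 */
		if (seq_has_overflowed(m))
			return;

		seq_puts(m, "details follow...\n");
	}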
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cc359636cfa3..f5df8f687b4d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -456,4 +456,23 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
spin_unlock_irqrestore(&sl->lock, flags);
}
+static inline unsigned long
+read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+{
+ unsigned long flags = 0;
+
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl_irqsave(lock, flags);
+
+ return flags;
+}
+
+static inline void
+done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+{
+ if (seq & 1)
+ read_sequnlock_excl_irqrestore(lock, flags);
+}
#endif /* __LINUX_SEQLOCK_H */
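The retry pattern these helpers are built for, sketched with a made-up reader: the first pass is lockless (even seq), and a failed pass falls back to the exclusive lock (odd seq) on the next iteration.

	static u64 read_sample(seqlock_t *lock, const u64 *value)	/* made-up reader */
	{
		unsigned long flags;
		int seq, nextseq = 0;
		u64 v;

		do {
			seq = nextseq;
			flags = read_seqbegin_or_lock_irqsave(lock, &seq);
			v = *value;
			/* If the lockless pass races a writer, lock on the next pass. */
			nextseq = 1;
		} while (need_seqretry(lock, seq));
		done_seqretry_irqrestore(lock, seq, flags);

		return v;
	}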
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index f93649e22c43..e02acf0a0ec9 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -84,6 +84,7 @@ struct uart_8250_port {
unsigned char mcr_mask; /* mask of user bits */
unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
+ unsigned int rpm_tx_active;
/*
* Some bits in registers are cleared on a read, so they must
@@ -121,6 +122,8 @@ extern void serial8250_early_out(struct uart_port *port, int offset, int value);
extern int setup_early_serial8250_console(char *cmdline);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
+extern int serial8250_do_startup(struct uart_port *port);
+extern void serial8250_do_shutdown(struct uart_port *port);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
extern int fsl8250_handle_irq(struct uart_port *port);
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h
index a80aa1a5bee2..570e964dc899 100644
--- a/include/linux/serial_bcm63xx.h
+++ b/include/linux/serial_bcm63xx.h
@@ -116,6 +116,4 @@
UART_FIFO_PARERR_MASK | \
UART_FIFO_BRKDET_MASK)
-#define UART_REG_SIZE 24
-
#endif /* _LINUX_SERIAL_BCM63XX_H */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index cf3a1e789bf5..057038cf2788 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -63,7 +63,7 @@ struct uart_ops {
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
struct ktermios *old);
- void (*set_ldisc)(struct uart_port *, int new);
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
@@ -112,6 +112,7 @@ struct uart_icount {
};
typedef unsigned int __bitwise__ upf_t;
+typedef unsigned int __bitwise__ upstat_t;
struct uart_port {
spinlock_t lock; /* port lock */
@@ -122,10 +123,16 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ int (*startup)(struct uart_port *port);
+ void (*shutdown)(struct uart_port *port);
+ void (*throttle)(struct uart_port *port);
+ void (*unthrottle)(struct uart_port *port);
int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int old);
void (*handle_break)(struct uart_port *);
+ int (*rs485_config)(struct uart_port *,
+ struct serial_rs485 *rs485);
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@@ -135,12 +142,13 @@ struct uart_port {
unsigned char iotype; /* io access style */
unsigned char unused1;
-#define UPIO_PORT (0)
-#define UPIO_HUB6 (1)
-#define UPIO_MEM (2)
-#define UPIO_MEM32 (3)
-#define UPIO_AU (4) /* Au1x00 and RT288x type IO */
-#define UPIO_TSI (5) /* Tsi108/109 type IO */
+#define UPIO_PORT (0) /* 8b I/O port access */
+#define UPIO_HUB6 (1) /* Hub6 ISA card */
+#define UPIO_MEM (2) /* 8b MMIO access */
+#define UPIO_MEM32 (3) /* 32b little endian */
+#define UPIO_MEM32BE (4) /* 32b big endian */
+#define UPIO_AU (5) /* Au1x00 and RT288x type IO */
+#define UPIO_TSI (6) /* Tsi108/109 type IO */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
@@ -152,23 +160,36 @@ struct uart_port {
unsigned long sysrq; /* sysrq timeout */
#endif
+ /* flags must be updated while holding port mutex */
upf_t flags;
-#define UPF_FOURPORT ((__force upf_t) (1 << 1))
-#define UPF_SAK ((__force upf_t) (1 << 2))
-#define UPF_SPD_MASK ((__force upf_t) (0x1030))
-#define UPF_SPD_HI ((__force upf_t) (0x0010))
-#define UPF_SPD_VHI ((__force upf_t) (0x0020))
-#define UPF_SPD_CUST ((__force upf_t) (0x0030))
-#define UPF_SPD_SHI ((__force upf_t) (0x1000))
-#define UPF_SPD_WARP ((__force upf_t) (0x1010))
-#define UPF_SKIP_TEST ((__force upf_t) (1 << 6))
-#define UPF_AUTO_IRQ ((__force upf_t) (1 << 7))
-#define UPF_HARDPPS_CD ((__force upf_t) (1 << 11))
-#define UPF_LOW_LATENCY ((__force upf_t) (1 << 13))
-#define UPF_BUGGY_UART ((__force upf_t) (1 << 14))
+ /*
+ * These flags must be equivalent to the flags defined in
+ * include/uapi/linux/tty_flags.h which are the userspace definitions
+ * assigned from the serial_struct flags in uart_set_info()
+ * [for bit definitions in the UPF_CHANGE_MASK]
+ *
+ * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
+ * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
+ * The remaining bits are serial-core specific and not modifiable by
+ * userspace.
+ */
+#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
+#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
+#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
+#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
+#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ )
+#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ )
+#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ )
+#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ )
+#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ )
+#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ )
+#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
+#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
+#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
-#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16))
+#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
+
/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */
#define UPF_HARD_FLOW ((__force upf_t) (1 << 21))
/* Port has hardware-assisted s/w flow control */
@@ -184,9 +205,21 @@ struct uart_port {
#define UPF_DEAD ((__force upf_t) (1 << 30))
#define UPF_IOREMAP ((__force upf_t) (1 << 31))
-#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff))
+#define __UPF_CHANGE_MASK 0x17fff
+#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
+#if __UPF_CHANGE_MASK > ASYNC_FLAGS
+#error Change mask not equivalent to userspace-visible bit defines
+#endif
+
+ /* status must be updated while holding port lock */
+ upstat_t status;
+
+#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
+#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
+
+ int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
unsigned int timeout; /* character-based timeout */
unsigned int type; /* port type */
@@ -201,6 +234,7 @@ struct uart_port {
unsigned char unused[2];
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
+ struct serial_rs485 rs485;
void *private_data; /* generic platform data pointer */
};
@@ -347,11 +381,16 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if(tty->stopped || tty->hw_stopped)
+ if (tty->stopped || port->hw_stopped)
return 1;
return 0;
}
+static inline bool uart_cts_enabled(struct uart_port *uport)
+{
+ return !!(uport->status & UPSTAT_CTS_ENABLE);
+}
+
/*
* The following are helper functions for the low level drivers.
*/
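As a hedged example, a driver's modem-control path might consult the new uart_cts_enabled() accessor like this; the MY_UART_* register bits and offset are fictitious.

	static void my_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
	{
		u32 ctrl = 0;

		if (mctrl & TIOCM_RTS)
			ctrl |= MY_UART_RTS;		/* fictitious bit */
		if (uart_cts_enabled(port))
			ctrl |= MY_UART_AUTO_CTS;	/* hardware-assisted CTS, fictitious bit */

		writel(ctrl, port->membase + MY_UART_CTRL);	/* fictitious offset */
	}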
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 68c097077ef0..f4aee75f00b1 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -18,8 +18,6 @@ struct shrink_control {
*/
unsigned long nr_to_scan;
- /* shrink from these nodes */
- nodemask_t nodes_to_scan;
/* current node being shrunk (for NUMA aware shrinkers) */
int nid;
};
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 750196fcc0a5..ab1e0392b5ac 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -2,6 +2,7 @@
#define _LINUX_SIGNAL_H
#include <linux/list.h>
+#include <linux/bug.h>
#include <uapi/linux/signal.h>
struct task_struct;
@@ -67,7 +68,6 @@ static inline int sigismember(sigset_t *set, int _sig)
static inline int sigisemptyset(sigset_t *set)
{
- extern void _NSIG_WORDS_is_unsupported_size(void);
switch (_NSIG_WORDS) {
case 4:
return (set->sig[3] | set->sig[2] |
@@ -77,7 +77,7 @@ static inline int sigisemptyset(sigset_t *set)
case 1:
return set->sig[0] == 0;
default:
- _NSIG_WORDS_is_unsupported_size();
+ BUILD_BUG();
return 0;
}
}
@@ -90,24 +90,23 @@ static inline int sigisemptyset(sigset_t *set)
#define _SIG_SET_BINOP(name, op) \
static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
{ \
- extern void _NSIG_WORDS_is_unsupported_size(void); \
unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \
\
switch (_NSIG_WORDS) { \
- case 4: \
+ case 4: \
a3 = a->sig[3]; a2 = a->sig[2]; \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
- case 2: \
+ case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
- case 1: \
+ case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
break; \
- default: \
- _NSIG_WORDS_is_unsupported_size(); \
+ default: \
+ BUILD_BUG(); \
} \
}
@@ -128,16 +127,14 @@ _SIG_SET_BINOP(sigandnsets, _sig_andn)
#define _SIG_SET_OP(name, op) \
static inline void name(sigset_t *set) \
{ \
- extern void _NSIG_WORDS_is_unsupported_size(void); \
- \
switch (_NSIG_WORDS) { \
- case 4: set->sig[3] = op(set->sig[3]); \
- set->sig[2] = op(set->sig[2]); \
- case 2: set->sig[1] = op(set->sig[1]); \
- case 1: set->sig[0] = op(set->sig[0]); \
+ case 4: set->sig[3] = op(set->sig[3]); \
+ set->sig[2] = op(set->sig[2]); \
+ case 2: set->sig[1] = op(set->sig[1]); \
+ case 1: set->sig[0] = op(set->sig[0]); \
break; \
- default: \
- _NSIG_WORDS_is_unsupported_size(); \
+ default: \
+ BUILD_BUG(); \
} \
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index abde271c18ae..85ab7d72b54c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -20,6 +20,8 @@
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
+#include <linux/rbtree.h>
+#include <linux/socket.h>
#include <linux/atomic.h>
#include <asm/types.h>
@@ -28,7 +30,6 @@
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
-#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
@@ -47,11 +48,29 @@
*
* The hardware you're dealing with doesn't calculate the full checksum
* (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will
- * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
- * undefined in this case though. It is a bad option, but, unfortunately,
- * nowadays most vendors do this. Apparently with the secret goal to sell
- * you new devices, when you will add new protocol to your host, f.e. IPv6 8)
+ * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
+ * if their checksums are okay. skb->csum is still undefined in this case
+ * though. It is a bad option, but, unfortunately, nowadays most vendors do
+ * this. Apparently with the secret goal to sell you new devices, when you
+ * will add new protocol to your host, f.e. IPv6 8)
+ *
+ * CHECKSUM_UNNECESSARY is applicable to following protocols:
+ * TCP: IPv6 and IPv4.
+ * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * zero UDP checksum for either IPv4 or IPv6; the networking stack
+ * may perform further validation in this case.
+ * GRE: only if the checksum is present in the header.
+ * SCTP: indicates the CRC in SCTP header has been validated.
+ *
+ * skb->csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
+ * and a device is able to verify the checksums for UDP (possibly zero),
+ * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ * two. If the device were only able to verify the UDP checksum and not
+ * GRE, either because it doesn't support GRE checksum or because GRE
+ * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
+ * not considered in this case).
*
* CHECKSUM_COMPLETE:
*
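
As a rough model of the csum_level rules described above: a device that verified K consecutive checksums reports CHECKSUM_UNNECESSARY with csum_level = K - 1, and each protocol layer that consumes one verified checksum decrements the level until the skb falls back to CHECKSUM_NONE. A small userspace sketch of that accounting, mirroring __skb_decr_checksum_unnecessary() added later in this patch (illustrative only, not kernel code):

/* Userspace model of the skb->csum_level bookkeeping described above. */
#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

struct pkt {
    int ip_summed;
    int csum_level;     /* checksums verified by the device, minus one */
};

/* Mirrors __skb_decr_checksum_unnecessary(): one layer consumed one of
 * the checksums the device had already verified.
 */
static void consume_verified_csum(struct pkt *p)
{
    if (p->ip_summed != CHECKSUM_UNNECESSARY)
        return;
    if (p->csum_level == 0)
        p->ip_summed = CHECKSUM_NONE;
    else
        p->csum_level--;
}

int main(void)
{
    /* Device verified two checksums (say UDP and GRE): level = 2 - 1 = 1. */
    struct pkt p = { CHECKSUM_UNNECESSARY, 1 };

    consume_verified_csum(&p);      /* UDP layer: level 1 -> 0 */
    consume_verified_csum(&p);      /* GRE layer: falls back to CHECKSUM_NONE */
    printf("ip_summed=%d csum_level=%d\n", p.ip_summed, p.csum_level);
    return 0;
}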
@@ -112,6 +131,9 @@
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3
+/* Maximum value in skb->csum_level */
+#define SKB_MAX_CSUM_LEVEL 3
+
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -128,6 +150,8 @@
struct net_device;
struct scatterlist;
struct pipe_inode_info;
+struct iov_iter;
+struct napi_struct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -135,7 +159,7 @@ struct nf_conntrack {
};
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
atomic_t use;
unsigned int mask;
@@ -318,9 +342,9 @@ struct skb_shared_info {
enum {
- SKB_FCLONE_UNAVAILABLE,
- SKB_FCLONE_ORIG,
- SKB_FCLONE_CLONE,
+ SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
+ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
+ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
};
enum {
@@ -349,8 +373,7 @@ enum {
SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
- SKB_GSO_MPLS = 1 << 12,
-
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
};
#if BITS_PER_LONG > 32
@@ -419,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @next: Next buffer in list
* @prev: Previous buffer in list
* @tstamp: Time we arrived/left
+ * @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @sk: Socket we are owned by
* @dev: Device we arrived on/are leaving by
* @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -452,6 +476,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @tc_verd: traffic control verdict
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
+ * @xmit_more: More SKBs are pending for this queue
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -460,8 +485,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
- * @dma_cookie: a cookie to one of several possible DMA operations
- * done by skb DMA functions
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
@@ -484,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
*/
struct sk_buff {
- /* These two members must be first. */
- struct sk_buff *next;
- struct sk_buff *prev;
-
union {
- ktime_t tstamp;
- struct skb_mstamp skb_mstamp;
+ struct {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+
+ union {
+ ktime_t tstamp;
+ struct skb_mstamp skb_mstamp;
+ };
+ };
+ struct rb_node rbnode; /* used in netem & tcp stack */
};
-
struct sock *sk;
struct net_device *dev;
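
The reorganised head of struct sk_buff overlays the list linkage (next/prev plus the timestamp) with struct rb_node, so a buffer can sit either on a plain queue or in an rbtree (netem, TCP) without enlarging the struct. A standalone sketch of that union-overlay pattern, with a stubbed rb_node (illustrative only):

/* One object that can be either on a doubly linked list or in an rbtree,
 * sharing the same storage for the linkage.
 */
#include <stdio.h>
#include <stddef.h>

struct rb_node_stub {                   /* stand-in for struct rb_node */
    unsigned long parent_color;
    struct rb_node_stub *left, *right;
};

struct buf {
    union {
        struct {                        /* view while queued on a list */
            struct buf *next;
            struct buf *prev;
            unsigned long long tstamp;
        };
        struct rb_node_stub rbnode;     /* view while held in an rbtree */
    };
    int payload;
};

int main(void)
{
    printf("list and rbtree views share storage: offsets %zu and %zu\n",
           offsetof(struct buf, next), offsetof(struct buf, rbnode));
    printf("sizeof(struct buf) = %zu\n", sizeof(struct buf));
    return 0;
}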
@@ -505,87 +532,102 @@ struct sk_buff {
char cb[48] __aligned(8);
unsigned long _skb_refdst;
+ void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
struct sec_path *sp;
#endif
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *nf_bridge;
+#endif
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
- union {
- __wsum csum;
- struct {
- __u16 csum_start;
- __u16 csum_offset;
- };
- };
- __u32 priority;
+
+ /* Following fields are _not_ copied in __copy_skb_header()
+ * Note that queue_mapping is here mostly to fill a hole.
+ */
kmemcheck_bitfield_begin(flags1);
- __u8 ignore_df:1,
- cloned:1,
- ip_summed:2,
+ __u16 queue_mapping;
+ __u8 cloned:1,
nohdr:1,
- nfctinfo:3;
- __u8 pkt_type:3,
fclone:2,
- ipvs_property:1,
peeked:1,
- nf_trace:1;
+ head_frag:1,
+ xmit_more:1;
+ /* one bit hole */
kmemcheck_bitfield_end(flags1);
- __be16 protocol;
- void (*destructor)(struct sk_buff *skb);
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- struct nf_conntrack *nfct;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
- struct nf_bridge_info *nf_bridge;
-#endif
-
- int skb_iif;
-
- __u32 hash;
-
- __be16 vlan_proto;
- __u16 vlan_tci;
+ /* fields enclosed in headers_start/headers_end are copied
+ * using a single memcpy() in __copy_skb_header()
+ */
+ /* private: */
+ __u32 headers_start[0];
+ /* public: */
-#ifdef CONFIG_NET_SCHED
- __u16 tc_index; /* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
- __u16 tc_verd; /* traffic control verdict */
-#endif
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
#endif
+#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
- __u16 queue_mapping;
- kmemcheck_bitfield_begin(flags2);
-#ifdef CONFIG_IPV6_NDISC_NODETYPE
- __u8 ndisc_nodetype:2;
-#endif
+ __u8 __pkt_type_offset[0];
+ __u8 pkt_type:3;
__u8 pfmemalloc:1;
+ __u8 ignore_df:1;
+ __u8 nfctinfo:3;
+
+ __u8 nf_trace:1;
+ __u8 ip_summed:2;
__u8 ooo_okay:1;
__u8 l4_hash:1;
__u8 sw_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
+
__u8 no_fcs:1;
- __u8 head_frag:1;
- /* Encapsulation protocol and NIC drivers should use
- * this flag to indicate to each other if the skb contains
- * encapsulated packet or not and maybe use the inner packet
- * headers if needed
- */
+ /* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
__u8 csum_complete_sw:1;
- /* 2/4 bit hole (depending on ndisc_nodetype presence) */
- kmemcheck_bitfield_end(flags2);
+ __u8 csum_level:2;
+ __u8 csum_bad:1;
+
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+ __u8 ndisc_nodetype:2;
+#endif
+ __u8 ipvs_property:1;
+ __u8 inner_protocol_type:1;
+ __u8 remcsum_offload:1;
+ /* 3 or 5 bit hole */
+
+#ifdef CONFIG_NET_SCHED
+ __u16 tc_index; /* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+ __u16 tc_verd; /* traffic control verdict */
+#endif
+#endif
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union {
- unsigned int napi_id;
- dma_cookie_t dma_cookie;
+ __wsum csum;
+ struct {
+ __u16 csum_start;
+ __u16 csum_offset;
+ };
};
+ __u32 priority;
+ int skb_iif;
+ __u32 hash;
+ __be16 vlan_proto;
+ __u16 vlan_tci;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int napi_id;
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
@@ -596,13 +638,24 @@ struct sk_buff {
__u32 reserved_tailroom;
};
- __be16 inner_protocol;
+ union {
+ __be16 inner_protocol;
+ __u8 inner_ipproto;
+ };
+
__u16 inner_transport_header;
__u16 inner_network_header;
__u16 inner_mac_header;
+
+ __be16 protocol;
__u16 transport_header;
__u16 network_header;
__u16 mac_header;
+
+ /* private: */
+ __u32 headers_end[0];
+ /* public: */
+
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
sk_buff_data_t end;
@@ -621,6 +674,7 @@ struct sk_buff {
#define SKB_ALLOC_FCLONE 0x01
#define SKB_ALLOC_RX 0x02
+#define SKB_ALLOC_NAPI 0x04
/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
@@ -665,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
skb->_skb_refdst = (unsigned long)dst;
}
-void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
- bool force);
-
/**
* skb_dst_set_noref - sets skb dst, hopefully, without taking reference
* @skb: buffer
@@ -680,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
*/
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
- __skb_dst_set_noref(skb, dst, false);
-}
-
-/**
- * skb_dst_set_noref_force - sets skb dst, without taking reference
- * @skb: buffer
- * @dst: dst entry
- *
- * Sets skb dst, assuming a reference was not taken on dst.
- * No reference is taken and no dst_release will be called. While for
- * cached dsts deferred reclaim is a basic feature, for entries that are
- * not cached it is caller's job to guarantee that last dst_release for
- * provided dst happens when nobody uses it, eg. after a RCU grace period.
- */
-static inline void skb_dst_set_noref_force(struct sk_buff *skb,
- struct dst_entry *dst)
-{
- __skb_dst_set_noref(skb, dst, true);
+ WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
/**
@@ -734,6 +769,41 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+ unsigned long data_len,
+ int max_page_order,
+ int *errcode,
+ gfp_t gfp_mask);
+
+/* Layout of fast clones : [skb1][skb2][fclone_ref] */
+struct sk_buff_fclones {
+ struct sk_buff skb1;
+
+ struct sk_buff skb2;
+
+ atomic_t fclone_ref;
+};
+
+/**
+ * skb_fclone_busy - check if fclone is busy
+ * @skb: buffer
+ *
+ * Returns true if skb is a fast clone, and its clone is not freed.
+ * Some drivers call skb_orphan() in their ndo_start_xmit(),
+ * so we also check that this didn't happen.
+ */
+static inline bool skb_fclone_busy(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ const struct sk_buff_fclones *fclones;
+
+ fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+ return skb->fclone == SKB_FCLONE_ORIG &&
+ atomic_read(&fclones->fclone_ref) > 1 &&
+ fclones->skb2.sk == sk;
+}
+
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
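
skb_fclone_busy() recovers the enclosing sk_buff_fclones from the skb with container_of() and treats the pair as busy while fclone_ref is still above one. A userspace sketch of that layout and lookup, with simplified types (not the kernel structs):

/* Fast-clone layout and the container_of() lookup (illustrative only). */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct buf { int id; };

struct buf_fclones {                    /* layout: [skb1][skb2][fclone_ref] */
    struct buf skb1;
    struct buf skb2;
    int fclone_ref;
};

static int fclone_busy(struct buf *skb)
{
    struct buf_fclones *fc = container_of(skb, struct buf_fclones, skb1);

    return fc->fclone_ref > 1;          /* companion clone still in use */
}

int main(void)
{
    struct buf_fclones fc = { { 1 }, { 2 }, 2 };

    printf("busy=%d\n", fclone_busy(&fc.skb1));     /* busy=1 */
    fc.fclone_ref = 1;                              /* the clone was freed */
    printf("busy=%d\n", fclone_busy(&fc.skb1));     /* busy=0 */
    return 0;
}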
@@ -1042,6 +1112,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
* Drop a reference to the header part of the buffer. This is done
* by acquiring a payload reference. You must not read from the header
* part of skb->data after this.
+ * Note : Check if you can use __skb_header_release() instead.
*/
static inline void skb_header_release(struct sk_buff *skb)
{
@@ -1051,6 +1122,20 @@ static inline void skb_header_release(struct sk_buff *skb)
}
/**
+ * __skb_header_release - release reference to header
+ * @skb: buffer to operate on
+ *
+ * Variant of skb_header_release() assuming skb is private to caller.
+ * We can avoid one atomic operation.
+ */
+static inline void __skb_header_release(struct sk_buff *skb)
+{
+ skb->nohdr = 1;
+ atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
+}
+
+
+/**
* skb_shared - is the buffer shared
* @skb: buffer to check
*
@@ -1116,7 +1201,12 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
- kfree_skb(skb); /* Free our shared copy */
+
+ /* Free our shared copy */
+ if (likely(nskb))
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
skb = nskb;
}
return skb;
@@ -1675,6 +1765,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
+#define ENCAP_TYPE_ETHER 0
+#define ENCAP_TYPE_IPPROTO 1
+
+static inline void skb_set_inner_protocol(struct sk_buff *skb,
+ __be16 protocol)
+{
+ skb->inner_protocol = protocol;
+ skb->inner_protocol_type = ENCAP_TYPE_ETHER;
+}
+
+static inline void skb_set_inner_ipproto(struct sk_buff *skb,
+ __u8 ipproto)
+{
+ skb->inner_ipproto = ipproto;
+ skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
+}
+
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
skb->inner_mac_header = skb->mac_header;
@@ -1860,18 +1967,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
-static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
-{
- /* Only continue with checksum unnecessary if device indicated
- * it is valid across encapsulation (skb->encapsulation was set).
- */
- if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
- skb->ip_summed = CHECKSUM_NONE;
-
- skb->encapsulation = 0;
- skb->csum_valid = 0;
-}
-
/*
* CPUs often take a performance hit when accessing unaligned memory
* locations. The actual performance hit varies, it can be small if the
@@ -2071,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+void *napi_alloc_frag(unsigned int fragsz);
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length, gfp_t gfp_mask);
+static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length)
+{
+ return __napi_alloc_skb(napi, length, GFP_ATOMIC);
+}
+
/**
- * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
- * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
- * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
- * @order: size of the allocation
+ * __dev_alloc_pages - allocate page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
+ * @order: size of the allocation
*
- * Allocate a new page.
+ * Allocate a new page.
*
- * %NULL is returned if there is no free memory.
+ * %NULL is returned if there is no free memory.
*/
-static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
- struct sk_buff *skb,
- unsigned int order)
-{
- struct page *page;
-
- gfp_mask |= __GFP_COLD;
-
- if (!(gfp_mask & __GFP_NOMEMALLOC))
- gfp_mask |= __GFP_MEMALLOC;
+static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
+ unsigned int order)
+{
+ /* This piece of code contains several assumptions.
+ * 1. This is for device Rx, therefore a cold page is preferred.
+ * 2. The expectation is the user wants a compound page.
+ * 3. If requesting an order 0 page it will not be compound
+ * due to the check to see if order has a value in prep_new_page.
+ * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
+ * code in gfp_to_alloc_flags that should be enforcing this.
+ */
+ gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
- page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
- if (skb && page && page->pfmemalloc)
- skb->pfmemalloc = true;
+ return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+}
- return page;
+static inline struct page *dev_alloc_pages(unsigned int order)
+{
+ return __dev_alloc_pages(GFP_ATOMIC, order);
}
/**
- * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
- * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
- * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ * __dev_alloc_page - allocate a page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
*
- * Allocate a new page.
+ * Allocate a new page.
*
- * %NULL is returned if there is no free memory.
+ * %NULL is returned if there is no free memory.
*/
-static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
- struct sk_buff *skb)
+static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
+{
+ return __dev_alloc_pages(gfp_mask, 0);
+}
+
+static inline struct page *dev_alloc_page(void)
{
- return __skb_alloc_pages(gfp_mask, skb, 0);
+ return __dev_alloc_page(GFP_ATOMIC);
}
/**
@@ -2343,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
-
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
@@ -2352,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
return skb_pad(skb, len - size);
}
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+
+ if (unlikely(size < len)) {
+ len -= size;
+ if (skb_pad(skb, len))
+ return -ENOMEM;
+ __skb_put(skb, len);
+ }
+ return 0;
+}
+
static inline int skb_add_data(struct sk_buff *skb,
char __user *from, int copy)
{
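
skb_put_padto() grows a short buffer to the requested minimum and relies on skb_pad() to zero the added bytes. The same pad-to-minimum logic in a standalone sketch, with a fixed-capacity buffer standing in for an skb (illustrative only):

/* Pad a buffer up to a minimum length with zero bytes. */
#include <stdio.h>
#include <string.h>

#define BUF_CAP 64

struct buf {
    unsigned char data[BUF_CAP];
    size_t len;
};

static int buf_put_padto(struct buf *b, size_t min_len)
{
    if (min_len > BUF_CAP)
        return -1;                              /* would not fit */
    if (b->len < min_len) {
        memset(b->data + b->len, 0, min_len - b->len);
        b->len = min_len;
    }
    return 0;
}

int main(void)
{
    struct buf b;

    memcpy(b.data, "abc", 3);
    b.len = 3;
    buf_put_padto(&b, 60);                      /* e.g. pad up to a 60-byte frame */
    printf("len=%zu last=%u\n", b.len, (unsigned)b.data[59]);   /* len=60 last=0 */
    return 0;
}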
@@ -2524,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
- struct iovec *to, int size);
-int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
- struct iovec *iov);
-int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
- const struct iovec *from, int from_offset,
- int len);
-int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
- int offset, size_t count);
-int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
- const struct iovec *to, int to_offset,
- int size);
+int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
+ struct iov_iter *to, int size);
+static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
+ struct msghdr *msg, int size)
+{
+ return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
+}
+int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
+ struct msghdr *msg);
+int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len);
+int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
@@ -2556,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_vlan_pop(struct sk_buff *skb);
+int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+
+static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
+{
+ /* XXX: stripping const */
+ return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+}
+
+static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
+{
+ return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+}
struct skb_checksum_ops {
__wsum (*update)(const void *mem, int len, __wsum wsum);
@@ -2567,20 +2712,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *buffer)
+static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
{
- int hlen = skb_headlen(skb);
-
if (hlen - offset >= len)
- return skb->data + offset;
+ return data + offset;
- if (skb_copy_bits(skb, offset, buffer, len) < 0)
+ if (!skb ||
+ skb_copy_bits(skb, offset, buffer, len) < 0)
return NULL;
return buffer;
}
+static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ return __skb_header_pointer(skb, offset, len, skb->data,
+ skb_headlen(skb), buffer);
+}
+
/**
* skb_needs_linearize - check if we need to linearize a given skb
* depending on the given device features.
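
__skb_header_pointer() either returns a pointer straight into the linear data or gathers the requested bytes into the caller's buffer, so callers must always use the returned pointer rather than the scratch buffer. A userspace sketch of that point-or-copy idiom over a "linear part plus one fragment" message (illustrative only):

/* Return a pointer into contiguous data when possible, else copy. */
#include <stdio.h>

static const void *header_pointer(const unsigned char *linear, size_t linear_len,
                                  const unsigned char *frag, size_t frag_len,
                                  size_t offset, size_t len,
                                  unsigned char *buffer)
{
    size_t i;

    if (offset + len <= linear_len)
        return linear + offset;                 /* header already contiguous */
    if (offset + len > linear_len + frag_len)
        return NULL;                            /* not that many bytes */

    for (i = 0; i < len; i++) {                 /* slow path: gather */
        size_t pos = offset + i;

        buffer[i] = pos < linear_len ? linear[pos] : frag[pos - linear_len];
    }
    return buffer;
}

int main(void)
{
    unsigned char linear[] = { 0x45, 0x00 };    /* start of a header */
    unsigned char frag[]   = { 0x12, 0x34 };    /* the rest lives elsewhere */
    unsigned char tmp[4];
    const unsigned char *h = header_pointer(linear, sizeof(linear),
                                            frag, sizeof(frag), 0, 4, tmp);

    printf("copied=%d byte3=0x%02x\n", h == tmp, h[3]);   /* copied=1 byte3=0x34 */
    return 0;
}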
@@ -2671,6 +2822,8 @@ static inline ktime_t net_invalid_timestamp(void)
return ktime_set(0, 0);
}
+struct sk_buff *skb_clone_sk(struct sk_buff *skb);
+
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
void skb_clone_tx_timestamp(struct sk_buff *skb);
@@ -2786,6 +2939,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
0 : __skb_checksum_complete(skb);
}
+static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->csum_level == 0)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->csum_level--;
+ }
+}
+
+static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+ skb->csum_level++;
+ } else if (skb->ip_summed == CHECKSUM_NONE) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = 0;
+ }
+}
+
+static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
+{
+ /* Mark current checksum as bad (typically called from GRO
+ * path). In the case that ip_summed is CHECKSUM_NONE
+ * this must be the first checksum encountered in the packet.
+ * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
+ * checksum after the last one validated. For UDP, a zero
+ * checksum can not be marked as bad.
+ */
+
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ skb->csum_bad = 1;
+}
+
/* Check if we need to perform checksum complete validation.
*
* Returns true if checksum complete is needed, false otherwise
@@ -2797,6 +2986,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
{
if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
skb->csum_valid = 1;
+ __skb_decr_checksum_unnecessary(skb);
return false;
}
@@ -2826,6 +3016,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
skb->csum_valid = 1;
return 0;
}
+ } else if (skb->csum_bad) {
+ /* ip_summed == CHECKSUM_NONE in this case */
+ return 1;
}
skb->csum = psum;
@@ -2883,6 +3076,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
#define skb_checksum_simple_validate(skb) \
__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
+static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
+{
+ return (skb->ip_summed == CHECKSUM_NONE &&
+ skb->csum_valid && !skb->csum_bad);
+}
+
+static inline void __skb_checksum_convert(struct sk_buff *skb,
+ __sum16 check, __wsum pseudo)
+{
+ skb->csum = ~pseudo;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+}
+
+#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
+do { \
+ if (__skb_checksum_convert_check(skb)) \
+ __skb_checksum_convert(skb, check, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -2896,7 +3109,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
atomic_inc(&nfct->use);
}
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
@@ -2914,7 +3127,7 @@ static inline void nf_reset(struct sk_buff *skb)
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
@@ -2928,19 +3141,22 @@ static inline void nf_reset_trace(struct sk_buff *skb)
}
/* Note: This doesn't put any conntrack and bridge info in dst. */
-static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
dst->nfct = src->nfct;
nf_conntrack_get(src->nfct);
- dst->nfctinfo = src->nfctinfo;
+ if (copy)
+ dst->nfctinfo = src->nfctinfo;
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
- dst->nf_trace = src->nf_trace;
+ if (copy)
+ dst->nf_trace = src->nf_trace;
#endif
}
@@ -2949,10 +3165,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(dst->nfct);
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(dst->nf_bridge);
#endif
- __nf_copy(dst, src);
+ __nf_copy(dst, src, true);
}
#ifdef CONFIG_NETWORK_SECMARK
@@ -3137,7 +3353,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-u32 __skb_get_poff(const struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
/**
* skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1d9abb7d22a0..9a139b637069 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,31 +158,6 @@ size_t ksize(const void *);
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
-#ifdef CONFIG_SLOB
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-struct kmem_cache {
- unsigned int object_size;/* The original size of the object */
- unsigned int size; /* The aligned/padded/added on size */
- unsigned int align; /* Alignment as calculated */
- unsigned long flags; /* Active flags on the slab */
- const char *name; /* Slab name for sysfs */
- int refcount; /* Use counter */
- void (*ctor)(void *); /* Called on object slot creation */
- struct list_head list; /* List of all slab caches on the system */
-};
-
-#endif /* CONFIG_SLOB */
-
/*
* Kmalloc array related definitions
*/
@@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-#ifdef CONFIG_SLAB
-#include <linux/slab_def.h>
-#endif
-
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#endif
-
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
#ifdef CONFIG_TRACING
@@ -526,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @memcg: pointer to the memcg this cache belongs to
* @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
- * @nr_pages: number of pages that belongs to this cache.
*/
struct memcg_cache_params {
bool is_root_cache;
@@ -539,17 +505,12 @@ struct memcg_cache_params {
struct mem_cgroup *memcg;
struct list_head list;
struct kmem_cache *root_cache;
- atomic_t nr_pages;
};
};
};
int memcg_update_all_caches(int num_memcgs);
-struct seq_file;
-int cache_show(struct kmem_cache *s, struct seq_file *m);
-void print_slabinfo_header(struct seq_file *m);
-
/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
@@ -582,37 +543,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
- (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
- (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
-#else
-#define kmalloc_track_caller(size, flags) \
- __kmalloc(size, flags)
-#endif /* DEBUG_SLAB */
#ifdef CONFIG_NUMA
-/*
- * kmalloc_node_track_caller is a special version of kmalloc_node that
- * records the calling function of the routine calling it for slab leak
- * tracking instead of just the calling function (confusing, eh?).
- * It's useful when the call to kmalloc_node comes from a widely-used
- * standard allocator where we care about the real place the memory
- * allocation request comes from.
- */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
- (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
- (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
-#else
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node(size, flags, node)
-#endif
#else /* CONFIG_NUMA */
@@ -650,14 +589,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
-/*
- * Determine the size of a slab object
- */
-static inline unsigned int kmem_cache_size(struct kmem_cache *s)
-{
- return s->object_size;
-}
-
+unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8235dfbb3b05..b869d1662ba3 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -8,6 +8,8 @@
*/
struct kmem_cache {
+ struct array_cache __percpu *cpu_cache;
+
/* 1) Cache tunables. Protected by slab_mutex */
unsigned int batchcount;
unsigned int limit;
@@ -71,23 +73,7 @@ struct kmem_cache {
struct memcg_cache_params *memcg_params;
#endif
-/* 6) per-cpu/per-node data, touched during every alloc/free */
- /*
- * We put array[] at the end of kmem_cache, because we want to size
- * this array to nr_cpu_ids slots instead of NR_CPUS
- * (see kmem_cache_init())
- * We still use [NR_CPUS] and not [1] or [0] because cache_cache
- * is statically defined, so we reserve the max number of cpus.
- *
- * We also need to guarantee that the list is able to accomodate a
- * pointer for each node since "nodelists" uses the remainder of
- * available pointers.
- */
- struct kmem_cache_node **node;
- struct array_cache *array[NR_CPUS + MAX_NUMNODES];
- /*
- * Do not add fields after array[]
- */
+ struct kmem_cache_node *node[MAX_NUMNODES];
};
#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 34347f26be9b..93dff5fff524 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -100,6 +100,7 @@ int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
void kick_all_cpus_sync(void);
+void wake_up_all_idle_cpus(void);
/*
* Generic and arch helpers
@@ -148,6 +149,7 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
}
static inline void kick_all_cpus_sync(void) { }
+static inline void wake_up_all_idle_cpus(void) { }
#endif /* !SMP */
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
new file mode 100644
index 000000000000..dad035c16d94
--- /dev/null
+++ b/include/linux/soc/ti/knav_dma.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+
+/*
+ * PKTDMA descriptor manipulation macros for host packet descriptor
+ */
+#define MASK(x) (BIT(x) - 1)
+#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22)
+#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0
+#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22)
+#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0
+#define KNAV_DMA_DESC_TAG_MASK MASK(8)
+#define KNAV_DMA_DESC_SAG_HI_SHIFT 24
+#define KNAV_DMA_DESC_STAG_LO_SHIFT 16
+#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8
+#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0
+#define KNAV_DMA_DESC_HAS_EPIB BIT(31)
+#define KNAV_DMA_DESC_NO_EPIB 0
+#define KNAV_DMA_DESC_PSLEN_SHIFT 24
+#define KNAV_DMA_DESC_PSLEN_MASK MASK(6)
+#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20
+#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4)
+#define KNAV_DMA_DESC_PSFLAG_SHIFT 16
+#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4)
+#define KNAV_DMA_DESC_RETQ_SHIFT 0
+#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
+#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
+
+#define KNAV_DMA_NUM_EPIB_WORDS 4
+#define KNAV_DMA_NUM_PS_WORDS 16
+#define KNAV_DMA_FDQ_PER_CHAN 4
+
+/* Tx channel scheduling priority */
+enum knav_dma_tx_priority {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MED_H,
+ DMA_PRIO_MED_L,
+ DMA_PRIO_LOW
+};
+
+/* Rx channel error handling mode during buffer starvation */
+enum knav_dma_rx_err_mode {
+ DMA_DROP = 0,
+ DMA_RETRY
+};
+
+/* Rx flow size threshold configuration */
+enum knav_dma_rx_thresholds {
+ DMA_THRESH_NONE = 0,
+ DMA_THRESH_0 = 1,
+ DMA_THRESH_0_1 = 3,
+ DMA_THRESH_0_1_2 = 7
+};
+
+/* Descriptor type */
+enum knav_dma_desc_type {
+ DMA_DESC_HOST = 0,
+ DMA_DESC_MONOLITHIC = 2
+};
+
+/**
+ * struct knav_dma_tx_cfg: Tx channel configuration
+ * @filt_einfo: Filter extended packet info
+ * @filt_pswords: Filter PS words present
+ * @priority: Tx channel scheduling priority
+ */
+struct knav_dma_tx_cfg {
+ bool filt_einfo;
+ bool filt_pswords;
+ enum knav_dma_tx_priority priority;
+};
+
+/**
+ * struct knav_dma_rx_cfg: Rx flow configuration
+ * @einfo_present: Extended packet info present
+ * @psinfo_present: PS words present
+ * @err_mode: Error handling during buffer starvation
+ * @desc_type: Host or Monolithic desc
+ * @psinfo_at_sop: PS word located at start of packet
+ * @sop_offset: Start of packet offset
+ * @dst_q: Destination queue for a given flow
+ * @thresh: Rx flow size threshold
+ * @fdq[]: Free desc Queue array
+ * @sz_thresh0: RX packet size threshold 0
+ * @sz_thresh1: RX packet size threshold 1
+ * @sz_thresh2: RX packet size threshold 2
+ */
+struct knav_dma_rx_cfg {
+ bool einfo_present;
+ bool psinfo_present;
+ enum knav_dma_rx_err_mode err_mode;
+ enum knav_dma_desc_type desc_type;
+ bool psinfo_at_sop;
+ unsigned int sop_offset;
+ unsigned int dst_q;
+ enum knav_dma_rx_thresholds thresh;
+ unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN];
+ unsigned int sz_thresh0;
+ unsigned int sz_thresh1;
+ unsigned int sz_thresh2;
+};
+
+/**
+ * struct knav_dma_cfg: Pktdma channel configuration
+ * @direction: DMA transfer direction (selects Tx or Rx configuration)
+ * @tx: Tx channel configuration
+ * @rx: Rx flow configuration
+ */
+struct knav_dma_cfg {
+ enum dma_transfer_direction direction;
+ union {
+ struct knav_dma_tx_cfg tx;
+ struct knav_dma_rx_cfg rx;
+ } u;
+};
+
+/**
+ * struct knav_dma_desc: Host packet descriptor layout
+ * @desc_info: Descriptor information like id, type, length
+ * @tag_info: Flow tag info written in during RX
+ * @packet_info: Queue Manager, policy, flags etc
+ * @buff_len: Buffer length in bytes
+ * @buff: Buffer pointer
+ * @next_desc: For chaining the descriptors
+ * @orig_len: length since 'buff_len' can be overwritten
+ * @orig_buff: buff pointer since 'buff' can be overwritten
+ * @epib: Extended packet info block
+ * @psdata: Protocol specific
+ */
+struct knav_dma_desc {
+ u32 desc_info;
+ u32 tag_info;
+ u32 packet_info;
+ u32 buff_len;
+ u32 buff;
+ u32 next_desc;
+ u32 orig_len;
+ u32 orig_buff;
+ u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
+ u32 psdata[KNAV_DMA_NUM_PS_WORDS];
+ u32 pad[4];
+} ____cacheline_aligned;
+
+#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config);
+void knav_dma_close_channel(void *channel);
+#else
+static inline void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ return (void *) NULL;
+}
+static inline void knav_dma_close_channel(void *channel)
+{}
+
+#endif
+
+#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */
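
The KNAV_DMA_DESC_* masks and shifts describe how fields are packed into the 32-bit words of struct knav_dma_desc; for instance, desc_info carries the packet length in its low 22 bits while packet_info carries the return-queue number and the EPIB flag. A standalone sketch of packing and unpacking a couple of those fields, with BIT()/MASK() spelled out and made-up values (illustrative only):

/* Packing and unpacking knav_dma descriptor words. */
#include <stdio.h>
#include <stdint.h>

#define BIT(x)                          (1u << (x))
#define MASK(x)                         (BIT(x) - 1)
#define KNAV_DMA_DESC_PKT_LEN_MASK      MASK(22)
#define KNAV_DMA_DESC_RETQ_MASK         MASK(14)
#define KNAV_DMA_DESC_RETQ_SHIFT        0
#define KNAV_DMA_DESC_HAS_EPIB          BIT(31)

int main(void)
{
    uint32_t pkt_len = 1514, retq = 645;
    uint32_t desc_info, packet_info;

    /* pack */
    desc_info = pkt_len & KNAV_DMA_DESC_PKT_LEN_MASK;
    packet_info = KNAV_DMA_DESC_HAS_EPIB |
                  ((retq & KNAV_DMA_DESC_RETQ_MASK) << KNAV_DMA_DESC_RETQ_SHIFT);

    /* unpack */
    printf("len=%u retq=%u epib=%d\n",
           (unsigned)(desc_info & KNAV_DMA_DESC_PKT_LEN_MASK),
           (unsigned)((packet_info >> KNAV_DMA_DESC_RETQ_SHIFT) &
                      KNAV_DMA_DESC_RETQ_MASK),
           !!(packet_info & KNAV_DMA_DESC_HAS_EPIB));
    return 0;
}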
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
new file mode 100644
index 000000000000..9f0ebb3bad27
--- /dev/null
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -0,0 +1,90 @@
+/*
+ * Keystone Navigator Queue Management Sub-System header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_TI_KNAV_QMSS_H__
+#define __SOC_TI_KNAV_QMSS_H__
+
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/dma-mapping.h>
+
+/* queue types */
+#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */
+#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */
+#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */
+
+/* queue flags */
+#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */
+
+/**
+ * enum knav_queue_ctrl_cmd - queue operations.
+ * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue
+ * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible
+ * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle.
+ * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle.
+ * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle.
+ * @KNAV_QUEUE_GET_COUNT: Get number of queues.
+ */
+enum knav_queue_ctrl_cmd {
+ KNAV_QUEUE_GET_ID,
+ KNAV_QUEUE_FLUSH,
+ KNAV_QUEUE_SET_NOTIFIER,
+ KNAV_QUEUE_ENABLE_NOTIFY,
+ KNAV_QUEUE_DISABLE_NOTIFY,
+ KNAV_QUEUE_GET_COUNT
+};
+
+/* Queue notifier callback prototype */
+typedef void (*knav_queue_notify_fn)(void *arg);
+
+/**
+ * struct knav_queue_notify_config: Notifier configuration
+ * @fn: Notifier function
+ * @fn_arg: Notifier function arguments
+ */
+struct knav_queue_notify_config {
+ knav_queue_notify_fn fn;
+ void *fn_arg;
+};
+
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags);
+void knav_queue_close(void *qhandle);
+int knav_queue_device_control(void *qhandle,
+ enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg);
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size);
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags);
+
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id);
+void knav_pool_destroy(void *ph);
+int knav_pool_count(void *ph);
+void *knav_pool_desc_get(void *ph);
+void knav_pool_desc_put(void *ph, void *desc);
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz);
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);
+
+#endif /* __SOC_TI_KNAV_QMSS_H__ */
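
The queue and pool calls above follow an open/get/map/push and then pop/unmap/put pattern. A hedged, kernel-context sketch using only the declarations from this header; the queue and pool names, the descriptor count and the 64-byte descriptor size are made up, and error handling is omitted:

#include <linux/soc/ti/knav_qmss.h>

static int example_cycle_one_desc(void)
{
    void *q, *pool, *desc;
    dma_addr_t dma;
    unsigned int dma_sz, sz;

    q = knav_queue_open("example-q", KNAV_QUEUE_GP, KNAV_QUEUE_SHARED);
    pool = knav_pool_create("example-pool", 128, 0);    /* 128 descs, region 0 */

    desc = knav_pool_desc_get(pool);                    /* grab a free descriptor */
    knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
    knav_queue_push(q, dma, dma_sz, 0);                 /* hand it to the queue */

    dma = knav_queue_pop(q, &sz);                       /* ...and take it back */
    desc = knav_pool_desc_unmap(pool, dma, sz);
    knav_pool_desc_put(pool, desc);

    knav_pool_destroy(pool);
    knav_queue_close(q);
    return 0;
}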
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec538fc287a6..6e49a14365dc 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -47,16 +47,25 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
- struct iovec *msg_iov; /* scatter/gather array */
- __kernel_size_t msg_iovlen; /* # elements in msg_iov */
+ struct iov_iter msg_iter; /* data */
void *msg_control; /* ancillary data */
__kernel_size_t msg_controllen; /* ancillary data buffer length */
unsigned int msg_flags; /* flags on received message */
};
+
+struct user_msghdr {
+ void __user *msg_name; /* ptr to socket address structure */
+ int msg_namelen; /* size of socket address structure */
+ struct iovec __user *msg_iov; /* scatter/gather array */
+ __kernel_size_t msg_iovlen; /* # elements in msg_iov */
+ void __user *msg_control; /* ancillary data */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ unsigned int msg_flags; /* flags on received message */
+};
/* For recvmmsg/sendmmsg */
struct mmsghdr {
- struct msghdr msg_hdr;
+ struct user_msghdr msg_hdr;
unsigned int msg_len;
};
@@ -94,6 +103,10 @@ struct cmsghdr {
(cmsg)->cmsg_len <= (unsigned long) \
((mhdr)->msg_controllen - \
((char *)(cmsg) - (char *)(mhdr)->msg_control)))
+#define for_each_cmsghdr(cmsg, msg) \
+ for (cmsg = CMSG_FIRSTHDR(msg); \
+ cmsg; \
+ cmsg = CMSG_NXTHDR(msg, cmsg))
/*
* Get the next cmsg header
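
for_each_cmsghdr() is just the usual CMSG_FIRSTHDR()/CMSG_NXTHDR() walk; the same macros are available to userspace via <sys/socket.h>. A standalone sketch that builds one ancillary record by hand and then walks it the way the new macro expands to (the fd value is a stand-in):

/* Userspace sketch of the walk for_each_cmsghdr() wraps. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
    char control[CMSG_SPACE(sizeof(int))];
    struct msghdr msg = { 0 };
    struct cmsghdr *cmsg;
    int fd = 0;                                 /* pretend file descriptor */

    memset(control, 0, sizeof(control));
    msg.msg_control = control;
    msg.msg_controllen = sizeof(control);

    /* build one SCM_RIGHTS ancillary record by hand */
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

    /* the loop for_each_cmsghdr(cmsg, &msg) expands to */
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
        printf("level=%d type=%d len=%zu\n",
               cmsg->cmsg_level, cmsg->cmsg_type, (size_t)cmsg->cmsg_len);
    return 0;
}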
@@ -256,7 +269,7 @@ struct ucred {
#define MSG_EOF MSG_FIN
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
-#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
+#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
descriptor received through
SCM_RIGHTS */
#if defined(CONFIG_COMPAT)
@@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
extern unsigned long iov_pages(const struct iovec *iov, int offset,
unsigned long nr_segs);
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
struct timespec;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
-extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
-extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index 2d676d5aaa89..aa07d7b32568 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -22,4 +22,22 @@ struct mcp23s08_platform_data {
* base to base+15 (or base+31 for s17 variant).
*/
unsigned base;
+ /* Marks the device as an interrupt controller.
+ * NOTE: The interrupt functionality is only supported for the I2C
+ * versions of the chips. The SPI chips can also generate interrupts,
+ * but this is not yet supported by the Linux driver.
+ */
+ bool irq_controller;
+
+ /* Sets the mirror flag in the IOCON register. Devices
+ * with two interrupt outputs (these are the devices ending with 17 and
+ * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and
+ * IO 8-15 are bank 2. These chips have two different interrupt outputs:
+ * One for bank 1 and another for bank 2. If irq-mirror is set, both
+ * interrupts are generated regardless of the bank that an input change
+ * occurred on. If it is not set, the interrupts are only generated for
+ * the bank they belong to.
+ * On devices with only one interrupt output this property is useless.
+ */
+ bool mirror;
};
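
With the two new fields, board code registering one of the I2C variants as an interrupt controller would fill the structure roughly as below; the GPIO base and the choice to mirror both banks onto one INT line are illustrative, board-specific values, not part of this patch:

#include <linux/spi/mcp23s08.h>

static struct mcp23s08_platform_data expander_pdata = {
    .base           = 200,      /* first GPIO number to claim (board choice) */
    .irq_controller = true,     /* expose the expander's INT pin(s) as an irqchip */
    .mirror         = true,     /* OR both banks onto a single INT output */
};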
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 82d5111cd0c2..d5a316550177 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -23,6 +23,8 @@
#define PXA2XX_CS_ASSERT (0x01)
#define PXA2XX_CS_DEASSERT (0x02)
+struct dma_chan;
+
/* device.platform_data for SSP controller devices */
struct pxa2xx_spi_master {
u32 clock_enable;
@@ -30,10 +32,9 @@ struct pxa2xx_spi_master {
u8 enable_dma;
/* DMA engine specific config */
- int rx_chan_id;
- int tx_chan_id;
- int rx_slave_id;
- int tx_slave_id;
+ bool (*dma_filter)(struct dma_chan *chan, void *param);
+ void *tx_param;
+ void *rx_param;
/* For non-PXA arches */
struct ssp_device ssp;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 46d188a9947c..a6ef2a8e6de4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi)
extern const struct spi_device_id *
spi_get_device_id(const struct spi_device *sdev);
+static inline bool
+spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
+{
+ return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
+}
+
#endif /* __LINUX_SPI_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3f2867ff0ced..262ba4ef9a8e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+/*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
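
The replacement macro leans on the comma operator: ((void)(subclass), (lock)) evaluates and discards subclass and then yields lock, so the argument still counts as used in the lockdep-off build. A standalone sketch of that idiom (illustrative only):

/* The "((void)(arg), value)" comma-operator idiom. */
#include <stdio.h>

static void do_lock(int *lock)
{
    *lock = 1;
}

/* Debug-off variant: 'subclass' is evaluated, so the compiler sees it as
 * used, but its value is discarded.
 */
#define lock_nested(lock, subclass) \
    do_lock(((void)(subclass), (lock)))

int main(void)
{
    int lock = 0;
    int subclass = 3;           /* only ever "used" through the macro */

    lock_nested(&lock, subclass);
    printf("locked=%d\n", lock);
    return 0;
}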
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 91f5eab9e428..f84212cd3b7d 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -134,9 +134,6 @@ void spmi_controller_remove(struct spmi_controller *ctrl);
* this structure.
* @probe: binds this driver to a SPMI device.
* @remove: unbinds this driver from the SPMI device.
- * @shutdown: standard shutdown callback used during powerdown/halt.
- * @suspend: standard suspend callback used during system suspend.
- * @resume: standard resume callback used during system resume.
*
* If PM runtime support is desired for a slave, a device driver can call
* pm_runtime_put() from their probe() routine (and a balancing
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 115b570e3bff..669045ab73f3 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_STACKTRACE_H
#define __LINUX_STACKTRACE_H
+#include <linux/types.h>
+
struct task_struct;
struct pt_regs;
@@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
+extern int snprint_stack_trace(char *buf, size_t size,
+ struct stack_trace *trace, int spaces);
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
@@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace);
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
+# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
#endif
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index d36977e029af..2e22a2e58f3a 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -41,7 +41,7 @@ extern int strcmp(const char *,const char *);
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRNICMP
-extern int strnicmp(const char *, const char *, __kernel_size_t);
+#define strnicmp strncasecmp
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
- const void *from, size_t available);
+ const void *from, size_t available);
/**
* strstarts - does @str start with @prefix?
@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
return strncmp(str, prefix, strlen(prefix)) == 0;
}
-extern size_t memweight(const void *ptr, size_t bytes);
+size_t memweight(const void *ptr, size_t bytes);
+void memzero_explicit(void *s, size_t count);
/**
* kbasename - return the last part of a pathname.
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 3eeee9672a4a..6eb567ac56bc 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -20,40 +20,6 @@ int string_get_size(u64 size, enum string_size_units units,
#define UNESCAPE_ANY \
(UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
-/**
- * string_unescape - unquote characters in the given string
- * @src: source buffer (escaped)
- * @dst: destination buffer (unescaped)
- * @size: size of the destination buffer (0 to unlimit)
- * @flags: combination of the flags (bitwise OR):
- * %UNESCAPE_SPACE:
- * '\f' - form feed
- * '\n' - new line
- * '\r' - carriage return
- * '\t' - horizontal tab
- * '\v' - vertical tab
- * %UNESCAPE_OCTAL:
- * '\NNN' - byte with octal value NNN (1 to 3 digits)
- * %UNESCAPE_HEX:
- * '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
- * %UNESCAPE_SPECIAL:
- * '\"' - double quote
- * '\\' - backslash
- * '\a' - alert (BEL)
- * '\e' - escape
- * %UNESCAPE_ANY:
- * all previous together
- *
- * Returns amount of characters processed to the destination buffer excluding
- * trailing '\0'.
- *
- * Because the size of the output will be the same as or less than the size of
- * the input, the transformation may be performed in place.
- *
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will always be NULL-terminated. Source string must be
- * NULL-terminated as well.
- */
int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
static inline int string_unescape_inplace(char *buf, unsigned int flags)
@@ -71,4 +37,35 @@ static inline int string_unescape_any_inplace(char *buf)
return string_unescape_any(buf, buf, 0);
}
+#define ESCAPE_SPACE 0x01
+#define ESCAPE_SPECIAL 0x02
+#define ESCAPE_NULL 0x04
+#define ESCAPE_OCTAL 0x08
+#define ESCAPE_ANY \
+ (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
+#define ESCAPE_NP 0x10
+#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
+#define ESCAPE_HEX 0x20
+
+int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
+ unsigned int flags, const char *esc);
+
+static inline int string_escape_mem_any_np(const char *src, size_t isz,
+ char **dst, size_t osz, const char *esc)
+{
+ return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
+}
+
+static inline int string_escape_str(const char *src, char **dst, size_t sz,
+ unsigned int flags, const char *esc)
+{
+ return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
+}
+
+static inline int string_escape_str_any_np(const char *src, char **dst,
+ size_t sz, const char *esc)
+{
+ return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
+}
+
#endif
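
string_escape_mem() selects which classes of bytes to convert with the ESCAPE_* bits; ESCAPE_ANY_NP, for example, additionally rewrites non-printable bytes. A userspace sketch of flag-selected escaping in the same spirit; this is not the kernel implementation and handles only a few of the flags:

/* Flag-selected byte escaping, loosely modelled on string_escape_mem(). */
#include <stdio.h>
#include <ctype.h>

#define ESCAPE_SPACE    0x01    /* \n, \t, ... */
#define ESCAPE_OCTAL    0x08    /* \NNN */
#define ESCAPE_NP       0x10    /* apply to non-printable bytes */

static int escape_byte(unsigned char c, unsigned int flags, char *out)
{
    if ((flags & ESCAPE_SPACE) && c == '\n')
        return sprintf(out, "\\n");
    if ((flags & ESCAPE_SPACE) && c == '\t')
        return sprintf(out, "\\t");
    if ((flags & (ESCAPE_OCTAL | ESCAPE_NP)) == (ESCAPE_OCTAL | ESCAPE_NP) &&
        !isprint(c))
        return sprintf(out, "\\%03o", c);
    return sprintf(out, "%c", c);
}

int main(void)
{
    const unsigned char src[] = "a\tb\n\x01";
    char out[64], *p = out;
    size_t i;

    for (i = 0; i < sizeof(src) - 1; i++)
        p += escape_byte(src[i], ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP, p);
    *p = '\0';
    printf("%s\n", out);        /* prints: a\tb\n\001 */
    return 0;
}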
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 8e030075fe79..a7cbb570cc5c 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -53,7 +53,7 @@ struct rpc_cred {
struct rcu_head cr_rcu;
struct rpc_auth * cr_auth;
const struct rpc_credops *cr_ops;
-#ifdef RPC_DEBUG
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
unsigned long cr_magic; /* 0x0f4aa4f0 */
#endif
unsigned long cr_expire; /* when to gc */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 70736b98c721..d86acc63b25f 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -63,6 +63,9 @@ struct rpc_clnt {
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
const struct rpc_program *cl_program;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ struct dentry *cl_debugfs; /* debugfs directory */
+#endif
};
/*
@@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+const char *rpc_proc_name(const struct rpc_task *task);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 9385bd74c860..c57d8ea0716c 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -10,22 +10,10 @@
#include <uapi/linux/sunrpc/debug.h>
-
-/*
- * Enable RPC debugging/profiling.
- */
-#ifdef CONFIG_SUNRPC_DEBUG
-#define RPC_DEBUG
-#endif
-#ifdef CONFIG_TRACEPOINTS
-#define RPC_TRACEPOINTS
-#endif
-/* #define RPC_PROFILE */
-
/*
* Debugging macros etc
*/
-#ifdef RPC_DEBUG
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
extern unsigned int rpc_debug;
extern unsigned int nfs_debug;
extern unsigned int nfsd_debug;
@@ -36,7 +24,7 @@ extern unsigned int nlm_debug;
#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args)
#undef ifdebug
-#ifdef RPC_DEBUG
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
# define dfprintk(fac, args...) \
@@ -65,9 +53,55 @@ extern unsigned int nlm_debug;
/*
* Sysctl interface for RPC debugging
*/
-#ifdef RPC_DEBUG
+
+struct rpc_clnt;
+struct rpc_xprt;
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
void rpc_register_sysctl(void);
void rpc_unregister_sysctl(void);
+int sunrpc_debugfs_init(void);
+void sunrpc_debugfs_exit(void);
+int rpc_clnt_debugfs_register(struct rpc_clnt *);
+void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
+int rpc_xprt_debugfs_register(struct rpc_xprt *);
+void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
+#else
+static inline int
+sunrpc_debugfs_init(void)
+{
+ return 0;
+}
+
+static inline void
+sunrpc_debugfs_exit(void)
+{
+ return;
+}
+
+static inline int
+rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
+{
+ return 0;
+}
+
+static inline void
+rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
+{
+ return;
+}
+
+static inline int
+rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
+{
+ return 0;
+}
+
+static inline void
+rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
+{
+ return;
+}
#endif
#endif /* _LINUX_SUNRPC_DEBUG_H_ */
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 1565bbe86d51..eecb5a71e6c0 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -27,10 +27,13 @@
#include <linux/seq_file.h>
#include <linux/ktime.h>
+#include <linux/spinlock.h>
#define RPC_IOSTATS_VERS "1.0"
struct rpc_iostats {
+ spinlock_t om_lock;
+
/*
* These counters give an idea about how many request
* transmissions are required, on average, to complete that
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 1a8959944c5f..5f1e6bd4c316 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -79,7 +79,7 @@ struct rpc_task {
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
-#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS)
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
unsigned short tk_pid; /* debugging aid */
#endif
unsigned char tk_priority : 2,/* Task priority */
@@ -187,7 +187,7 @@ struct rpc_wait_queue {
unsigned char nr; /* # tasks remaining for cookie */
unsigned short qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
-#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS)
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
const char * name;
#endif
};
@@ -237,7 +237,7 @@ void rpc_free(void *);
int rpciod_up(void);
void rpciod_down(void);
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
-#ifdef RPC_DEBUG
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct net;
void rpc_show_tasks(struct net *);
#endif
@@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task)
return __rpc_wait_for_completion_task(task, NULL);
}
-#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS)
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static inline const char * rpc_qname(const struct rpc_wait_queue *q)
{
return ((q && q->name) ? q->name : "unknown");
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cf61ecd148e0..6f22cfeef5e3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *);
/* statistics for svc_pool structures */
struct svc_pool_stats {
- unsigned long packets;
+ atomic_long_t packets;
unsigned long sockets_queued;
- unsigned long threads_woken;
- unsigned long threads_timedout;
+ atomic_long_t threads_woken;
+ atomic_long_t threads_timedout;
};
/*
@@ -45,12 +45,13 @@ struct svc_pool_stats {
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_threads; /* idle server threads */
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
- int sp_task_pending;/* has pending task */
+#define SP_TASK_PENDING (0) /* still work to do even if no
+ * xprt is queued. */
+ unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
/*
@@ -219,8 +220,8 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
* processed.
*/
struct svc_rqst {
- struct list_head rq_list; /* idle list */
struct list_head rq_all; /* all threads list */
+ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
struct sockaddr_storage rq_addr; /* peer address */
@@ -236,7 +237,6 @@ struct svc_rqst {
struct svc_cred rq_cred; /* auth info */
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
- bool rq_usedeferral; /* use deferral */
size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
@@ -253,9 +253,17 @@ struct svc_rqst {
u32 rq_vers; /* program version */
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
- unsigned short
- rq_secure : 1; /* secure port */
- unsigned short rq_local : 1; /* local request */
+ int rq_cachetype; /* catering to nfsd */
+#define RQ_SECURE (0) /* secure port */
+#define RQ_LOCAL (1) /* local request */
+#define RQ_USEDEFERRAL (2) /* use deferral */
+#define RQ_DROPME (3) /* drop current reply */
+#define RQ_SPLICE_OK (4) /* turned off in gss privacy
+ * to prevent encrypting page
+ * cache pages */
+#define RQ_VICTIM (5) /* about to be shut down */
+#define RQ_BUSY (6) /* request is busy */
+ unsigned long rq_flags; /* flags field */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
@@ -271,17 +279,12 @@ struct svc_rqst {
struct cache_req rq_chandle; /* handle passed to caches for
* request delaying
*/
- bool rq_dropme;
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- int rq_cachetype;
struct svc_cacherep * rq_cacherep; /* cache info */
- bool rq_splice_ok; /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
- wait_queue_head_t rq_wait; /* synchronization */
struct task_struct *rq_task; /* service thread */
+ spinlock_t rq_lock; /* per-request lock */
};
#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
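With the bool and bitfield members folded into a single rq_flags word, the expected access pattern becomes the atomic bitops. A sketch for illustration only (the helper names are made up; the RQ_* bit numbers come from the hunk above):

#include <linux/bitops.h>
#include <linux/sunrpc/svc.h>

static void example_mark_request(struct svc_rqst *rqstp, bool local)
{
        set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
        clear_bit(RQ_DROPME, &rqstp->rq_flags);
        if (local)
                set_bit(RQ_LOCAL, &rqstp->rq_flags);
}

static bool example_splice_allowed(const struct svc_rqst *rqstp)
{
        return test_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
}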
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ce6e4182a5b2..79f6f8f3dc0a 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,10 +63,9 @@ struct svc_xprt {
#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
#define XPT_DEFERRED 8 /* deferred request pending */
#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_DETACHED 10 /* detached from tempsocks list */
-#define XPT_LISTENER 11 /* listening endpoint */
-#define XPT_CACHE_AUTH 12 /* cache auth info */
-#define XPT_LOCAL 13 /* connection from loopback interface */
+#define XPT_LISTENER 10 /* listening endpoint */
+#define XPT_CACHE_AUTH 11 /* cache auth info */
+#define XPT_LOCAL 12 /* connection from loopback interface */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fcbfe8783243..9d27ac45b909 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -239,6 +239,9 @@ struct rpc_xprt {
struct net *xprt_net;
const char *servername;
const char *address_strings[RPC_DISPLAY_MAX];
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ struct dentry *debugfs; /* debugfs directory */
+#endif
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -357,6 +360,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
#define XPRT_CONNECTION_ABORT (7)
#define XPRT_CONNECTION_CLOSE (8)
#define XPRT_CONGESTED (9)
+#define XPRT_CONNECTION_REUSE (10)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 1ad36cc25b2e..7591788e9fbf 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -17,6 +17,65 @@ void cleanup_socket_xprt(void);
#define RPC_DEF_MIN_RESVPORT (665U)
#define RPC_DEF_MAX_RESVPORT (1023U)
+struct sock_xprt {
+ struct rpc_xprt xprt;
+
+ /*
+ * Network layer
+ */
+ struct socket * sock;
+ struct sock * inet;
+
+ /*
+ * State of TCP reply receive
+ */
+ __be32 tcp_fraghdr,
+ tcp_xid,
+ tcp_calldir;
+
+ u32 tcp_offset,
+ tcp_reclen;
+
+ unsigned long tcp_copied,
+ tcp_flags;
+
+ /*
+ * Connection of transports
+ */
+ struct delayed_work connect_worker;
+ struct sockaddr_storage srcaddr;
+ unsigned short srcport;
+
+ /*
+ * UDP socket buffer size parameters
+ */
+ size_t rcvsize,
+ sndsize;
+
+ /*
+ * Saved socket callback addresses
+ */
+ void (*old_data_ready)(struct sock *);
+ void (*old_state_change)(struct sock *);
+ void (*old_write_space)(struct sock *);
+ void (*old_error_report)(struct sock *);
+};
+
+/*
+ * TCP receive state flags
+ */
+#define TCP_RCV_LAST_FRAG (1UL << 0)
+#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
+#define TCP_RCV_COPY_XID (1UL << 2)
+#define TCP_RCV_COPY_DATA (1UL << 3)
+#define TCP_RCV_READ_CALLDIR (1UL << 4)
+#define TCP_RCV_COPY_CALLDIR (1UL << 5)
+
+/*
+ * TCP RPC flags
+ */
+#define TCP_RPC_REPLY (1UL << 6)
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..3388c1b6f7d8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
int (*begin)(void);
+ int (*prepare)(void);
+ void (*restore)(void);
void (*end)(void);
};
@@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
extern bool events_check_enabled;
extern bool pm_wakeup_pending(void);
+extern void pm_system_wakeup(void);
+extern void pm_wakeup_clear(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
static inline bool pm_wakeup_pending(void) { return false; }
+static inline void pm_system_wakeup(void) {}
+static inline void pm_wakeup_clear(void) {}
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
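The prepare()/restore() additions to platform_freeze_ops and the pm_system_wakeup()/pm_wakeup_clear() declarations pair naturally: a platform can arm a wakeup source before freeze and signal the PM core from its interrupt handler. A hypothetical sketch under those assumptions (none of these functions are part of this patch):

#include <linux/interrupt.h>
#include <linux/suspend.h>

static int example_freeze_prepare(void)
{
        /* arm the platform's wakeup source before entering freeze */
        return 0;
}

static void example_freeze_restore(void)
{
        /* undo whatever prepare() armed */
}

static const struct platform_freeze_ops example_freeze_ops = {
        .prepare = example_freeze_prepare,
        .restore = example_freeze_restore,
};

static irqreturn_t example_wake_irq(int irq, void *data)
{
        pm_system_wakeup();     /* no-op stub when sleep support is off */
        return IRQ_HANDLED;
}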
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1b72060f093a..34e8b60ab973 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -102,14 +102,6 @@ union swap_header {
} info;
};
- /* A swap entry has to fit into a "unsigned long", as
- * the entry is hidden in the "index" field of the
- * swapper address space.
- */
-typedef struct {
- unsigned long val;
-} swp_entry_t;
-
/*
* current->reclaim_state points to one of these when a task is running
* memory reclaim
@@ -327,8 +319,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
-extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
- gfp_t gfp_mask, bool noswap);
+extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
+ unsigned long nr_pages,
+ gfp_t gfp_mask,
+ bool may_swap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
struct zone *zone,
@@ -354,22 +348,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);
-extern unsigned long scan_unevictable_pages;
-extern int scan_unevictable_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-#ifdef CONFIG_NUMA
-extern int scan_unevictable_register_node(struct node *node);
-extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int scan_unevictable_register_node(struct node *node)
-{
- return 0;
-}
-static inline void scan_unevictable_unregister_node(struct node *node)
-{
-}
-#endif
-
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
new file mode 100644
index 000000000000..145306bdc92f
--- /dev/null
+++ b/include/linux/swap_cgroup.h
@@ -0,0 +1,42 @@
+#ifndef __LINUX_SWAP_CGROUP_H
+#define __LINUX_SWAP_CGROUP_H
+
+#include <linux/swap.h>
+
+#ifdef CONFIG_MEMCG_SWAP
+
+extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
+ unsigned short old, unsigned short new);
+extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
+extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
+extern int swap_cgroup_swapon(int type, unsigned long max_pages);
+extern void swap_cgroup_swapoff(int type);
+
+#else
+
+static inline
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
+{
+ return 0;
+}
+
+static inline
+unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
+{
+ return 0;
+}
+
+static inline int
+swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+ return 0;
+}
+
+static inline void swap_cgroup_swapoff(int type)
+{
+ return;
+}
+
+#endif /* CONFIG_MEMCG_SWAP */
+
+#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0f86d85a9ce4..85893d744901 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -25,7 +25,7 @@ struct linux_dirent64;
struct list_head;
struct mmap_arg_struct;
struct msgbuf;
-struct msghdr;
+struct user_msghdr;
struct mmsghdr;
struct msqid_ds;
struct new_utsname;
@@ -65,6 +65,7 @@ struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
struct sigaltstack;
+union bpf_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -600,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
asmlinkage long sys_send(int, void __user *, size_t, unsigned);
asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
struct sockaddr __user *, int);
-asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags);
asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
struct sockaddr __user *, int __user *);
-asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
+asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct timespec __user *timeout);
@@ -875,5 +876,10 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
const char __user *uargs);
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
+asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+
+asmlinkage long sys_execveat(int dfd, const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp, int flags);
#endif
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index f97d0dbb59fa..ddad16148bd6 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -70,6 +70,8 @@ struct attribute_group {
* for examples..
*/
+#define SYSFS_PREALLOC 010000
+
#define __ATTR(_name, _mode, _show, _store) { \
.attr = {.name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
@@ -77,6 +79,13 @@ struct attribute_group {
.store = _store, \
}
+#define __ATTR_PREALLOC(_name, _mode, _show, _store) { \
+ .attr = {.name = __stringify(_name), \
+ .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\
+ .show = _show, \
+ .store = _store, \
+}
+
#define __ATTR_RO(_name) { \
.attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
.show = _name##_show, \
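The SYSFS_PREALLOC flag marks an attribute whose transfer buffer should be set up ahead of time by the sysfs core; __ATTR_PREALLOC is a drop-in replacement for __ATTR. An illustrative (hypothetical) device attribute using it:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "ok\n");
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        return count;
}

/* 0644 still goes through VERIFY_OCTAL_PERMISSIONS() as with __ATTR() */
static struct device_attribute dev_attr_state =
        __ATTR_PREALLOC(state, 0644, state_show, state_store);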
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 98a3153c0f96..4b7b875a7ce1 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -49,4 +49,13 @@
int do_syslog(int type, char __user *buf, int count, bool from_file);
+#ifdef CONFIG_PRINTK
+int check_syslog_permissions(int type, bool from_file);
+#else
+static inline int check_syslog_permissions(int type, bool from_file)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
new file mode 100644
index 000000000000..6a8b9942632d
--- /dev/null
+++ b/include/linux/t10-pi.h
@@ -0,0 +1,22 @@
+#ifndef _LINUX_T10_PI_H
+#define _LINUX_T10_PI_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+
+/*
+ * T10 Protection Information tuple.
+ */
+struct t10_pi_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+
+extern struct blk_integrity t10_pi_type1_crc;
+extern struct blk_integrity t10_pi_type1_ip;
+extern struct blk_integrity t10_pi_type3_crc;
+extern struct blk_integrity t10_pi_type3_ip;
+
+#endif
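For orientation, this is how one Type 1 tuple could be filled for a 512-byte sector; a sketch only, using the existing crc_t10dif() helper from lib/ (the function name and fixed sector size are assumptions, not part of this header):

#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>

static void example_fill_pi(struct t10_pi_tuple *pi, const void *data, u32 lba)
{
        pi->guard_tag = cpu_to_be16(crc_t10dif(data, 512));
        pi->app_tag   = 0;                  /* opaque, unused here */
        pi->ref_tag   = cpu_to_be32(lba);   /* Type 1: low 32 bits of the LBA */
}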
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fa5258f322e7..67309ece0772 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,7 +19,6 @@
#include <linux/skbuff.h>
-#include <linux/dmaengine.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -131,7 +130,7 @@ struct tcp_sock {
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
u16 tcp_header_len; /* Bytes of tcp header to send */
- u16 xmit_size_goal_segs; /* Goal for segmenting output packets */
+ u16 gso_segs; /* Max number of segs per GSO packet */
/*
* Header prediction flags
@@ -163,16 +162,9 @@ struct tcp_sock {
struct {
struct sk_buff_head prequeue;
struct task_struct *task;
- struct iovec *iov;
+ struct msghdr *msg;
int memory;
int len;
-#ifdef CONFIG_NET_DMA
- /* members for async copy */
- struct dma_chan *dma_chan;
- int wakeup;
- struct dma_pinned_list *pinned_list;
- dma_cookie_t dma_cookie;
-#endif
} ucopy;
u32 snd_wl1; /* Sequence for window update */
@@ -212,10 +204,10 @@ struct tcp_sock {
u16 urg_data; /* Saved octet of OOB data and control flags */
u8 ecn_flags; /* ECN status bits. */
- u8 reordering; /* Packet reordering metric. */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
+ u32 reordering; /* Packet reordering metric. */
u32 snd_up; /* Urgent pointer */
- u8 keepalive_probes; /* num of allowed keep alive probes */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
@@ -276,7 +268,7 @@ struct tcp_sock {
u32 retrans_stamp; /* Timestamp of the last retransmit,
* also used in SYN-SENT to remember stamp of
* the first SYN. */
- u32 undo_marker; /* tracking retrans started here. */
+ u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
u32 total_retrans; /* Total retransmits for entire connection */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 0305cde21a74..fc52e307efab 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -29,26 +29,25 @@
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <uapi/linux/thermal.h>
#define THERMAL_TRIPS_NONE -1
#define THERMAL_MAX_TRIPS 12
-#define THERMAL_NAME_LENGTH 20
/* invalid cooling state */
#define THERMAL_CSTATE_INVALID -1UL
/* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID
+#define THERMAL_NO_LIMIT ((u32)~0)
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
((long)t-2732+5)/10 : ((long)t-2732-5)/10)
#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
-
-/* Adding event notification support elements */
-#define THERMAL_GENL_FAMILY_NAME "thermal_event"
-#define THERMAL_GENL_VERSION 0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
+#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
+#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
+#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
@@ -82,30 +81,6 @@ enum thermal_trend {
THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
-/* Events supported by Thermal Netlink */
-enum events {
- THERMAL_AUX0,
- THERMAL_AUX1,
- THERMAL_CRITICAL,
- THERMAL_DEV_FAULT,
-};
-
-/* attributes of thermal_genl_family */
-enum {
- THERMAL_GENL_ATTR_UNSPEC,
- THERMAL_GENL_ATTR_EVENT,
- __THERMAL_GENL_ATTR_MAX,
-};
-#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
-
-/* commands supported by the thermal_genl_family */
-enum {
- THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_EVENT,
- __THERMAL_GENL_CMD_MAX,
-};
-#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
-
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -285,19 +260,49 @@ struct thermal_genl_event {
enum events event;
};
+/**
+ * struct thermal_zone_of_device_ops - callbacks for handling DT based zones
+ *
+ * Mandatory:
+ * @get_temp: a pointer to a function that reads the sensor temperature.
+ *
+ * Optional:
+ * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @set_emul_temp: a pointer to a function that sets sensor emulated
+ * temperature.
+ */
+struct thermal_zone_of_device_ops {
+ int (*get_temp)(void *, long *);
+ int (*get_trend)(void *, long *);
+ int (*set_emul_temp)(void *, unsigned long);
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @np: pointer to struct device_node that this trip point was created from
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @type: trip point type
+ */
+
+struct thermal_trip {
+ struct device_node *np;
+ unsigned long int temperature;
+ unsigned long int hysteresis;
+ enum thermal_trip_type type;
+};
+
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *));
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
void thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
{
return NULL;
}
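A hypothetical sensor driver converted to the new ops-based registration (the old get_temp/get_trend function-pointer arguments disappear):

#include <linux/err.h>
#include <linux/thermal.h>

static int example_get_temp(void *data, long *temp)
{
        *temp = 42000;          /* millicelsius; read from hardware in real code */
        return 0;
}

static const struct thermal_zone_of_device_ops example_sensor_ops = {
        .get_temp = example_get_temp,           /* mandatory */
        /* .get_trend and .set_emul_temp are optional */
};

static int example_probe(struct device *dev, void *drvdata)
{
        struct thermal_zone_device *tz;

        tz = thermal_zone_of_sensor_register(dev, 0, drvdata,
                                             &example_sensor_ops);
        return PTR_ERR_OR_ZERO(tz);
}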
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 932b76392248..884d6263e962 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -268,7 +268,7 @@ struct kim_data_s {
struct st_data_s *core_data;
struct chip_version version;
unsigned char ldisc_install;
- unsigned char dev_name[UART_DEV_NAME_LEN];
+ unsigned char dev_name[UART_DEV_NAME_LEN + 1];
unsigned char flow_cntrl;
unsigned long baud_rate;
};
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9a82c7dc3fdd..eda850ca757a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,7 @@ extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_irq_enter(void);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
-# define arch_needs_cpu(cpu) (0)
+# define arch_needs_cpu() (0)
# endif
# else
static inline void tick_clock_notify(void) { }
@@ -181,14 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
-extern void tick_nohz_init(void);
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
-static inline void tick_nohz_init(void) { }
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void __tick_nohz_full_check(void) { }
diff --git a/include/linux/time.h b/include/linux/time.h
index 8c42cf8d2444..203c2ad40d71 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -39,9 +39,20 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva
return lhs->tv_usec - rhs->tv_usec;
}
-extern unsigned long mktime(const unsigned int year, const unsigned int mon,
- const unsigned int day, const unsigned int hour,
- const unsigned int min, const unsigned int sec);
+extern time64_t mktime64(const unsigned int year, const unsigned int mon,
+ const unsigned int day, const unsigned int hour,
+ const unsigned int min, const unsigned int sec);
+
+/**
+ * Deprecated. Use mktime64().
+ */
+static inline unsigned long mktime(const unsigned int year,
+ const unsigned int mon, const unsigned int day,
+ const unsigned int hour, const unsigned int min,
+ const unsigned int sec)
+{
+ return mktime64(year, mon, day, hour, min, sec);
+}
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
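A worked example of the replacement (illustrative; the date is arbitrary): mktime64() returns time64_t and therefore does not truncate dates past 2038 the way the old unsigned-long mktime() could on 32-bit systems.

#include <linux/time.h>

static time64_t example_rtc_to_time64(void)
{
        /* 2040-01-02 03:04:05 UTC still fits in time64_t */
        return mktime64(2040, 1, 2, 3, 4, 5);
}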
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 95640dcd1899..05af9a334893 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -42,6 +42,7 @@ struct tk_read_base {
* struct timekeeper - Structure holding internal timekeeping values.
* @tkr: The readout base structure
* @xtime_sec: Current CLOCK_REALTIME time in seconds
+ * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
* @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
* @offs_real: Offset clock monotonic -> clock realtime
* @offs_boot: Offset clock monotonic -> clock boottime
@@ -77,6 +78,7 @@ struct tk_read_base {
struct timekeeper {
struct tk_read_base tkr;
u64 xtime_sec;
+ unsigned long ktime_sec;
struct timespec64 wall_to_monotonic;
ktime_t offs_real;
ktime_t offs_boot;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 1caa6b04fdc5..9b63d13ba82b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -10,7 +10,7 @@ extern int timekeeping_suspended;
* Get and set timeofday
*/
extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(const struct timespec *tv);
+extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday(const struct timespec *tv,
const struct timezone *tz);
@@ -25,14 +25,24 @@ struct timespec __current_kernel_time(void);
/*
* timespec based interfaces
*/
-struct timespec get_monotonic_coarse(void);
-extern void getrawmonotonic(struct timespec *ts);
+struct timespec64 get_monotonic_coarse64(void);
+extern void getrawmonotonic64(struct timespec64 *ts);
extern void ktime_get_ts64(struct timespec64 *ts);
+extern time64_t ktime_get_seconds(void);
+extern time64_t ktime_get_real_seconds(void);
extern int __getnstimeofday64(struct timespec64 *tv);
extern void getnstimeofday64(struct timespec64 *tv);
#if BITS_PER_LONG == 64
+/**
+ * Deprecated. Use do_settimeofday64().
+ */
+static inline int do_settimeofday(const struct timespec *ts)
+{
+ return do_settimeofday64(ts);
+}
+
static inline int __getnstimeofday(struct timespec *ts)
{
return __getnstimeofday64(ts);
@@ -53,7 +63,27 @@ static inline void ktime_get_real_ts(struct timespec *ts)
getnstimeofday64(ts);
}
+static inline void getrawmonotonic(struct timespec *ts)
+{
+ getrawmonotonic64(ts);
+}
+
+static inline struct timespec get_monotonic_coarse(void)
+{
+ return get_monotonic_coarse64();
+}
#else
+/**
+ * Deprecated. Use do_settimeofday64().
+ */
+static inline int do_settimeofday(const struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ ts64 = timespec_to_timespec64(*ts);
+ return do_settimeofday64(&ts64);
+}
+
static inline int __getnstimeofday(struct timespec *ts)
{
struct timespec64 ts64;
@@ -86,6 +116,19 @@ static inline void ktime_get_real_ts(struct timespec *ts)
getnstimeofday64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
+
+static inline void getrawmonotonic(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getrawmonotonic64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline struct timespec get_monotonic_coarse(void)
+{
+ return timespec64_to_timespec(get_monotonic_coarse64());
+}
#endif
extern void getboottime(struct timespec *ts);
@@ -182,7 +225,7 @@ static inline void timekeeping_clocktai(struct timespec *ts)
/*
* RTC specific
*/
-extern void timekeeping_inject_sleeptime(struct timespec *delta);
+extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
/*
* PPS accessor
diff --git a/include/linux/topology.h b/include/linux/topology.h
index dda6ee521e74..909b6e43b694 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -119,11 +119,20 @@ static inline int numa_node_id(void)
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
*/
DECLARE_PER_CPU(int, _numa_mem_);
+extern int _node_numa_mem_[MAX_NUMNODES];
#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
this_cpu_write(_numa_mem_, node);
+ _node_numa_mem_[numa_node_id()] = node;
+}
+#endif
+
+#ifndef node_to_mem_node
+static inline int node_to_mem_node(int node)
+{
+ return _node_numa_mem_[node];
}
#endif
@@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu)
static inline void set_cpu_numa_mem(int cpu, int node)
{
per_cpu(_numa_mem_, cpu) = node;
+ _node_numa_mem_[cpu_to_node(cpu)] = node;
}
#endif
@@ -159,6 +169,13 @@ static inline int numa_mem_id(void)
}
#endif
+#ifndef node_to_mem_node
+static inline int node_to_mem_node(int node)
+{
+ return node;
+}
+#endif
+
#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 5ca58fcbaf1b..7759fc3c622d 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -51,7 +51,7 @@
/* Definitions for online/offline exerciser. */
int torture_onoff_init(long ooholdoff, long oointerval);
-char *torture_onoff_stats(char *page);
+void torture_onoff_stats(void);
bool torture_onoff_failures(void);
/* Low-rider random number generator. */
@@ -77,7 +77,8 @@ int torture_stutter_init(int s);
/* Initialization and cleanup. */
bool torture_init_begin(char *ttype, bool v, int *runnable);
void torture_init_end(void);
-bool torture_cleanup(void);
+bool torture_cleanup_begin(void);
+void torture_cleanup_end(void);
bool torture_must_stop(void);
bool torture_must_stop_irq(void);
void torture_kthread_stopping(char *title);
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index ea6c9dea79e3..cfaf5a1d4bad 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_TRACE_SEQ_H
#define _LINUX_TRACE_SEQ_H
-#include <linux/fs.h>
+#include <linux/seq_buf.h>
#include <asm/page.h>
@@ -12,20 +12,36 @@
struct trace_seq {
unsigned char buffer[PAGE_SIZE];
- unsigned int len;
- unsigned int readpos;
+ struct seq_buf seq;
int full;
};
static inline void
trace_seq_init(struct trace_seq *s)
{
- s->len = 0;
- s->readpos = 0;
+ seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
s->full = 0;
}
/**
+ * trace_seq_used - amount of actual data written to buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the amount of data written to the buffer.
+ *
+ * IMPORTANT!
+ *
+ * Use this instead of @s->seq.len if you need to pass the amount
+ * of data from the buffer to another buffer (userspace, or what not).
+ * The @s->seq.len on overflow is bigger than the buffer size and
+ * using it can cause access to undefined memory.
+ */
+static inline int trace_seq_used(struct trace_seq *s)
+{
+ return seq_buf_used(&s->seq);
+}
+
+/**
* trace_seq_buffer_ptr - return pointer to next location in buffer
* @s: trace sequence descriptor
*
@@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s)
static inline unsigned char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
- return s->buffer + s->len;
+ return s->buffer + seq_buf_used(&s->seq);
+}
+
+/**
+ * trace_seq_has_overflowed - return true if the trace_seq took too much
+ * @s: trace sequence descriptor
+ *
+ * Returns true if too much data was added to the trace_seq and it is
+ * now full and will not take anymore.
+ */
+static inline bool trace_seq_has_overflowed(struct trace_seq *s)
+{
+ return s->full || seq_buf_has_overflowed(&s->seq);
}
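Since the printing helpers now return void, a caller checks for overflow once at the end instead of testing every call's return value. A sketch (function and field names are invented) that also uses trace_seq_used() as the comment above prescribes:

#include <linux/errno.h>
#include <linux/trace_seq.h>

static int example_print_event(struct trace_seq *s, u32 id, u64 ts)
{
        trace_seq_printf(s, "id=%u ", id);
        trace_seq_printf(s, "ts=%llu\n", (unsigned long long)ts);

        /* bound copies by trace_seq_used(s), never by s->seq.len */
        return trace_seq_has_overflowed(s) ? -ENOMEM : trace_seq_used(s);
}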
/*
@@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s)
*/
#ifdef CONFIG_TRACING
extern __printf(2, 3)
-int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
+void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __printf(2, 0)
-int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
-extern int
+void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
+extern void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
int cnt);
-extern int trace_seq_puts(struct trace_seq *s, const char *str);
-extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
-extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+extern void trace_seq_puts(struct trace_seq *s, const char *str);
+extern void trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
+extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len);
extern int trace_seq_path(struct trace_seq *s, const struct path *path);
-extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits);
#else /* CONFIG_TRACING */
-static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
- return 0;
}
-static inline int
+static inline void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
- return 0;
}
-static inline int
+static inline void
trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits)
{
- return 0;
}
static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
@@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
{
return 0;
}
-static inline int trace_seq_puts(struct trace_seq *s, const char *str)
+static inline void trace_seq_puts(struct trace_seq *s, const char *str)
{
- return 0;
}
-static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
+static inline void trace_seq_putc(struct trace_seq *s, unsigned char c)
{
- return 0;
}
-static inline int
+static inline void
trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
{
- return 0;
}
-static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len)
{
- return 0;
}
static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
{
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b1293f15f592..e08e21e5f601 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,6 +157,12 @@ extern void syscall_unregfunc(void);
* Make sure the alignment of the structure in the __tracepoints section will
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
+ *
+ * When lockdep is enabled, we make sure to always do the RCU portions of
+ * the tracepoint code, regardless of whether tracing is on or we match the
+ * condition. This lets us find RCU issues triggered with tracepoints even
+ * when this tracepoint is off. This code has no purpose other than poking
+ * RCU a bit.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
@@ -167,6 +173,11 @@ extern void syscall_unregfunc(void);
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond),,); \
+ if (IS_ENABLED(CONFIG_LOCKDEP)) { \
+ rcu_read_lock_sched_notrace(); \
+ rcu_dereference_sched(__tracepoint_##name.funcs);\
+ rcu_read_unlock_sched_notrace(); \
+ } \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 84132942902a..7d66ae508e5c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -252,6 +252,7 @@ struct tty_struct {
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
spinlock_t ctrl_lock;
+ spinlock_t flow_lock;
/* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
struct termiox *termiox; /* May be NULL for unsupported */
@@ -261,8 +262,13 @@ struct tty_struct {
unsigned long flags;
int count;
struct winsize winsize; /* winsize_mutex */
- unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1;
- unsigned char ctrl_status; /* ctrl_lock */
+ unsigned long stopped:1, /* flow_lock */
+ flow_stopped:1,
+ unused:BITS_PER_LONG - 2;
+ int hw_stopped;
+ unsigned long ctrl_status:8, /* ctrl_lock */
+ packet:1,
+ unused_ctrl:BITS_PER_LONG - 9;
unsigned int receive_room; /* Bytes free for queue */
int flow_change;
@@ -278,7 +284,7 @@ struct tty_struct {
#define N_TTY_BUF_SIZE 4096
- unsigned char closing:1;
+ int closing;
unsigned char *write_buf;
int write_cnt;
/* If the tty has a pending do_SAK, queue it here - akpm */
@@ -310,12 +316,10 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_CLOSING 7 /* ->close() in progress */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
-#define TTY_HUPPING 21 /* ->hangup() in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
@@ -397,7 +401,9 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
extern char *tty_name(struct tty_struct *tty, char *buf);
extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
extern int tty_check_change(struct tty_struct *tty);
+extern void __stop_tty(struct tty_struct *tty);
extern void stop_tty(struct tty_struct *tty);
+extern void __start_tty(struct tty_struct *tty);
extern void start_tty(struct tty_struct *tty);
extern int tty_register_driver(struct tty_driver *driver);
extern int tty_unregister_driver(struct tty_driver *driver);
@@ -411,6 +417,7 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
int buflen);
extern void tty_write_message(struct tty_struct *tty, char *msg);
+extern int tty_send_xchar(struct tty_struct *tty, char ch);
extern int tty_put_char(struct tty_struct *tty, unsigned char c);
extern int tty_chars_in_buffer(struct tty_struct *tty);
extern int tty_write_room(struct tty_struct *tty);
@@ -428,14 +435,13 @@ extern int is_ignored(int sig);
extern int tty_signal(int sig, struct tty_struct *tty);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
-extern void tty_unhangup(struct file *filp);
extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void no_tty(void);
extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_port *port);
-extern void tty_buffer_flush(struct tty_struct *tty);
+extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
@@ -489,14 +495,9 @@ extern int tty_init_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
-extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
-extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
-
extern struct mutex tty_mutex;
extern spinlock_t tty_files_lock;
-extern void tty_write_unlock(struct tty_struct *tty);
-extern int tty_write_lock(struct tty_struct *tty, int ndelay);
#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
extern void tty_port_init(struct tty_port *port);
@@ -555,7 +556,7 @@ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
-extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty);
+extern void tty_ldisc_release(struct tty_struct *tty);
extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern void tty_ldisc_begin(void);
@@ -616,14 +617,6 @@ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-/* serial.c */
-
-extern void serial_console_init(void);
-
-/* pcxx.c */
-
-extern int pcxe_open(struct tty_struct *tty, struct file *filp);
-
/* vt.c */
extern int vt_ioctl(struct tty_struct *tty,
@@ -636,11 +629,9 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
- struct tty_struct *tty2);
-extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
- struct tty_struct *tty2);
-
+extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
+extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
+extern void tty_set_lock_subclass(struct tty_struct *tty);
/*
* this shall be called only from where BTM is held (like close)
*
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e48c608a8fa8..92e337c18839 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -152,6 +152,8 @@
* This routine notifies the tty driver that it should stop
* outputting characters to the tty device.
*
+ * Called with ->flow_lock held. Serialized with start() method.
+ *
* Optional:
*
* Note: Call stop_tty not this method.
@@ -161,6 +163,8 @@
* This routine notifies the tty driver that it should resume sending
* characters to the tty device.
*
+ * Called with ->flow_lock held. Serialized with stop() method.
+ *
* Optional:
*
* Note: Call start_tty not this method.
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 247cfdcc4b08..ee3277593222 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,11 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+ no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+ convert_csum:1;/* On receive, convert checksum
+ * unnecessary to checksum complete
+ * if possible.
+ */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
+static inline void udp_set_convert_csum(struct sock *sk, bool val)
+{
+ udp_sk(sk)->convert_csum = val;
+}
+
+static inline bool udp_get_convert_csum(struct sock *sk)
+{
+ return udp_sk(sk)->convert_csum;
+}
+
#define udp_portaddr_for_each_entry(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
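Illustrative only: a UDP tunnel's socket-setup path opting in to checksum conversion via the new helper (socket creation itself is out of scope here):

#include <linux/udp.h>
#include <net/sock.h>

static void example_tunnel_sock_setup(struct sock *sk)
{
        /* let the UDP receive path convert CHECKSUM_UNNECESSARY to
         * CHECKSUM_COMPLETE when possible, per the new convert_csum bit */
        udp_set_convert_csum(sk, true);
}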
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 290fbf0b6b8a..1c5e453f7ea9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -31,6 +31,7 @@ struct iov_iter {
size_t count;
union {
const struct iovec *iov;
+ const struct kvec *kvec;
const struct bio_vec *bvec;
};
unsigned long nr_segs;
@@ -80,9 +81,15 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
+void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov,
+ unsigned long nr_segs, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -94,6 +101,11 @@ static inline size_t iov_iter_count(struct iov_iter *i)
return i->count;
}
+static inline bool iter_is_iovec(struct iov_iter *i)
+{
+ return !(i->type & (ITER_BVEC | ITER_KVEC));
+}
+
/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
@@ -120,9 +132,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
+size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
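A minimal sketch of the new kvec-backed iterator (assumption: the direction argument carries the ITER_KVEC flag, which is what the iter_is_iovec() type test above suggests):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uio.h>

static size_t example_copy_out(void *src, size_t len)
{
        char dst[64];
        struct kvec kv = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, sizeof(dst));
        return copy_to_iter(src, min(len, sizeof(dst)), &iter);
}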
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 1ad4724458de..32c0e83d6239 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -35,7 +35,7 @@ struct uio_map;
struct uio_mem {
const char *name;
phys_addr_t addr;
- unsigned long size;
+ resource_size_t size;
int memtype;
void __iomem *internal_addr;
struct uio_map *map;
@@ -63,7 +63,17 @@ struct uio_port {
#define MAX_UIO_PORT_REGIONS 5
-struct uio_device;
+struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+ atomic_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+ struct uio_info *info;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
+};
/**
* struct uio_info - UIO device capabilities
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4f844c6b03ee..60beb5dc7977 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -98,11 +98,11 @@ struct uprobes_state {
struct xol_area *xol_area;
};
-extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
-extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
-extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
+extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern bool is_swbp_insn(uprobe_opcode_t *insn);
+extern bool is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
-extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
-extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
diff --git a/include/linux/usb.h b/include/linux/usb.h
index d2465bc0e73c..f89c24a03bd9 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
#endif
/* USB autosuspend and autoresume */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
extern void usb_enable_autosuspend(struct usb_device *udev);
extern void usb_disable_autosuspend(struct usb_device *udev);
@@ -1862,6 +1862,18 @@ extern void usb_unregister_notify(struct notifier_block *nb);
/* debugfs stuff */
extern struct dentry *usb_debug_root;
+/* LED triggers */
+enum usb_led_event {
+ USB_LED_EVENT_HOST = 0,
+ USB_LED_EVENT_GADGET = 1,
+};
+
+#ifdef CONFIG_USB_LED_TRIG
+extern void usb_led_activity(enum usb_led_event ev);
+#else
+static inline void usb_led_activity(enum usb_led_event ev) {}
+#endif
+
#endif /* __KERNEL__ */
#endif
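An illustrative call site (hypothetical function): a host controller driver pulsing the activity trigger after completing an URB; the stub above makes this free when CONFIG_USB_LED_TRIG is not set.

#include <linux/usb.h>

static void example_urb_done(void)
{
        usb_led_activity(USB_LED_EVENT_HOST);
}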
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index bbe779f640be..535997a6681b 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -13,11 +13,12 @@ struct ci_hdrc_platform_data {
/* offset of the capability registers */
uintptr_t capoffset;
unsigned power_budget;
- struct usb_phy *phy;
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
enum usb_phy_interface phy_mode;
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
-#define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
#define CI_HDRC_DISABLE_STREAMING BIT(3)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
@@ -31,6 +32,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
void (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
+ bool tpl_support;
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c330f5ef42cf..3d87defcc527 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -427,6 +427,8 @@ static inline struct usb_composite_driver *to_cdriver(
* @b_vendor_code: bMS_VendorCode part of the OS string
* @use_os_string: false by default, interested gadgets set it
* @os_desc_config: the configuration to be used with OS descriptors
+ * @setup_pending: true when setup request is queued but not completed
+ * @os_desc_pending: true when os_desc request is queued but not completed
*
* One of these devices is allocated and initialized before the
* associated device driver's bind() is called.
@@ -488,6 +490,9 @@ struct usb_composite_dev {
/* protects deactivations and delayed_status counts*/
spinlock_t lock;
+
+ unsigned setup_pending:1;
+ unsigned os_desc_pending:1;
};
extern int usb_string_id(struct usb_composite_dev *c);
@@ -501,6 +506,8 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
extern void composite_disconnect(struct usb_gadget *gadget);
extern int composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl);
+extern void composite_suspend(struct usb_gadget *gadget);
+extern void composite_resume(struct usb_gadget *gadget);
/*
* Some systems will need runtime overrides for the product identifiers
diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h
new file mode 100644
index 000000000000..7344d9e591cc
--- /dev/null
+++ b/include/linux/usb/ehci-dbgp.h
@@ -0,0 +1,83 @@
+/*
+ * Standalone EHCI usb debug driver
+ *
+ * Originally written by:
+ * Eric W. Biederman" <ebiederm@xmission.com> and
+ * Yinghai Lu <yhlu.kernel@gmail.com>
+ *
+ * Changes for early/late printk and HW errata:
+ * Jason Wessel <jason.wessel@windriver.com>
+ * Copyright (C) 2009 Wind River Systems, Inc.
+ *
+ */
+
+#ifndef __LINUX_USB_EHCI_DBGP_H
+#define __LINUX_USB_EHCI_DBGP_H
+
+#include <linux/console.h>
+#include <linux/types.h>
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+extern int early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+#ifdef CONFIG_XEN_DOM0
+extern int xen_dbgp_reset_prep(struct usb_hcd *);
+extern int xen_dbgp_external_startup(struct usb_hcd *);
+#else
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from ehci host driver to ehci debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+#endif /* __LINUX_USB_EHCI_DBGP_H */
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index daec99af5d54..966889a20ea3 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -19,6 +19,8 @@
#ifndef __LINUX_USB_EHCI_DEF_H
#define __LINUX_USB_EHCI_DEF_H
+#include <linux/usb/ehci-dbgp.h>
+
/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
/* Section 2.2 Host Controller Capability Registers */
@@ -190,67 +192,4 @@ struct ehci_regs {
#define USBMODE_EX_HC (3<<0) /* host controller mode */
};
-/* Appendix C, Debug port ... intended for use with special "debug devices"
- * that can help if there's no serial console. (nonstandard enumeration.)
- */
-struct ehci_dbg_port {
- u32 control;
-#define DBGP_OWNER (1<<30)
-#define DBGP_ENABLED (1<<28)
-#define DBGP_DONE (1<<16)
-#define DBGP_INUSE (1<<10)
-#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
-# define DBGP_ERR_BAD 1
-# define DBGP_ERR_SIGNAL 2
-#define DBGP_ERROR (1<<6)
-#define DBGP_GO (1<<5)
-#define DBGP_OUT (1<<4)
-#define DBGP_LEN(x) (((x)>>0)&0x0f)
- u32 pids;
-#define DBGP_PID_GET(x) (((x)>>16)&0xff)
-#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
- u32 data03;
- u32 data47;
- u32 address;
-#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
-};
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-#include <linux/init.h>
-extern int __init early_dbgp_init(char *s);
-extern struct console early_dbgp_console;
-#endif /* CONFIG_EARLY_PRINTK_DBGP */
-
-struct usb_hcd;
-
-#ifdef CONFIG_XEN_DOM0
-extern int xen_dbgp_reset_prep(struct usb_hcd *);
-extern int xen_dbgp_external_startup(struct usb_hcd *);
-#else
-static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return 1; /* Shouldn't this be 0? */
-}
-
-static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
-{
- return -1;
-}
-#endif
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-/* Call backs from ehci host driver to ehci debug driver */
-extern int dbgp_external_startup(struct usb_hcd *);
-extern int dbgp_reset_prep(struct usb_hcd *hcd);
-#else
-static inline int dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return xen_dbgp_reset_prep(hcd);
-}
-static inline int dbgp_external_startup(struct usb_hcd *hcd)
-{
- return xen_dbgp_external_startup(hcd);
-}
-#endif
-
#endif /* __LINUX_USB_EHCI_DEF_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index c3a61853cd13..70ddb3943b62 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -345,12 +345,13 @@ static inline int usb_ep_queue(struct usb_ep *ep,
* @ep:the endpoint associated with the request
* @req:the request being canceled
*
- * if the request is still active on the endpoint, it is dequeued and its
+ * If the request is still active on the endpoint, it is dequeued and its
* completion routine is called (with status -ECONNRESET); else a negative
- * error code is returned.
+ * error code is returned. This is guaranteed to happen before the call to
+ * usb_ep_dequeue() returns.
*
- * note that some hardware can't clear out write fifos (to unlink the request
- * at the head of the queue) except as part of disconnecting from usb. such
+ * Note that some hardware can't clear out write fifos (to unlink the request
+ * at the head of the queue) except as part of disconnecting from usb. Such
* restrictions prevent drivers from supporting configuration changes,
* even to configuration zero (a "chapter 9" requirement).
*/
@@ -489,8 +490,7 @@ struct usb_gadget_ops {
void (*get_config_params)(struct usb_dcd_config_params *);
int (*udc_start)(struct usb_gadget *,
struct usb_gadget_driver *);
- int (*udc_stop)(struct usb_gadget *,
- struct usb_gadget_driver *);
+ int (*udc_stop)(struct usb_gadget *);
};
/**
@@ -816,6 +816,8 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
* Called in a context that permits sleeping.
* @suspend: Invoked on USB suspend. May be called in_interrupt.
* @resume: Invoked on USB resume. May be called in_interrupt.
+ * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers
+ * and should be called in_interrupt.
* @driver: Driver model state for this driver.
*
* Devices are disabled till a gadget driver successfully bind()s, which
@@ -873,6 +875,7 @@ struct usb_gadget_driver {
void (*disconnect)(struct usb_gadget *);
void (*suspend)(struct usb_gadget *);
void (*resume)(struct usb_gadget *);
+ void (*reset)(struct usb_gadget *);
/* FIXME support safe rmmod */
struct device_driver driver;
@@ -921,7 +924,7 @@ extern int usb_add_gadget_udc_release(struct device *parent,
struct usb_gadget *gadget, void (*release)(struct device *dev));
extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
extern void usb_del_gadget_udc(struct usb_gadget *gadget);
-extern int udc_attach_driver(const char *name,
+extern int usb_udc_attach_driver(const char *name,
struct usb_gadget_driver *driver);
/*-------------------------------------------------------------------------*/
@@ -1013,6 +1016,20 @@ extern void usb_gadget_set_state(struct usb_gadget *gadget,
/*-------------------------------------------------------------------------*/
+/* utility to tell the UDC core that a bus reset occurred */
+extern void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to give requests back to the gadget layer */
+
+extern void usb_gadget_giveback_request(struct usb_ep *ep,
+ struct usb_request *req);
+
+
+/*-------------------------------------------------------------------------*/
+
/* utility wrapping a simple endpoint selection policy */
extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *,
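
Editor's note: the gadget.h hunks above add a mandatory ->reset() callback, rename udc_attach_driver() to usb_udc_attach_driver(), and export usb_gadget_udc_reset() and usb_gadget_giveback_request(). A minimal sketch (not part of the patch) of how the two sides fit together; "my_udc", "my_function_reset" and the driver layout are hypothetical.

#include <linux/usb/gadget.h>

struct my_udc {                         /* hypothetical controller state */
        struct usb_gadget gadget;
        struct usb_gadget_driver *driver;
};

/* Controller side: report a bus reset through the core, which invokes the
 * bound function driver's ->reset() callback. */
static void my_udc_handle_bus_reset(struct my_udc *udc)
{
        usb_gadget_udc_reset(&udc->gadget, udc->driver);
}

/* Function-driver side: ->reset() is documented above as mandatory and may
 * run in interrupt context, so it must not sleep. */
static void my_function_reset(struct usb_gadget *gadget)
{
        /* drop any state tied to the old bus session here */
}

static struct usb_gadget_driver my_gadget_driver = {
        .reset = my_function_reset,
        /* .bind, .setup, .disconnect, ... unchanged */
};
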
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 485cd5e2100c..086bf13307e6 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -93,7 +93,7 @@ struct usb_hcd {
struct timer_list rh_timer; /* drives root-hub polling */
struct urb *status_urb; /* the current status urb */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
struct work_struct wakeup_work; /* for remote wakeup */
#endif
@@ -106,7 +106,8 @@ struct usb_hcd {
* OTG and some Host controllers need software interaction with phys;
* other external phys should be software-transparent
*/
- struct usb_phy *phy;
+ struct usb_phy *usb_phy;
+ struct phy *phy;
/* Flags that need to be manipulated atomically because they can
* change while the host controller is running. Always use
@@ -144,6 +145,7 @@ struct usb_hcd {
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
+ unsigned tpl_support:1; /* OTG & EH TPL support */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
@@ -377,6 +379,9 @@ struct hc_driver {
int (*disable_usb3_lpm_timeout)(struct usb_hcd *,
struct usb_device *, enum usb3_link_state state);
int (*find_raw_port_number)(struct usb_hcd *, int);
+ /* Call for power on/off the port if necessary */
+ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
+
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -623,16 +628,13 @@ extern int usb_find_interface_driver(struct usb_device *dev,
extern void usb_root_hub_lost_power(struct usb_device *rhdev);
extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_RUNTIME
extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
#else
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
return;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 8c38aa26b3bb..cfe0528cdbb1 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -14,6 +14,7 @@
#if IS_ENABLED(CONFIG_OF)
enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
+bool of_usb_host_tpl_support(struct device_node *np);
#else
static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
{
@@ -25,6 +26,10 @@ of_usb_get_maximum_speed(struct device_node *np)
{
return USB_SPEED_UNKNOWN;
}
+static inline bool of_usb_host_tpl_support(struct device_node *np)
+{
+ return false;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
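
Editor's note: of_usb_host_tpl_support() pairs with the tpl_support flag added to struct usb_hcd in the usb/hcd.h hunk below. A hedged sketch of host-controller glue wiring the two together; the "tpl-support" device-tree property name is an assumption, and the helper returns false when CONFIG_OF is disabled.

#include <linux/of.h>
#include <linux/usb/hcd.h>
#include <linux/usb/of.h>

static void my_host_apply_tpl(struct usb_hcd *hcd, struct device_node *np)
{
        /* copy the DT Targeted Peripheral List capability into the hcd */
        hcd->tpl_support = of_usb_host_tpl_support(np);
}
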
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 154332b7c8c0..52661c5da690 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -9,15 +9,20 @@
#ifndef __LINUX_USB_OTG_H
#define __LINUX_USB_OTG_H
+#include <linux/phy/phy.h>
#include <linux/usb/phy.h>
struct usb_otg {
u8 default_a;
- struct usb_phy *phy;
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
struct usb_bus *host;
struct usb_gadget *gadget;
+ enum usb_otg_state state;
+
/* bind/unbind the host controller */
int (*set_host)(struct usb_otg *otg, struct usb_bus *host);
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 353053a33f21..f499c23e6342 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -77,7 +77,6 @@ struct usb_phy {
unsigned int flags;
enum usb_phy_type type;
- enum usb_otg_state state;
enum usb_phy_events last_event;
struct usb_otg *otg;
@@ -210,6 +209,7 @@ extern void usb_put_phy(struct usb_phy *);
extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern int usb_bind_phy(const char *dev_name, u8 index,
const char *phy_dev_name);
+extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
#else
static inline struct usb_phy *usb_get_phy(enum usb_phy_type type)
{
@@ -251,6 +251,10 @@ static inline int usb_bind_phy(const char *dev_name, u8 index,
{
return -EOPNOTSUPP;
}
+
+static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
+{
+}
#endif
static inline int
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 55a17b188daa..9948c874e3f1 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -8,27 +8,27 @@
#define __LINUX_USB_QUIRKS_H
/* string descriptors must not be fetched using a 255-byte read */
-#define USB_QUIRK_STRING_FETCH_255 0x00000001
+#define USB_QUIRK_STRING_FETCH_255 BIT(0)
/* device can't resume correctly so reset it instead */
-#define USB_QUIRK_RESET_RESUME 0x00000002
+#define USB_QUIRK_RESET_RESUME BIT(1)
/* device can't handle Set-Interface requests */
-#define USB_QUIRK_NO_SET_INTF 0x00000004
+#define USB_QUIRK_NO_SET_INTF BIT(2)
/* device can't handle its Configuration or Interface strings */
-#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
+#define USB_QUIRK_CONFIG_INTF_STRINGS BIT(3)
/* device can't be reset (e.g. morph devices), don't use reset */
-#define USB_QUIRK_RESET 0x00000010
+#define USB_QUIRK_RESET BIT(4)
/* device has more interface descriptions than the bNumInterfaces count,
and can't handle talking to these interfaces */
-#define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020
+#define USB_QUIRK_HONOR_BNUMINTERFACES BIT(5)
/* device needs a pause during initialization, after we read the device
descriptor */
-#define USB_QUIRK_DELAY_INIT 0x00000040
+#define USB_QUIRK_DELAY_INIT BIT(6)
/*
* For high speed and super speed interrupt endpoints, the USB 2.0 and
@@ -39,6 +39,12 @@
* Devices with this quirk report their bInterval as the result of this
* calculation instead of the exponent variable used in the calculation.
*/
-#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080
+#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL BIT(7)
+
+/* device can't handle device_qualifier descriptor requests */
+#define USB_QUIRK_DEVICE_QUALIFIER BIT(8)
+
+/* device generates spurious wakeup, ignore remote wakeup capability */
+#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
#endif /* __LINUX_USB_QUIRKS_H */
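
Editor's note: BIT(n) expands to (1UL << (n)), so the existing quirk values are unchanged (e.g. USB_QUIRK_RESET_RESUME is still 0x00000002) and the two new quirks take bits 8 and 9. A small sketch of a driver-side check; the quirks field of struct usb_device is assumed here.

#include <linux/usb.h>
#include <linux/usb/quirks.h>

static bool skip_device_qualifier(struct usb_device *udev)
{
        /* true for devices blacklisted with USB_QUIRK_DEVICE_QUALIFIER */
        return udev->quirks & USB_QUIRK_DEVICE_QUALIFIER;
}
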
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d5952bb66752..9fd9e481ea98 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -145,6 +145,10 @@ struct renesas_usbhs_driver_param {
int d0_rx_id;
int d1_tx_id;
int d1_rx_id;
+ int d2_tx_id;
+ int d2_rx_id;
+ int d3_tx_id;
+ int d3_rx_id;
/*
* option:
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 26088feb6608..d9a4905e01d0 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -78,6 +78,7 @@ struct usbnet {
# define EVENT_NO_RUNTIME_PM 9
# define EVENT_RX_KILL 10
# define EVENT_LINK_CHANGE 11
+# define EVENT_SET_RX_MODE 12
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
/* called by minidriver when receiving indication */
void (*indication)(struct usbnet *dev, void *ind, int indlen);
+ /* rx mode change (device changes address list filtering) */
+ void (*set_rx_mode)(struct usbnet *dev);
+
/* for new devices, use the descriptor-reading code instead */
int in; /* rx endpoint */
int out; /* tx endpoint */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 9b7de1b46437..a7f2604c5f25 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -73,6 +73,10 @@
/* Device advertises UAS but it is broken */ \
US_FLAG(BROKEN_FUA, 0x01000000) \
/* Cannot handle FUA in WRITE or READ CDBs */ \
+ US_FLAG(NO_ATA_1X, 0x02000000) \
+ /* Cannot handle ATA_12 or ATA_16 CDBs */ \
+ US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
+ /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index e95372654f09..8297e5b341d8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -3,6 +3,7 @@
#include <linux/kref.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
#include <linux/sched.h>
#include <linux/err.h>
@@ -17,6 +18,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
} extent[UID_GID_MAP_MAX_EXTENTS];
};
+#define USERNS_SETGROUPS_ALLOWED 1UL
+
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -26,7 +31,8 @@ struct user_namespace {
int level;
kuid_t owner;
kgid_t group;
- unsigned int proc_inum;
+ struct ns_common ns;
+ unsigned long flags;
/* Register of per-UID persistent keyrings for this namespace */
#ifdef CONFIG_PERSISTENT_KEYRINGS
@@ -63,6 +69,9 @@ extern const struct seq_operations proc_projid_seq_operations;
extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
+extern int proc_setgroups_show(struct seq_file *m, void *v);
+extern bool userns_may_setgroups(const struct user_namespace *ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -87,6 +96,10 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ return true;
+}
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 239e27733d6c..5093f58ae192 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
#include <linux/err.h>
#include <uapi/linux/utsname.h>
@@ -23,7 +24,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct uts_namespace init_uts_ns;
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index a4c9547aae64..f8e76e08ebe4 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -15,8 +15,6 @@
#define _LINUX_VEXPRESS_H
#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
#include <linux/regmap.h>
#define VEXPRESS_SITE_MB 0
@@ -24,13 +22,6 @@
#define VEXPRESS_SITE_DB2 2
#define VEXPRESS_SITE_MASTER 0xf
-#define VEXPRESS_RES_FUNC(_site, _func) \
-{ \
- .start = (_site), \
- .end = (_func), \
- .flags = IORESOURCE_BUS, \
-}
-
/* Config infrastructure */
void vexpress_config_set_master(u32 site);
@@ -58,16 +49,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
/* Platform control */
-unsigned int vexpress_get_mci_cardin(struct device *dev);
-u32 vexpress_get_procid(int site);
-void *vexpress_get_24mhz_clock_base(void);
void vexpress_flags_set(u32 data);
-void vexpress_sysreg_early_init(void __iomem *base);
-int vexpress_syscfg_device_register(struct platform_device *pdev);
-
-/* Clocks */
-
-void vexpress_clk_init(void __iomem *sp810_base);
-
#endif
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b46671e28de2..28f0e65b9a11 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,9 +75,16 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
bool virtqueue_is_broken(struct virtqueue *vq);
+void *virtqueue_get_avail(struct virtqueue *vq);
+void *virtqueue_get_used(struct virtqueue *vq);
+
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
+ * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
+ * @config_enabled: configuration change reporting enabled
+ * @config_change_pending: configuration change reported while disabled
+ * @config_lock: protects configuration change reporting
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
@@ -88,16 +95,21 @@ bool virtqueue_is_broken(struct virtqueue *vq);
*/
struct virtio_device {
int index;
+ bool failed;
+ bool config_enabled;
+ bool config_change_pending;
+ spinlock_t config_lock;
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;
const struct vringh_config_ops *vringh_config;
struct list_head vqs;
- /* Note that this is a Linux set_bit-style bitmap. */
- unsigned long features[1];
+ u64 features;
void *priv;
};
+bool virtio_device_is_legacy_only(struct virtio_device_id id);
+
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
return container_of(_dev, struct virtio_device, dev);
@@ -108,12 +120,20 @@ void unregister_virtio_device(struct virtio_device *dev);
void virtio_break_device(struct virtio_device *dev);
+void virtio_config_changed(struct virtio_device *dev);
+#ifdef CONFIG_PM_SLEEP
+int virtio_device_freeze(struct virtio_device *dev);
+int virtio_device_restore(struct virtio_device *dev);
+#endif
+
/**
* virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
* @feature_table_size: number of entries in the feature table array.
+ * @feature_table_legacy: same as feature_table but when working in legacy mode.
+ * @feature_table_size_legacy: number of entries in feature table legacy array.
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function to call when a device is removed.
* @config_changed: optional function to call when the device configuration
@@ -124,6 +144,8 @@ struct virtio_driver {
const struct virtio_device_id *id_table;
const unsigned int *feature_table;
unsigned int feature_table_size;
+ const unsigned int *feature_table_legacy;
+ unsigned int feature_table_size_legacy;
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
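
Editor's note: the new config_enabled/config_change_pending state and virtio_config_changed() let a transport report configuration-change interrupts while the core defers them around freeze/restore. A hedged sketch of a transport's interrupt handler; the "my_" names are hypothetical.

#include <linux/interrupt.h>
#include <linux/virtio.h>

static irqreturn_t my_config_changed_irq(int irq, void *opaque)
{
        struct virtio_device *vdev = opaque;

        /* the core drops the event while config_enabled is false and
         * replays it once reporting is re-enabled */
        virtio_config_changed(vdev);
        return IRQ_HANDLED;
}
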
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
new file mode 100644
index 000000000000..51865d05b267
--- /dev/null
+++ b/include/linux/virtio_byteorder.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_VIRTIO_BYTEORDER_H
+#define _LINUX_VIRTIO_BYTEORDER_H
+#include <linux/types.h>
+#include <uapi/linux/virtio_types.h>
+
+/*
+ * Low-level memory accessors for handling virtio in modern little endian and in
+ * compatibility native endian format.
+ */
+
+static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
+{
+ if (little_endian)
+ return le16_to_cpu((__force __le16)val);
+ else
+ return (__force u16)val;
+}
+
+static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
+{
+ if (little_endian)
+ return (__force __virtio16)cpu_to_le16(val);
+ else
+ return (__force __virtio16)val;
+}
+
+static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
+{
+ if (little_endian)
+ return le32_to_cpu((__force __le32)val);
+ else
+ return (__force u32)val;
+}
+
+static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
+{
+ if (little_endian)
+ return (__force __virtio32)cpu_to_le32(val);
+ else
+ return (__force __virtio32)val;
+}
+
+static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
+{
+ if (little_endian)
+ return le64_to_cpu((__force __le64)val);
+ else
+ return (__force u64)val;
+}
+
+static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
+{
+ if (little_endian)
+ return (__force __virtio64)cpu_to_le64(val);
+ else
+ return (__force __virtio64)val;
+}
+
+#endif /* _LINUX_VIRTIO_BYTEORDER */
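
Editor's note: the accessors above take a bool chosen once per device or ring. A minimal sketch of their behaviour: with little_endian == true (a VIRTIO 1.0 device, VIRTIO_F_VERSION_1 negotiated) the value is treated as little endian and swapped on big-endian hosts; with false (legacy device) it is used in native endianness.

#include <linux/virtio_byteorder.h>

/* 'wire' stands for a 16-bit value as it sits in shared memory */
static u16 decode_wire16(__virtio16 wire, bool modern)
{
        return __virtio16_to_cpu(modern, wire);
}
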
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e8f8f71e843c..ca3ed78e5ec7 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
#include <uapi/linux/virtio_config.h>
/**
@@ -18,6 +19,9 @@
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
+ * @generation: config generation counter
+ * vdev: the virtio_device
+ * Returns the config generation counter
* @get_status: read the status byte
* vdev: the virtio_device
* Returns the status byte
@@ -46,6 +50,7 @@
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
+ * Returns 0 on success or error status
* @bus_name: return the bus name associated with the device
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
@@ -58,6 +63,7 @@ struct virtio_config_ops {
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len);
+ u32 (*generation)(struct virtio_device *vdev);
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
@@ -66,8 +72,8 @@ struct virtio_config_ops {
vq_callback_t *callbacks[],
const char *names[]);
void (*del_vqs)(struct virtio_device *);
- u32 (*get_features)(struct virtio_device *vdev);
- void (*finalize_features)(struct virtio_device *vdev);
+ u64 (*get_features)(struct virtio_device *vdev);
+ int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
};
@@ -77,23 +83,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit);
/**
- * virtio_has_feature - helper to determine if this device has this feature.
+ * __virtio_test_bit - helper to test feature bits. For use by transports.
+ * Devices should normally use virtio_has_feature,
+ * which includes more checks.
* @vdev: the device
* @fbit: the feature bit
*/
-static inline bool virtio_has_feature(const struct virtio_device *vdev,
+static inline bool __virtio_test_bit(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ return vdev->features & BIT_ULL(fbit);
+}
+
+/**
+ * __virtio_set_bit - helper to set feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_set_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ vdev->features |= BIT_ULL(fbit);
+}
+
+/**
+ * __virtio_clear_bit - helper to clear feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_clear_bit(struct virtio_device *vdev,
unsigned int fbit)
{
/* Did you forget to fix assumptions on max features? */
if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 32);
+ BUILD_BUG_ON(fbit >= 64);
else
- BUG_ON(fbit >= 32);
+ BUG_ON(fbit >= 64);
+
+ vdev->features &= ~BIT_ULL(fbit);
+}
+/**
+ * virtio_has_feature - helper to determine if this device has this feature.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline bool virtio_has_feature(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
- return test_bit(fbit, vdev->features);
+ return __virtio_test_bit(vdev, fbit);
}
static inline
@@ -109,6 +162,23 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
return vq;
}
+/**
+ * virtio_device_ready - enable vq use in probe function
+ * @vdev: the device
+ *
+ * Driver must call this to use vqs in the probe function.
+ *
+ * Note: vqs are enabled automatically after probe returns.
+ */
+static inline
+void virtio_device_ready(struct virtio_device *dev)
+{
+ unsigned status = dev->config->get_status(dev);
+
+ BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
+}
+
static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
@@ -135,6 +205,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
}
+/* Memory accessors */
+static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
+{
+ return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
+{
+ return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
+{
+ return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
+{
+ return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
+{
+ return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
do { \
@@ -204,11 +305,33 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
return ret;
}
+/* Read @count fields, @bytes each. */
+static inline void __virtio_cread_many(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t count, size_t bytes)
+{
+ u32 old, gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ int i;
+
+ do {
+ old = gen;
+
+ for (i = 0; i < count; i++)
+ vdev->config->get(vdev, offset + bytes * i,
+ buf + i * bytes, bytes);
+
+ gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ } while (gen != old);
+}
+
+
static inline void virtio_cread_bytes(struct virtio_device *vdev,
unsigned int offset,
void *buf, size_t len)
{
- vdev->config->get(vdev, offset, buf, len);
+ __virtio_cread_many(vdev, offset, buf, len, 1);
}
static inline void virtio_cwrite8(struct virtio_device *vdev,
@@ -222,12 +345,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
{
u16 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
+ return virtio16_to_cpu(vdev, (__force __virtio16)ret);
}
static inline void virtio_cwrite16(struct virtio_device *vdev,
unsigned int offset, u16 val)
{
+ val = (__force u16)cpu_to_virtio16(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -236,12 +360,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
{
u32 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
+ return virtio32_to_cpu(vdev, (__force __virtio32)ret);
}
static inline void virtio_cwrite32(struct virtio_device *vdev,
unsigned int offset, u32 val)
{
+ val = (__force u32)cpu_to_virtio32(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -250,12 +375,14 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
{
u64 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
+ __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
+ return virtio64_to_cpu(vdev, (__force __virtio64)ret);
}
static inline void virtio_cwrite64(struct virtio_device *vdev,
unsigned int offset, u64 val)
{
+ val = (__force u64)cpu_to_virtio64(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
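
Editor's note: __virtio_cread_many() re-reads the config bytes until two samples of the generation counter match (transports without a ->generation() op report 0, so the loop runs once), and virtio_cread64() now funnels through it and converts endianness via virtio64_to_cpu(). A sketch of a driver reading a multi-byte field the device may update concurrently, using the virtio-blk capacity field as the example.

#include <linux/stddef.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_config.h>

static u64 read_disk_capacity(struct virtio_device *vdev)
{
        /* consistent even if ->get() copies fewer than 8 bytes at a time */
        return virtio_cread64(vdev,
                              offsetof(struct virtio_blk_config, capacity));
}
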
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ced92345c963..9246d32dc973 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_MEMORY_BALLOON
+ BALLOON_INFLATE,
+ BALLOON_DEFLATE,
+#ifdef CONFIG_BALLOON_COMPACTION
+ BALLOON_MIGRATE,
+#endif
+#endif
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
@@ -83,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
+ VMACACHE_FULL_FLUSHES,
#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 023430e265fe..5691f752ce8f 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -24,6 +24,7 @@
#define VMCI_KERNEL_API_VERSION_2 2
#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
+struct msghdr;
typedef void (vmci_device_shutdown_fn) (void *device_registration,
void *user_data);
@@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
void *iov, size_t iov_size, int mode);
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
- void *iov, size_t iov_size, int mode);
-ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size,
+ struct msghdr *msg, size_t iov_size, int mode);
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
int mode);
#endif /* !__VMW_VMCI_API_H__ */
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 749cde28728b..a3fa537e717a 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -24,12 +24,16 @@
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
+#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
+ /* Everything is little endian */
+ bool little_endian;
+
/* Guest publishes used event idx (note: we always do). */
bool event_indices;
@@ -105,7 +109,7 @@ struct vringh_kiov {
#define VRINGH_IOV_ALLOCATED 0x8000000
/* Helpers for userspace vrings. */
-int vringh_init_user(struct vringh *vrh, u32 features,
+int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
@@ -167,7 +171,7 @@ bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
/* Helpers for kernelspace vrings. */
-int vringh_init_kern(struct vringh *vrh, u32 features,
+int vringh_init_kern(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
@@ -222,4 +226,33 @@ static inline void vringh_notify(struct vringh *vrh)
vrh->notify(vrh);
}
+static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
+{
+ return __virtio16_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
+{
+ return __cpu_to_virtio16(vrh->little_endian, val);
+}
+
+static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
+{
+ return __virtio32_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
+{
+ return __cpu_to_virtio32(vrh->little_endian, val);
+}
+
+static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
+{
+ return __virtio64_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
+{
+ return __cpu_to_virtio64(vrh->little_endian, val);
+}
#endif /* _LINUX_VRINGH_H */
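
Editor's note: each vringh now records its endianness once at vringh_init_*() time (the features argument is widened to u64 for the same reason), and the per-ring helpers convert values read from or written to the ring. A tiny sketch of a host-side consumer.

#include <linux/vringh.h>

static u32 desc_len_to_cpu(const struct vringh *vrh, __virtio32 wire)
{
        return vringh32_to_cpu(vrh, wire);
}
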
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6fb1ba5f9b2f..2232ed16635a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
+/* __wait_queue::flags */
+#define WQ_FLAG_EXCLUSIVE 0x01
+#define WQ_FLAG_WOKEN 0x02
+
struct __wait_queue {
unsigned int flags;
-#define WQ_FLAG_EXCLUSIVE 0x01
void *private;
wait_queue_func_t func;
struct list_head task_list;
@@ -25,7 +28,7 @@ struct wait_bit_key {
void *flags;
int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
- unsigned long private;
+ unsigned long timeout;
};
struct wait_bit_queue {
@@ -154,6 +157,7 @@ int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_ac
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
+int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
@@ -257,11 +261,37 @@ __out: __ret; \
*/
#define wait_event(wq, condition) \
do { \
+ might_sleep(); \
if (condition) \
break; \
__wait_event(wq, condition); \
} while (0)
+#define __wait_event_freezable(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ schedule(); try_to_freeze())
+
+/**
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
+ * to system load) until the @condition evaluates to true. The
+ * @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_freezable(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_freezable(wq, condition); \
+ __ret; \
+})
+
#define __wait_event_timeout(wq, condition, timeout) \
___wait_event(wq, ___wait_cond_timeout(condition), \
TASK_UNINTERRUPTIBLE, 0, timeout, \
@@ -280,18 +310,39 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, or the remaining
- * jiffies (at least 1) if the @condition evaluated to %true before
- * the @timeout elapsed.
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
*/
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
+ might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_timeout(wq, condition, timeout); \
__ret; \
})
+#define __wait_event_freezable_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret); try_to_freeze())
+
+/*
+ * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
+ * increasing load and is freezable.
+ */
+#define wait_event_freezable_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
cmd1; schedule(); cmd2)
@@ -312,6 +363,7 @@ do { \
*/
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
+ might_sleep(); \
if (condition) \
break; \
__wait_event_cmd(wq, condition, cmd1, cmd2); \
@@ -339,6 +391,7 @@ do { \
#define wait_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
+ might_sleep(); \
if (!(condition)) \
__ret = __wait_event_interruptible(wq, condition); \
__ret; \
@@ -363,13 +416,16 @@ do { \
* change the result of the wait condition.
*
* Returns:
- * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
- * a signal, or the remaining jiffies (at least 1) if the @condition
- * evaluated to %true before the @timeout elapsed.
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a signal.
*/
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
+ might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_interruptible_timeout(wq, \
condition, timeout); \
@@ -420,6 +476,7 @@ do { \
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
int __ret = 0; \
+ might_sleep(); \
if (!(condition)) \
__ret = __wait_event_hrtimeout(wq, condition, timeout, \
TASK_UNINTERRUPTIBLE); \
@@ -445,6 +502,7 @@ do { \
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
long __ret = 0; \
+ might_sleep(); \
if (!(condition)) \
__ret = __wait_event_hrtimeout(wq, condition, timeout, \
TASK_INTERRUPTIBLE); \
@@ -458,12 +516,27 @@ do { \
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
+ might_sleep(); \
if (!(condition)) \
__ret = __wait_event_interruptible_exclusive(wq, condition);\
__ret; \
})
+#define __wait_event_freezable_exclusive(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+ schedule(); try_to_freeze())
+
+#define wait_event_freezable_exclusive(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_freezable_exclusive(wq, condition);\
+ __ret; \
+})
+
+
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
int __ret = 0; \
@@ -632,6 +705,7 @@ do { \
#define wait_event_killable(wq, condition) \
({ \
int __ret = 0; \
+ might_sleep(); \
if (!(condition)) \
__ret = __wait_event_killable(wq, condition); \
__ret; \
@@ -825,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
+long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
+int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
@@ -859,6 +935,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
+extern int bit_wait_timeout(struct wait_bit_key *);
+extern int bit_wait_io_timeout(struct wait_bit_key *);
/**
* wait_on_bit - wait for a bit to be cleared
@@ -879,6 +957,7 @@ extern int bit_wait_io(struct wait_bit_key *);
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
+ might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
@@ -903,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
+ might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
@@ -929,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
+ might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
@@ -956,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
+ might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
@@ -979,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
+ might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
@@ -1004,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
+ might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, action, mode);
@@ -1022,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
+ might_sleep();
if (atomic_read(val) == 0)
return 0;
return out_of_line_wait_on_atomic_t(val, action, mode);
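
Editor's note: the wait_event_freezable*() variants above sleep in TASK_INTERRUPTIBLE and call try_to_freeze() after every wakeup, and the new might_sleep() annotations make calls from atomic context warn early. A hedged sketch of a freezable kernel-thread loop; the wait queue, flag and locking are hypothetical and simplified.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_work_pending;

static int my_worker(void *arg)
{
        while (!kthread_should_stop()) {
                /* does not inflate the load average and does not block
                 * system suspend, since the thread freezes here */
                if (wait_event_freezable(my_wq,
                                         my_work_pending ||
                                         kthread_should_stop()))
                        continue;       /* -ERESTARTSYS: signalled */
                my_work_pending = false;
                /* ... handle the work ... */
        }
        return 0;
}
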
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 2a3038ee17a3..395b70e0eccf 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -97,13 +97,8 @@ struct watchdog_device {
#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
};
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-#define WATCHDOG_NOWAYOUT 1
-#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT)
-#else
-#define WATCHDOG_NOWAYOUT 0
-#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
-#endif
+#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
+#define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT)
/* Use the following function to check whether or not the watchdog is active */
static inline bool watchdog_active(struct watchdog_device *wdd)
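
Editor's note: IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) evaluates to 1 when the option is =y and to 0 otherwise, so both constants keep their old values without the #ifdef block. Sketch of the usual driver pattern that consumes them.

#include <linux/moduleparam.h>
#include <linux/watchdog.h>

/* default the module parameter from the Kconfig option */
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
                 "Watchdog cannot be stopped once started (default="
                 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
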
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index e44d634e7fb7..05c214760977 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-u64 zs_get_total_size_bytes(struct zs_pool *pool);
+unsigned long zs_get_total_pages(struct zs_pool *pool);
#endif
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
index 852e96c4bb46..984fb79031de 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/include/media/davinci/dm644x_ccdc.h
@@ -114,7 +114,7 @@ struct ccdc_fault_pixel {
/* Number of fault pixel */
unsigned short fp_num;
/* Address of fault pixel table */
- unsigned int fpc_table_addr;
+ unsigned long fpc_table_addr;
};
/* Structure for CCDC configuration parameters for raw capture mode passed
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 57585c7004a4..4376beeb28c2 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -63,7 +63,7 @@ struct vpbe_output {
* output basis. If per mode is needed, we may have to move this to
* mode_info structure
*/
- enum v4l2_mbus_pixelcode if_params;
+ u32 if_params;
};
/* encoder configuration info */
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index 637749a91432..fa0247ad815f 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -70,8 +70,6 @@ struct vpbe_disp_buffer {
/* vpbe display object structure */
struct vpbe_layer {
- /* number of buffers in fbuffers */
- unsigned int numbuffers;
/* Pointer to the vpbe_display */
struct vpbe_display *disp_dev;
/* Pointer pointing to current v4l2_buffer */
@@ -91,10 +89,6 @@ struct vpbe_layer {
/* V4l2 specific parameters */
/* Identifies video device for this layer */
struct video_device video_dev;
- /* This field keeps track of type of buffer exchange mechanism user
- * has selected
- */
- enum v4l2_memory memory;
/* Used to store pixel format */
struct v4l2_pix_format pix_fmt;
enum v4l2_field buf_field;
@@ -106,12 +100,8 @@ struct vpbe_layer {
unsigned char window_enable;
/* number of open instances of the layer */
unsigned int usrs;
- /* number of users performing IO */
- unsigned int io_usrs;
/* Indicates id of the field which is being displayed */
unsigned int field_id;
- /* Indicates whether streaming started */
- unsigned char started;
/* Identifies device object */
enum vpbe_display_device_id device_id;
/* facilitation of ioctl ops lock by v4l2*/
@@ -131,17 +121,6 @@ struct vpbe_display {
struct osd_state *osd_device;
};
-/* File handle structure */
-struct vpbe_fh {
- struct v4l2_fh fh;
- /* vpbe device structure */
- struct vpbe_display *disp_dev;
- /* pointer to layer object for opened device */
- struct vpbe_layer *layer;
- /* Indicates whether this file handle is doing IO */
- unsigned char io_allowed;
-};
-
struct buf_config_params {
unsigned char min_numbuffers;
unsigned char numbuffers[VPBE_DISPLAY_MAX_DEVICES];
diff --git a/include/media/davinci/vpbe_venc.h b/include/media/davinci/vpbe_venc.h
index 476fafc2f522..3dbd20026107 100644
--- a/include/media/davinci/vpbe_venc.h
+++ b/include/media/davinci/vpbe_venc.h
@@ -30,11 +30,10 @@
#define VENC_SECOND_FIELD BIT(2)
struct venc_platform_data {
- int (*setup_pinmux)(enum v4l2_mbus_pixelcode if_type,
- int field);
+ int (*setup_pinmux)(u32 if_type, int field);
int (*setup_clock)(enum vpbe_enc_timings_type type,
unsigned int pixclock);
- int (*setup_if_config)(enum v4l2_mbus_pixelcode pixcode);
+ int (*setup_if_config)(u32 pixcode);
/* Number of LCD outputs supported */
int num_lcd_outputs;
struct vpbe_if_params *lcd_if_params;
diff --git a/include/media/exynos-fimc.h b/include/media/exynos-fimc.h
index aa44660e2041..69bcd2a07d5c 100644
--- a/include/media/exynos-fimc.h
+++ b/include/media/exynos-fimc.h
@@ -101,7 +101,7 @@ struct fimc_source_info {
* @flags: flags indicating which operation mode format applies to
*/
struct fimc_fmt {
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
char *name;
u32 fourcc;
u32 color;
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 78f0637ca68d..05e7ad5d2c8b 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -29,14 +29,13 @@ struct lirc_buffer {
/* Using chunks instead of bytes is intended to simplify boundary checking
* and should allow for some performance fine tuning later */
struct kfifo fifo;
- u8 fifo_initialized;
};
static inline void lirc_buffer_clear(struct lirc_buffer *buf)
{
unsigned long flags;
- if (buf->fifo_initialized) {
+ if (kfifo_initialized(&buf->fifo)) {
spin_lock_irqsave(&buf->fifo_lock, flags);
kfifo_reset(&buf->fifo);
spin_unlock_irqrestore(&buf->fifo_lock, flags);
@@ -56,17 +55,14 @@ static inline int lirc_buffer_init(struct lirc_buffer *buf,
buf->chunk_size = chunk_size;
buf->size = size;
ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL);
- if (ret == 0)
- buf->fifo_initialized = 1;
return ret;
}
static inline void lirc_buffer_free(struct lirc_buffer *buf)
{
- if (buf->fifo_initialized) {
+ if (kfifo_initialized(&buf->fifo)) {
kfifo_free(&buf->fifo);
- buf->fifo_initialized = 0;
} else
WARN(1, "calling %s on an uninitialized lirc_buffer\n",
__func__);
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index c9d06d9f7e6e..398279dd1922 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -57,6 +57,8 @@ enum {
* 0 - Active high, 1 - Active low
* @vs_pol: Vertical synchronization polarity
* 0 - Active high, 1 - Active low
+ * @fld_pol: Field signal polarity
+ * 0 - Positive, 1 - Negative
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
*/
@@ -65,6 +67,7 @@ struct isp_parallel_platform_data {
unsigned int clk_pol:1;
unsigned int hs_pol:1;
unsigned int vs_pol:1;
+ unsigned int fld_pol:1;
unsigned int data_pol:1;
};
diff --git a/include/media/radio-si4713.h b/include/media/radio-si4713.h
deleted file mode 100644
index f6aae29c7741..000000000000
--- a/include/media/radio-si4713.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * include/media/radio-si4713.h
- *
- * Board related data definitions for Si4713 radio transmitter chip.
- *
- * Copyright (c) 2009 Nokia Corporation
- * Contact: Eduardo Valentin <eduardo.valentin@nokia.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- */
-
-#ifndef RADIO_SI4713_H
-#define RADIO_SI4713_H
-
-#include <linux/i2c.h>
-
-#define SI4713_NAME "radio-si4713"
-
-/*
- * Platform dependent definition
- */
-struct radio_si4713_platform_data {
- int i2c_bus;
- struct i2c_board_info *subdev_board_info;
-};
-
-#endif /* ifndef RADIO_SI4713_H*/
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 80f951890b4c..e7a1514075ec 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -135,6 +135,7 @@ void rc_map_init(void);
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_EMPTY "rc-empty"
#define RC_MAP_EM_TERRATEC "rc-em-terratec"
#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
diff --git a/include/media/si4713.h b/include/media/si4713.h
index f98a0a7af61c..be4f58e2440b 100644
--- a/include/media/si4713.h
+++ b/include/media/si4713.h
@@ -23,9 +23,7 @@
* Platform dependent definition
*/
struct si4713_platform_data {
- const char * const *supply_names;
- unsigned supplies;
- int gpio_reset; /* < 0 if not used */
+ bool is_platform_device;
};
/*
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 865246b00127..2f6261f3e570 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -296,7 +296,7 @@ const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
* format setup.
*/
struct soc_camera_format_xlate {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *host_fmt;
};
diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h
index d33f6d059692..2ff773785fb6 100644
--- a/include/media/soc_mediabus.h
+++ b/include/media/soc_mediabus.h
@@ -91,16 +91,16 @@ struct soc_mbus_pixelfmt {
* @fmt: pixel format description
*/
struct soc_mbus_lookup {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
struct soc_mbus_pixelfmt fmt;
};
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
- enum v4l2_mbus_pixelcode code,
+ u32 code,
const struct soc_mbus_lookup *lookup,
int n);
const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
- enum v4l2_mbus_pixelcode code);
+ u32 code);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
u32 bytes_per_line, u32 height);
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 48f974866f13..1cc0c5ba16b3 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -80,24 +80,9 @@
/* ------------------------------------------------------------------------- */
-/* Control helper functions */
+/* Control helper function */
-int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
- const char * const *menu_items);
-const char *v4l2_ctrl_get_name(u32 id);
-const char * const *v4l2_ctrl_get_menu(u32 id);
-const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def);
-int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu,
- struct v4l2_queryctrl *qctrl, const char * const *menu_items);
-#define V4L2_CTRL_MENU_IDS_END (0xffffffff)
-int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids);
-
-/* Note: ctrl_classes points to an array of u32 pointers. Each u32 array is a
- 0-terminated array of control IDs. Each array must be sorted low to high
- and belong to the same control class. The array of u32 pointers must also
- be sorted, from low class IDs to high class IDs. */
-u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id);
/* ------------------------------------------------------------------------- */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index b7cd7a665e35..911f3e542834 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -670,6 +670,31 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
*/
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
+/** v4l2_ctrl_get_name() - Get the name of the control
+ * @id: The control ID.
+ *
+ * This function returns the name of the given control ID or NULL if it isn't
+ * a known control.
+ */
+const char *v4l2_ctrl_get_name(u32 id);
+
+/** v4l2_ctrl_get_menu() - Get the menu string array of the control
+ * @id: The control ID.
+ *
+ * This function returns the NULL-terminated menu string array name of the
+ * given control ID or NULL if it isn't a known menu control.
+ */
+const char * const *v4l2_ctrl_get_menu(u32 id);
+
+/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
+ * @id: The control ID.
+ * @len: The size of the integer array.
+ *
+ * This function returns the integer array of the given control ID or NULL
+ * if it isn't a known integer menu control.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
+
/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
* @ctrl: The control.
*
diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h
index 10daf92ff1ab..a07d7a683bd9 100644
--- a/include/media/v4l2-image-sizes.h
+++ b/include/media/v4l2-image-sizes.h
@@ -25,10 +25,19 @@
#define QVGA_WIDTH 320
#define QVGA_HEIGHT 240
+#define SVGA_WIDTH 800
+#define SVGA_HEIGHT 600
+
#define SXGA_WIDTH 1280
#define SXGA_HEIGHT 1024
#define VGA_WIDTH 640
#define VGA_HEIGHT 480
+#define UXGA_WIDTH 1600
+#define UXGA_HEIGHT 1200
+
+#define XGA_WIDTH 1024
+#define XGA_HEIGHT 768
+
#endif /* _IMAGE_SIZES_H */
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 395c4a95a42a..38d960d8dccd 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -94,16 +94,20 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
pix_fmt->height = mbus_fmt->height;
pix_fmt->field = mbus_fmt->field;
pix_fmt->colorspace = mbus_fmt->colorspace;
+ pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc;
+ pix_fmt->quantization = mbus_fmt->quantization;
}
static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
const struct v4l2_pix_format *pix_fmt,
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
mbus_fmt->width = pix_fmt->width;
mbus_fmt->height = pix_fmt->height;
mbus_fmt->field = pix_fmt->field;
mbus_fmt->colorspace = pix_fmt->colorspace;
+ mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc;
+ mbus_fmt->quantization = pix_fmt->quantization;
mbus_fmt->code = code;
}
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index d7465725773d..5860292d42eb 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -341,7 +341,7 @@ struct v4l2_subdev_video_ops {
int (*query_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code);
+ u32 *code);
int (*enum_mbus_fsizes)(struct v4l2_subdev *sd,
struct v4l2_frmsizeenum *fsize);
int (*g_mbus_fmt)(struct v4l2_subdev *sd,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 2fefcf491aa8..bd2cec2d6c3d 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -82,19 +82,23 @@ struct vb2_threadio_data;
* unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags);
+ void *(*alloc)(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir,
+ gfp_t gfp_flags);
void (*put)(void *buf_priv);
struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write);
+ unsigned long size,
+ enum dma_data_direction dma_dir);
void (*put_userptr)(void *buf_priv);
void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);
void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write);
+ unsigned long size,
+ enum dma_data_direction dma_dir);
void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv);
@@ -270,22 +274,24 @@ struct vb2_buffer {
* queue setup from completing successfully; optional.
* @buf_prepare: called every time the buffer is queued from userspace
* and from the VIDIOC_PREPARE_BUF ioctl; drivers may
- * perform any initialization required before each hardware
- * operation in this callback; drivers that support
- * VIDIOC_CREATE_BUFS must also validate the buffer size;
- * if an error is returned, the buffer will not be queued
- * in driver; optional.
+ * perform any initialization required before each
+ * hardware operation in this callback; drivers can
+ * access/modify the buffer here as it is still synced for
+ * the CPU; drivers that support VIDIOC_CREATE_BUFS must
+ * also validate the buffer size; if an error is returned,
+ * the buffer will not be queued in driver; optional.
* @buf_finish: called before every dequeue of the buffer back to
- * userspace; drivers may perform any operations required
- * before userspace accesses the buffer; optional. The
- * buffer state can be one of the following: DONE and
- * ERROR occur while streaming is in progress, and the
- * PREPARED state occurs when the queue has been canceled
- * and all pending buffers are being returned to their
- * default DEQUEUED state. Typically you only have to do
- * something if the state is VB2_BUF_STATE_DONE, since in
- * all other cases the buffer contents will be ignored
- * anyway.
+ * userspace; the buffer is synced for the CPU, so drivers
+ * can access/modify the buffer contents; drivers may
+ * perform any operations required before userspace
+ * accesses the buffer; optional. The buffer state can be
+ * one of the following: DONE and ERROR occur while
+ * streaming is in progress, and the PREPARED state occurs
+ * when the queue has been canceled and all pending
+ * buffers are being returned to their default DEQUEUED
+ * state. Typically you only have to do something if the
+ * state is VB2_BUF_STATE_DONE, since in all other cases
+ * the buffer contents will be ignored anyway.
* @buf_cleanup: called once before the buffer is freed; drivers may
* perform any additional cleanup; optional.
* @start_streaming: called once to enter 'streaming' state; the driver may
@@ -356,8 +362,8 @@ struct v4l2_fh;
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
* structure type, so sizeof(struct vb2_buffer) will be used
- * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAGS_TIMESTAMP_* and
- * V4L2_BUF_FLAGS_TSTAMP_SRC_*
+ * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
+ * V4L2_BUF_FLAG_TSTAMP_SRC_*
* @gfp_flags: additional gfp flags used when allocating the buffers.
* Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
* to force the buffer allocation to a specific memory zone.
@@ -366,6 +372,7 @@ struct v4l2_fh;
* cannot be started unless at least this number of buffers
* have been queued into the driver.
*
+ * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @bufs: videobuf buffer structures
* @num_buffers: number of allocated/used buffers
@@ -402,6 +409,7 @@ struct vb2_queue {
u32 min_buffers_needed;
/* private: internal use only */
+ struct mutex mmap_lock;
enum v4l2_memory memory;
struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
unsigned int num_buffers;
@@ -592,6 +600,15 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
return 0;
}
+/**
+ * vb2_start_streaming_called() - return streaming status of driver
+ * @q: videobuf queue
+ */
+static inline bool vb2_start_streaming_called(struct vb2_queue *q)
+{
+ return q->start_streaming_called;
+}
+
/*
* The following functions are not part of the vb2 core API, but are simple
* helper functions that you can use in your struct v4l2_file_operations,
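Editor's note - a hedged usage sketch (not part of the patch) for the new vb2_start_streaming_called() helper added above: a driver's buf_queue callback can use it to decide whether the DMA engine is already running. Everything named foo_* and the list/queued_bufs members are hypothetical; only vb2_start_streaming_called() and vb2_get_drv_priv() come from videobuf2-core.h.

    static void foo_buf_queue(struct vb2_buffer *vb)
    {
    	struct foo_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
    	struct foo_buffer *buf = container_of(vb, struct foo_buffer, vb);

    	if (vb2_start_streaming_called(vb->vb2_queue))
    		foo_write_buffer_to_hw(dev, buf);	/* hardware already streaming */
    	else
    		list_add_tail(&buf->list, &dev->queued_bufs);	/* parked until start_streaming */
    }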
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
index 7b89852779af..14ce3068b642 100644
--- a/include/media/videobuf2-dma-sg.h
+++ b/include/media/videobuf2-dma-sg.h
@@ -21,6 +21,9 @@ static inline struct sg_table *vb2_dma_sg_plane_desc(
return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
}
+void *vb2_dma_sg_init_ctx(struct device *dev);
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx);
+
extern const struct vb2_mem_ops vb2_dma_sg_memops;
#endif
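Editor's note - a hedged sketch of how a driver is expected to use the two new dma-sg context helpers declared above, assuming they follow the same ERR_PTR convention as the dma-contig allocator; foo_probe(), foo_remove() and the foo_dev structure are hypothetical.

    static int foo_probe(struct platform_device *pdev)
    {
    	struct foo_dev *dev;

    	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
    	if (!dev)
    		return -ENOMEM;

    	/* one allocation context per DMA-capable device */
    	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
    	if (IS_ERR(dev->alloc_ctx))
    		return PTR_ERR(dev->alloc_ctx);

    	platform_set_drvdata(pdev, dev);
    	/* queue_setup() later hands this cookie back: alloc_ctxs[0] = dev->alloc_ctx */
    	return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
    	struct foo_dev *dev = platform_get_drvdata(pdev);

    	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
    	return 0;
    }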
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
new file mode 100644
index 000000000000..975cc7861f18
--- /dev/null
+++ b/include/misc/cxl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXL_H
+#define _MISC_CXL_H
+
+#ifdef CONFIG_CXL_BASE
+
+#define CXL_IRQ_RANGES 4
+
+struct cxl_irq_ranges {
+ irq_hw_number_t offset[CXL_IRQ_RANGES];
+ irq_hw_number_t range[CXL_IRQ_RANGES];
+};
+
+extern atomic_t cxl_use_count;
+
+static inline bool cxl_ctx_in_use(void)
+{
+ return (atomic_read(&cxl_use_count) != 0);
+}
+
+static inline void cxl_ctx_get(void)
+{
+ atomic_inc(&cxl_use_count);
+}
+
+static inline void cxl_ctx_put(void)
+{
+ atomic_dec(&cxl_use_count);
+}
+
+void cxl_slbia(struct mm_struct *mm);
+
+#else /* CONFIG_CXL_BASE */
+
+static inline bool cxl_ctx_in_use(void) { return false; }
+static inline void cxl_slbia(struct mm_struct *mm) {}
+
+#endif /* CONFIG_CXL_BASE */
+
+#endif
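Editor's note - a hedged sketch of the intended use of the context counters in the new cxl.h above (attach/detach only exist under CONFIG_CXL_BASE); the foo_* functions are hypothetical, only cxl_ctx_get(), cxl_ctx_put(), cxl_ctx_in_use() and cxl_slbia() come from the header.

    static void foo_attach_context(void)
    {
    	cxl_ctx_get();			/* one more accelerator context in use */
    }

    static void foo_detach_context(void)
    {
    	cxl_ctx_put();
    }

    static void foo_flush_slb(struct mm_struct *mm)
    {
    	/* only bother when an accelerator context is actually active */
    	if (cxl_ctx_in_use())
    		cxl_slbia(mm);
    }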
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index d184df1d0d41..dc03d77ad23b 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -372,12 +372,12 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
return skb->len + uncomp_header - ret;
}
-typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
-
-int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
- const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
- const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
- u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
+int
+lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
+ const u8 *saddr, const u8 saddr_type,
+ const u8 saddr_len, const u8 *daddr,
+ const u8 daddr_type, const u8 daddr_len,
+ u8 iphc0, u8 iphc1);
int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index d9fa68f26c41..2a25dec30211 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -34,7 +34,6 @@
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
- * @pref: Preferences of this transport
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
* @close: member function to discard a connection on this transport
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index ec51e673b4b6..d13573bb879e 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -202,7 +202,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void ipv6_sock_ac_close(struct sock *sk);
-int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
void ipv6_ac_destroy_dev(struct inet6_dev *idev);
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index 085940f7eeec..7d38e2ffd256 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 428277869400..0d87674fb775 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -103,14 +103,14 @@ struct vsock_transport {
int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
struct msghdr *msg, size_t len, int flags);
int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
- struct iovec *, size_t len);
+ struct msghdr *, size_t len);
bool (*dgram_allow)(u32 cid, u32 port);
/* STREAM. */
/* TODO: stream_bind() */
- ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
+ ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *,
size_t len, int flags);
- ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
+ ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *,
size_t len);
s64 (*stream_has_data)(struct vsock_sock *);
s64 (*stream_has_space)(struct vsock_sock *);
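Editor's note - a hedged sketch of what the iovec-to-msghdr switch above means for a transport implementation: the payload can now be pulled with the generic msghdr helpers instead of walking an iovec by hand. foo_stream_enqueue() and foo_transport_send() are hypothetical, and memcpy_from_msg() is assumed to be available in this kernel.

    static ssize_t foo_stream_enqueue(struct vsock_sock *vsk,
    				      struct msghdr *msg, size_t len)
    {
    	void *buf = kmalloc(len, GFP_KERNEL);

    	if (!buf)
    		return -ENOMEM;

    	/* copy the user payload described by the msghdr into the kernel buffer */
    	if (memcpy_from_msg(buf, msg, len)) {
    		kfree(buf);
    		return -EFAULT;
    	}

    	return foo_transport_send(vsk, buf, len);
    }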
diff --git a/include/net/ah.h b/include/net/ah.h
index ca95b98969dd..4e2dfa474a7e 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -3,9 +3,6 @@
#include <linux/skbuff.h>
-/* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN 64
-
struct crypto_ahash;
struct ah_data {
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 373000de610d..58695ffeb138 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -120,9 +120,9 @@ struct bt_voice {
#define BT_RCVMTU 13
__printf(1, 2)
-int bt_info(const char *fmt, ...);
+void bt_info(const char *fmt, ...);
__printf(1, 2)
-int bt_err(const char *fmt, ...);
+void bt_err(const char *fmt, ...);
#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
@@ -284,6 +284,7 @@ struct hci_req_ctrl {
struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
+ __u16 opcode;
__u16 expect;
__u8 force_active;
struct l2cap_chan *chan;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3f8547f1c6f8..40129b3838b2 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -129,6 +129,15 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_INVALID_BDADDR,
+
+ /* When this quirk is set, the duplicate filtering during
+ * scanning is based on Bluetooth devices addresses. To allow
+ * RSSI based updates, restart scanning if needed.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_STRICT_DUPLICATE_FILTER,
};
/* HCI device flags */
@@ -154,6 +163,7 @@ enum {
enum {
HCI_DUT_MODE,
HCI_FORCE_SC,
+ HCI_FORCE_LESC,
HCI_FORCE_STATIC_ADDR,
};
@@ -265,6 +275,7 @@ enum {
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
#define AMP_LINK 0x81
+#define INVALID_LINK 0xff
/* LMP features */
#define LMP_3SLOT 0x01
@@ -332,6 +343,7 @@ enum {
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
#define HCI_LE_PING 0x10
+#define HCI_LE_EXT_SCAN_POLICY 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -385,6 +397,7 @@ enum {
#define HCI_ERROR_AUTH_FAILURE 0x05
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
#define HCI_ERROR_REMOTE_USER_TERM 0x13
#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
@@ -400,6 +413,7 @@ enum {
/* The core spec defines 127 as the "not available" value */
#define HCI_TX_POWER_INVALID 127
+#define HCI_RSSI_INVALID 127
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
@@ -628,7 +642,7 @@ struct hci_cp_user_passkey_reply {
struct hci_cp_remote_oob_data_reply {
bdaddr_t bdaddr;
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
#define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433
@@ -720,9 +734,9 @@ struct hci_rp_set_csb {
struct hci_cp_remote_oob_ext_data_reply {
bdaddr_t bdaddr;
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define HCI_OP_SNIFF_MODE 0x0803
@@ -929,7 +943,7 @@ struct hci_cp_write_ssp_mode {
struct hci_rp_read_local_oob_data {
__u8 status;
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
@@ -1013,9 +1027,9 @@ struct hci_cp_write_sc_support {
struct hci_rp_read_local_oob_ext_data {
__u8 status;
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define HCI_OP_READ_LOCAL_VERSION 0x1001
@@ -1462,6 +1476,11 @@ struct hci_ev_cmd_status {
__le16 opcode;
} __packed;
+#define HCI_EV_HARDWARE_ERROR 0x10
+struct hci_ev_hardware_error {
+ __u8 code;
+} __packed;
+
#define HCI_EV_ROLE_CHANGE 0x12
struct hci_ev_role_change {
__u8 status;
@@ -1733,6 +1752,25 @@ struct hci_ev_le_conn_complete {
__u8 clk_accurancy;
} __packed;
+/* Advertising report event types */
+#define LE_ADV_IND 0x00
+#define LE_ADV_DIRECT_IND 0x01
+#define LE_ADV_SCAN_IND 0x02
+#define LE_ADV_NONCONN_IND 0x03
+#define LE_ADV_SCAN_RSP 0x04
+
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+
+#define HCI_EV_LE_ADVERTISING_REPORT 0x02
+struct hci_ev_le_advertising_info {
+ __u8 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
struct hci_ev_le_conn_update_complete {
__u8 status;
@@ -1758,23 +1796,14 @@ struct hci_ev_le_remote_conn_param_req {
__le16 timeout;
} __packed;
-/* Advertising report event types */
-#define LE_ADV_IND 0x00
-#define LE_ADV_DIRECT_IND 0x01
-#define LE_ADV_SCAN_IND 0x02
-#define LE_ADV_NONCONN_IND 0x03
-#define LE_ADV_SCAN_RSP 0x04
-
-#define ADDR_LE_DEV_PUBLIC 0x00
-#define ADDR_LE_DEV_RANDOM 0x01
-
-#define HCI_EV_LE_ADVERTISING_REPORT 0x02
-struct hci_ev_le_advertising_info {
+#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
+struct hci_ev_le_direct_adv_info {
__u8 evt_type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
- __u8 length;
- __u8 data[0];
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __s8 rssi;
} __packed;
/* Internal events generated by Bluetooth stack */
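Editor's note - a hedged sketch of walking the variable-length reports carried by struct hci_ev_le_advertising_info (moved above in this hunk): each report is followed by its data and a trailing RSSI byte. The num_reports handling and foo_process_report() are hypothetical; only the struct layout comes from the header.

    static void foo_parse_adv_reports(void *ptr, u8 num_reports)
    {
    	while (num_reports--) {
    		struct hci_ev_le_advertising_info *ev = ptr;
    		s8 rssi = ev->data[ev->length];	/* RSSI byte follows the AD data */

    		foo_process_report(ev->evt_type, ev->bdaddr_type, &ev->bdaddr,
    				   rssi, ev->data, ev->length);

    		ptr += sizeof(*ev) + ev->length + 1;
    	}
    }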
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6f884e6c731e..3c7827005c25 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -75,6 +75,10 @@ struct discovery_state {
u32 last_adv_flags;
u8 last_adv_data[HCI_MAX_AD_LENGTH];
u8 last_adv_data_len;
+ bool report_invalid_rssi;
+ s8 rssi;
+ u16 uuid_count;
+ u8 (*uuids)[16];
};
struct hci_conn_hash {
@@ -108,6 +112,7 @@ struct smp_csrk {
struct smp_ltk {
struct list_head list;
+ struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
u8 authenticated;
@@ -120,6 +125,7 @@ struct smp_ltk {
struct smp_irk {
struct list_head list;
+ struct rcu_head rcu;
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
@@ -128,6 +134,7 @@ struct smp_irk {
struct link_key {
struct list_head list;
+ struct rcu_head rcu;
bdaddr_t bdaddr;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
@@ -137,10 +144,11 @@ struct link_key {
struct oob_data {
struct list_head list;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
u8 hash192[16];
- u8 randomizer192[16];
+ u8 rand192[16];
u8 hash256[16];
- u8 randomizer256[16];
+ u8 rand256[16];
};
#define HCI_MAX_SHORT_NAME_LENGTH 10
@@ -302,7 +310,8 @@ struct hci_dev {
__u32 req_status;
__u32 req_result;
- struct crypto_blkcipher *tfm_aes;
+ void *smp_data;
+ void *smp_bredr_data;
struct discovery_state discovery;
struct hci_conn_hash conn_hash;
@@ -398,6 +407,8 @@ struct hci_conn {
__u16 le_conn_interval;
__u16 le_conn_latency;
__u16 le_supv_timeout;
+ __u8 le_adv_data[HCI_MAX_AD_LENGTH];
+ __u8 le_adv_data_len;
__s8 rssi;
__s8 tx_power;
__s8 max_tx_power;
@@ -496,6 +507,17 @@ static inline void discovery_init(struct hci_dev *hdev)
INIT_LIST_HEAD(&hdev->discovery.all);
INIT_LIST_HEAD(&hdev->discovery.unknown);
INIT_LIST_HEAD(&hdev->discovery.resolve);
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+}
+
+static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
+{
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+ hdev->discovery.uuid_count = 0;
+ kfree(hdev->discovery.uuids);
+ hdev->discovery.uuids = NULL;
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -539,7 +561,6 @@ enum {
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
HCI_CONN_SCO_SETUP_PEND,
- HCI_CONN_LE_SMP_PEND,
HCI_CONN_MGMT_CONNECTED,
HCI_CONN_SSP_ENABLED,
HCI_CONN_SC_ENABLED,
@@ -553,6 +574,9 @@ enum {
HCI_CONN_FIPS,
HCI_CONN_STK_ENCRYPT,
HCI_CONN_AUTH_INITIATOR,
+ HCI_CONN_DROP,
+ HCI_CONN_PARAM_REMOVAL_PEND,
+ HCI_CONN_NEW_LINK_KEY,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -643,6 +667,26 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
return c->acl_num + c->amp_num + c->sco_num + c->le_num;
}
+static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+ __u8 type = INVALID_LINK;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ type = c->type;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return type;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
@@ -702,7 +746,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
return NULL;
}
-void hci_disconnect(struct hci_conn *conn, __u8 reason);
+int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -756,9 +800,10 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status);
* _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
*/
-static inline void hci_conn_get(struct hci_conn *conn)
+static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
{
get_device(&conn->dev);
+ return conn;
}
static inline void hci_conn_put(struct hci_conn *conn)
@@ -790,7 +835,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
if (!conn->out)
timeo *= 2;
} else {
- timeo = msecs_to_jiffies(10);
+ timeo = 0;
}
break;
@@ -799,7 +844,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
break;
default:
- timeo = msecs_to_jiffies(10);
+ timeo = 0;
break;
}
@@ -852,6 +897,7 @@ int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
+int hci_reset_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
@@ -893,13 +939,11 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
bdaddr_t *bdaddr, u8 *val, u8 type,
u8 pin_len, bool *persistent);
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
- u8 role);
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, u8 type, u8 authenticated,
u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
-struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 addr_type, u8 role);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 role);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
void hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
@@ -914,18 +958,16 @@ void hci_smp_irks_clear(struct hci_dev *hdev);
void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
- bdaddr_t *bdaddr);
+ bdaddr_t *bdaddr, u8 bdaddr_type);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 *hash, u8 *randomizer);
-int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 *hash192, u8 *randomizer192,
- u8 *hash256, u8 *randomizer256);
-int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
+ u8 bdaddr_type, u8 *hash192, u8 *rand192,
+ u8 *hash256, u8 *rand256);
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
void hci_init_sysfs(struct hci_dev *hdev);
@@ -970,6 +1012,12 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
+#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \
+ test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \
+ test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1258,6 +1306,8 @@ bool hci_req_pending(struct hci_dev *hdev);
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);
+void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req);
+
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1305,9 +1355,8 @@ int mgmt_update_adv_data(struct hci_dev *hdev);
void mgmt_discoverable_timeout(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
-void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u32 flags, u8 *name, u8 name_len,
- u8 *dev_class);
+void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ u32 flags, u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 reason,
bool mgmt_connected);
@@ -1336,8 +1385,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u32 passkey,
u8 entered);
-void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
+void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
@@ -1345,14 +1393,15 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *randomizer192, u8 *hash256,
- u8 *randomizer256, u8 status);
+ u8 *rand192, u8 *hash256, u8 *rand256,
+ u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
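Editor's note - a hedged sketch of a typical caller of the new hci_conn_lookup_type() helper added earlier in this file: useful when an event only carries a connection handle and the handler needs to know whether it was an ACL, SCO or LE link. The surrounding foo_* handler is hypothetical.

    static void foo_handle_handle_only_event(struct hci_dev *hdev, __le16 handle)
    {
    	__u8 type = hci_conn_lookup_type(hdev, __le16_to_cpu(handle));

    	if (type == INVALID_LINK)
    		return;			/* handle no longer maps to a connection */

    	if (type == LE_LINK)
    		foo_handle_le(hdev, handle);
    	else
    		foo_handle_acl_sco(hdev, handle);
    }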
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 8df15ad0d43f..d1bb342d083f 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -28,6 +28,7 @@
#define __L2CAP_H
#include <asm/unaligned.h>
+#include <linux/atomic.h>
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
@@ -140,6 +141,7 @@ struct l2cap_conninfo {
#define L2CAP_FC_ATT 0x10
#define L2CAP_FC_SIG_LE 0x20
#define L2CAP_FC_SMP_LE 0x40
+#define L2CAP_FC_SMP_BREDR 0x80
/* L2CAP Control Field bit masks */
#define L2CAP_CTRL_SAR 0xC000
@@ -254,6 +256,7 @@ struct l2cap_conn_rsp {
#define L2CAP_CID_ATT 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
+#define L2CAP_CID_SMP_BREDR 0x0007
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
#define L2CAP_CID_LE_DYN_END 0x007f
@@ -481,6 +484,7 @@ struct l2cap_chan {
struct hci_conn *hs_hcon;
struct hci_chan *hs_hchan;
struct kref kref;
+ atomic_t nesting;
__u8 state;
@@ -604,10 +608,6 @@ struct l2cap_ops {
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
- int (*memcpy_fromiovec) (struct l2cap_chan *chan,
- unsigned char *kdata,
- struct iovec *iov,
- int len);
};
struct l2cap_conn {
@@ -617,8 +617,8 @@ struct l2cap_conn {
unsigned int mtu;
__u32 feat_mask;
- __u8 fixed_chan_mask;
- bool hs_enabled;
+ __u8 remote_fixed_chan;
+ __u8 local_fixed_chan;
__u8 info_state;
__u8 info_ident;
@@ -633,10 +633,11 @@ struct l2cap_conn {
struct sk_buff_head pending_rx;
struct work_struct pending_rx_work;
+ struct work_struct id_addr_update_work;
+
__u8 disc_reason;
- struct delayed_work security_timer;
- struct smp_chan *smp_chan;
+ struct l2cap_chan *smp;
struct list_head chan_l;
struct mutex chan_lock;
@@ -708,6 +709,19 @@ enum {
FLAG_EFS_ENABLE,
FLAG_DEFER_SETUP,
FLAG_LE_CONN_REQ_SENT,
+ FLAG_PENDING_SECURITY,
+ FLAG_HOLD_HCI_CONN,
+};
+
+/* Lock nesting levels for L2CAP channels. We need these because lockdep
+ * otherwise considers all channels equal and will e.g. complain about a
+ * connection oriented channel triggering SMP procedures or a listening
+ * channel creating and locking a child channel.
+ */
+enum {
+ L2CAP_NESTING_SMP,
+ L2CAP_NESTING_NORMAL,
+ L2CAP_NESTING_PARENT,
};
enum {
@@ -775,7 +789,7 @@ void l2cap_chan_put(struct l2cap_chan *c);
static inline void l2cap_chan_lock(struct l2cap_chan *chan)
{
- mutex_lock(&chan->lock);
+ mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting));
}
static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
@@ -837,53 +851,53 @@ static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan
return NULL;
}
+static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ return -ENOSYS;
+}
+
+static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
{
}
+static inline void l2cap_chan_no_close(struct l2cap_chan *chan)
+{
+}
+
static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
{
}
-static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
+static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan,
+ int state, int err)
{
}
-static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
{
}
-static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan)
{
}
-static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
{
- return 0;
}
-static inline int l2cap_chan_no_memcpy_fromiovec(struct l2cap_chan *chan,
- unsigned char *kdata,
- struct iovec *iov,
- int len)
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
{
- /* Following is safe since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- struct kvec *vec = (struct kvec *)iov;
-
- while (len > 0) {
- if (vec->iov_len) {
- int copy = min_t(unsigned int, len, vec->iov_len);
- memcpy(kdata, vec->iov_base, copy);
- len -= copy;
- kdata += copy;
- vec->iov_base += copy;
- vec->iov_len -= copy;
- }
- vec++;
- }
+}
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
return 0;
}
@@ -911,14 +925,13 @@ int l2cap_ertm_init(struct l2cap_chan *chan);
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
-void l2cap_conn_update_id_addr(struct hci_conn *hcon);
void l2cap_send_conn_req(struct l2cap_chan *chan);
void l2cap_move_start(struct l2cap_chan *chan);
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
u8 status);
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
-void l2cap_conn_get(struct l2cap_conn *conn);
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
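Editor's note - a hedged sketch of how a channel owner is expected to pick one of the new L2CAP_NESTING_* levels so that l2cap_chan_lock(), now built on mutex_lock_nested(), keeps lockdep happy; the foo_create_chan() wrapper is hypothetical, l2cap_chan_create() and the nesting field come from the header.

    static struct l2cap_chan *foo_create_chan(void)
    {
    	struct l2cap_chan *chan = l2cap_chan_create();

    	if (!chan)
    		return NULL;

    	/* ordinary connection-oriented channel; SMP and listening
    	 * channels would use L2CAP_NESTING_SMP / L2CAP_NESTING_PARENT
    	 */
    	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

    	/* l2cap_chan_lock(chan) now expands to
    	 * mutex_lock_nested(&chan->lock, L2CAP_NESTING_NORMAL)
    	 */
    	return chan;
    }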
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 414cd2f9a437..95c34d5180fa 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -184,6 +184,9 @@ struct mgmt_cp_load_link_keys {
#define MGMT_LTK_UNAUTHENTICATED 0x00
#define MGMT_LTK_AUTHENTICATED 0x01
+#define MGMT_LTK_P256_UNAUTH 0x02
+#define MGMT_LTK_P256_AUTH 0x03
+#define MGMT_LTK_P256_DEBUG 0x04
struct mgmt_ltk_info {
struct mgmt_addr_info addr;
@@ -299,28 +302,28 @@ struct mgmt_cp_user_passkey_neg_reply {
#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
struct mgmt_rp_read_local_oob_data {
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
struct mgmt_rp_read_local_oob_ext_data {
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
struct mgmt_cp_add_remote_oob_data {
struct mgmt_addr_info addr;
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
struct mgmt_cp_add_remote_oob_ext_data {
struct mgmt_addr_info addr;
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64)
@@ -495,6 +498,15 @@ struct mgmt_cp_set_public_address {
} __packed;
#define MGMT_SET_PUBLIC_ADDRESS_SIZE 6
+#define MGMT_OP_START_SERVICE_DISCOVERY 0x003A
+struct mgmt_cp_start_service_discovery {
+ __u8 type;
+ __s8 rssi;
+ __le16 uuid_count;
+ __u8 uuids[0][16];
+} __packed;
+#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
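Editor's note - a hedged sketch of the length check a handler for the new MGMT_OP_START_SERVICE_DISCOVERY command would need: the fixed part is MGMT_START_SERVICE_DISCOVERY_SIZE (4 bytes) and each 128-bit UUID filter adds 16 bytes. The foo_* handler is hypothetical, and a real handler would also bound uuid_count before multiplying.

    static int foo_start_service_discovery(struct hci_dev *hdev, void *data, u16 len)
    {
    	struct mgmt_cp_start_service_discovery *cp = data;
    	u16 uuid_count = __le16_to_cpu(cp->uuid_count);
    	u32 expected_len = MGMT_START_SERVICE_DISCOVERY_SIZE + uuid_count * 16;

    	if (len != expected_len)
    		return -EINVAL;		/* trailing UUID list does not match uuid_count */

    	/* cp->uuids[0] .. cp->uuids[uuid_count - 1] are the 128-bit filters */
    	return 0;
    }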
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
new file mode 100644
index 000000000000..e01d903633ef
--- /dev/null
+++ b/include/net/bond_3ad.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ */
+
+#ifndef _NET_BOND_3AD_H
+#define _NET_BOND_3AD_H
+
+#include <asm/byteorder.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+
+/* General definitions */
+#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
+#define AD_TIMER_INTERVAL 100 /*msec*/
+
+#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
+
+#define AD_LACP_SLOW 0
+#define AD_LACP_FAST 1
+
+typedef struct mac_addr {
+ u8 mac_addr_value[ETH_ALEN];
+} __packed mac_addr_t;
+
+enum {
+ BOND_AD_STABLE = 0,
+ BOND_AD_BANDWIDTH = 1,
+ BOND_AD_COUNT = 2,
+};
+
+/* rx machine states(43.4.11 in the 802.3ad standard) */
+typedef enum {
+ AD_RX_DUMMY,
+ AD_RX_INITIALIZE, /* rx Machine */
+ AD_RX_PORT_DISABLED, /* rx Machine */
+ AD_RX_LACP_DISABLED, /* rx Machine */
+ AD_RX_EXPIRED, /* rx Machine */
+ AD_RX_DEFAULTED, /* rx Machine */
+ AD_RX_CURRENT /* rx Machine */
+} rx_states_t;
+
+/* periodic machine states(43.4.12 in the 802.3ad standard) */
+typedef enum {
+ AD_PERIODIC_DUMMY,
+ AD_NO_PERIODIC, /* periodic machine */
+ AD_FAST_PERIODIC, /* periodic machine */
+ AD_SLOW_PERIODIC, /* periodic machine */
+ AD_PERIODIC_TX /* periodic machine */
+} periodic_states_t;
+
+/* mux machine states(43.4.13 in the 802.3ad standard) */
+typedef enum {
+ AD_MUX_DUMMY,
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
+} mux_states_t;
+
+/* tx machine states(43.4.15 in the 802.3ad standard) */
+typedef enum {
+ AD_TX_DUMMY,
+ AD_TRANSMIT /* tx Machine */
+} tx_states_t;
+
+/* rx indication types */
+typedef enum {
+ AD_TYPE_LACPDU = 1, /* type lacpdu */
+ AD_TYPE_MARKER /* type marker */
+} pdu_type_t;
+
+/* rx marker indication types */
+typedef enum {
+ AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker information subtype */
+ AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
+} bond_marker_subtype_t;
+
+/* timers types(43.4.9 in the 802.3ad standard) */
+typedef enum {
+ AD_CURRENT_WHILE_TIMER,
+ AD_ACTOR_CHURN_TIMER,
+ AD_PERIODIC_TIMER,
+ AD_PARTNER_CHURN_TIMER,
+ AD_WAIT_WHILE_TIMER
+} ad_timers_t;
+
+#pragma pack(1)
+
+/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
+typedef struct lacpdu {
+ u8 subtype; /* = LACP(= 0x01) */
+ u8 version_number;
+ u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+ u8 actor_information_length; /* = 20 */
+ __be16 actor_system_priority;
+ struct mac_addr actor_system;
+ __be16 actor_key;
+ __be16 actor_port_priority;
+ __be16 actor_port;
+ u8 actor_state;
+ u8 reserved_3_1[3]; /* = 0 */
+ u8 tlv_type_partner_info; /* = partner information */
+ u8 partner_information_length; /* = 20 */
+ __be16 partner_system_priority;
+ struct mac_addr partner_system;
+ __be16 partner_key;
+ __be16 partner_port_priority;
+ __be16 partner_port;
+ u8 partner_state;
+ u8 reserved_3_2[3]; /* = 0 */
+ u8 tlv_type_collector_info; /* = collector information */
+ u8 collector_information_length;/* = 16 */
+ __be16 collector_max_delay;
+ u8 reserved_12[12];
+ u8 tlv_type_terminator; /* = terminator */
+ u8 terminator_length; /* = 0 */
+ u8 reserved_50[50]; /* = 0 */
+} __packed lacpdu_t;
+
+typedef struct lacpdu_header {
+ struct ethhdr hdr;
+ struct lacpdu lacpdu;
+} __packed lacpdu_header_t;
+
+/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
+typedef struct bond_marker {
+ u8 subtype; /* = 0x02 (marker PDU) */
+ u8 version_number; /* = 0x01 */
+ u8 tlv_type; /* = 0x01 (marker information) */
+ /* = 0x02 (marker response information) */
+ u8 marker_length; /* = 0x16 */
+ u16 requester_port; /* The number assigned to the port by the requester */
+ struct mac_addr requester_system; /* The requester's system id */
+ u32 requester_transaction_id; /* The transaction id allocated by the requester, */
+ u16 pad; /* = 0 */
+ u8 tlv_type_terminator; /* = 0x00 */
+ u8 terminator_length; /* = 0x00 */
+ u8 reserved_90[90]; /* = 0 */
+} __packed bond_marker_t;
+
+typedef struct bond_marker_header {
+ struct ethhdr hdr;
+ struct bond_marker marker;
+} __packed bond_marker_header_t;
+
+#pragma pack()
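Editor's note - a hedged compile-time sanity check, not part of the header: summing the fields of struct lacpdu above gives the 110-octet LACPDU body defined by 802.3ad, so an assert like this (placed inside any function, e.g. the module init path) would catch accidental padding slipping into the packed layout.

    BUILD_BUG_ON(sizeof(struct lacpdu) != 110);	/* LACPDU body, excluding the Ethernet header */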
+
+struct slave;
+struct bonding;
+struct ad_info;
+struct port;
+
+#ifdef __ia64__
+#pragma pack(8)
+#endif
+
+/* aggregator structure(43.4.5 in the 802.3ad standard) */
+typedef struct aggregator {
+ struct mac_addr aggregator_mac_address;
+ u16 aggregator_identifier;
+ bool is_individual;
+ u16 actor_admin_aggregator_key;
+ u16 actor_oper_aggregator_key;
+ struct mac_addr partner_system;
+ u16 partner_system_priority;
+ u16 partner_oper_aggregator_key;
+ u16 receive_state; /* BOOLEAN */
+ u16 transmit_state; /* BOOLEAN */
+ struct port *lag_ports;
+ /* ****** PRIVATE PARAMETERS ****** */
+ struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
+ u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
+ u16 num_of_ports;
+} aggregator_t;
+
+struct port_params {
+ struct mac_addr system;
+ u16 system_priority;
+ u16 key;
+ u16 port_number;
+ u16 port_priority;
+ u16 port_state;
+};
+
+/* port structure(43.4.6 in the 802.3ad standard) */
+typedef struct port {
+ u16 actor_port_number;
+ u16 actor_port_priority;
+ struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_port_aggregator_identifier;
+ bool ntt;
+ u16 actor_admin_port_key;
+ u16 actor_oper_port_key;
+ u8 actor_admin_port_state;
+ u8 actor_oper_port_state;
+
+ struct port_params partner_admin;
+ struct port_params partner_oper;
+
+ bool is_enabled;
+
+ /* ****** PRIVATE PARAMETERS ****** */
+ u16 sm_vars; /* all state machines variables for this port */
+ rx_states_t sm_rx_state; /* state machine rx state */
+ u16 sm_rx_timer_counter; /* state machine rx timer counter */
+ periodic_states_t sm_periodic_state; /* state machine periodic state */
+ u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
+ mux_states_t sm_mux_state; /* state machine mux state */
+ u16 sm_mux_timer_counter; /* state machine mux timer counter */
+ tx_states_t sm_tx_state; /* state machine tx state */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter (always on - enters the transmit state 3 times per second) */
+ struct slave *slave; /* pointer to the bond slave that this port belongs to */
+ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
+ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
+ u32 transaction_id; /* continuous number for identification of Marker PDUs */
+ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
+} port_t;
+
+/* system structure */
+struct ad_system {
+ u16 sys_priority;
+ struct mac_addr sys_mac_addr;
+};
+
+#ifdef __ia64__
+#pragma pack()
+#endif
+
+/* ========== AD Exported structures to the main bonding code ========== */
+#define BOND_AD_INFO(bond) ((bond)->ad_info)
+#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
+
+struct ad_bond_info {
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; /* Timer to select aggregator after all adapters' handshakes */
+ u16 aggregator_identifier;
+};
+
+struct ad_slave_info {
+ struct aggregator aggregator; /* 802.3ad aggregator structure */
+ struct port port; /* 802.3ad port structure */
+ u16 id;
+};
+
+/* ========== AD Exported functions to the main bonding code ========== */
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
+void bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_unbind_slave(struct slave *slave);
+void bond_3ad_state_machine_handler(struct work_struct *);
+void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
+void bond_3ad_adapter_speed_changed(struct slave *slave);
+void bond_3ad_adapter_duplex_changed(struct slave *slave);
+void bond_3ad_handle_link_change(struct slave *slave, char link);
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
+int __bond_3ad_get_active_agg_info(struct bonding *bond,
+ struct ad_info *ad_info);
+int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
+int bond_3ad_set_carrier(struct bonding *bond);
+void bond_3ad_update_lacp_rate(struct bonding *bond);
+#endif /* _NET_BOND_3AD_H */
+
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
new file mode 100644
index 000000000000..313a8d3b3069
--- /dev/null
+++ b/include/net/bond_alb.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ */
+
+#ifndef _NET_BOND_ALB_H
+#define _NET_BOND_ALB_H
+
+#include <linux/if_ether.h>
+
+struct bonding;
+struct slave;
+
+#define BOND_ALB_INFO(bond) ((bond)->alb_info)
+#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
+
+#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
+#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
+ * Used for division - never set
+ * to zero !!!
+ */
+#define BOND_ALB_DEFAULT_LP_INTERVAL 1
+#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
+ * learning packets to the switch
+ */
+
+#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
+ * Note that this value MUST NOT be smaller
+ * because the key hash table is BYTE wide !
+ */
+
+
+#define TLB_NULL_INDEX 0xffffffff
+
+/* rlb defs */
+#define RLB_HASH_TABLE_SIZE 256
+#define RLB_NULL_INDEX 0xffffffff
+#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */
+#define RLB_ARP_BURST_SIZE 2
+#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
+ * rebalance interval (5 min).
+ */
+/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
+ * promiscuous after failover
+ */
+#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC)
+
+
+struct tlb_client_info {
+ struct slave *tx_slave; /* A pointer to the slave used for transmitting
+ * packets to a Client that the Hash function
+ * gave this entry index.
+ */
+ u32 tx_bytes; /* Each Client accumulates the BytesTx that
+ * were transmitted to it, and after each
+ * CallBack the LoadHistory is divided
+ * by the balance interval
+ */
+ u32 load_history; /* This field contains the number of bytes
+ * that were transmitted to this client by
+ * the server on the previous balance
+ * interval in Bps.
+ */
+ u32 next; /* The next Hash table entry index, assigned
+ * to use the same adapter for transmit.
+ */
+ u32 prev; /* The previous Hash table entry index,
+ * assigned to use the same
+ */
+};
+
+/* -------------------------------------------------------------------------
+ * struct rlb_client_info contains all info related to a specific rx client
+ * connection. This is the Clients Hash Table entry struct.
+ * Note that this is not a proper hash table; if a new client's IP address
+ * hash collides with an existing client entry, the old entry is replaced.
+ *
+ * There is a linked list (linked by the used_next and used_prev members)
+ * linking all the used entries of the hash table. This allows updating
+ * all the clients without walking over all the unused elements of the table.
+ *
+ * There are also linked lists of entries with identical hash(ip_src). These
+ * allow cleaning up the table from ip_src<->mac_src associations that have
+ * become outdated and would cause sending out invalid ARP updates to the
+ * network. These are linked by the (src_next and src_prev members).
+ * -------------------------------------------------------------------------
+ */
+struct rlb_client_info {
+ __be32 ip_src; /* the server IP address */
+ __be32 ip_dst; /* the client IP address */
+ u8 mac_src[ETH_ALEN]; /* the server MAC address */
+ u8 mac_dst[ETH_ALEN]; /* the client MAC address */
+
+ /* list of used hash table entries, starting at rx_hashtbl_used_head */
+ u32 used_next;
+ u32 used_prev;
+
+ /* ip_src based hashing */
+ u32 src_next; /* next entry with same hash(ip_src) */
+ u32 src_prev; /* prev entry with same hash(ip_src) */
+ u32 src_first; /* first entry with hash(ip_src) == this entry's index */
+
+ u8 assigned; /* checking whether this entry is assigned */
+ u8 ntt; /* flag - need to transmit client info */
+ struct slave *slave; /* the slave assigned to this client */
+ unsigned short vlan_id; /* VLAN tag associated with IP address */
+};
+
+struct tlb_slave_info {
+ u32 head; /* Index to the head of the bi-directional clients
+ * hash table entries list. The entries in the list
+ * are the entries that were assigned to use this
+ * slave for transmit.
+ */
+ u32 load; /* Each slave sums the loadHistory of all clients
+ * assigned to it
+ */
+};
+
+struct alb_bond_info {
+ struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
+ u32 unbalanced_load;
+ int tx_rebalance_counter;
+ int lp_counter;
+ /* -------- rlb parameters -------- */
+ int rlb_enabled;
+ struct rlb_client_info *rx_hashtbl; /* Receive hash table */
+ u32 rx_hashtbl_used_head;
+ u8 rx_ntt; /* flag - need to transmit
+ * to all rx clients
+ */
+ struct slave *rx_slave;/* last slave to xmit from */
+ u8 primary_is_promisc; /* boolean */
+ u32 rlb_promisc_timeout_counter;/* counts primary
+ * promiscuity time
+ */
+ u32 rlb_update_delay_counter;
+ u32 rlb_update_retry_counter;/* counter of retries
+ * of client update
+ */
+ u8 rlb_rebalance; /* flag - indicates that the
+ * rx traffic should be
+ * rebalanced
+ */
+};
+
+int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
+void bond_alb_deinitialize(struct bonding *bond);
+int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
+void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
+void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
+void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
+int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+void bond_alb_monitor(struct work_struct *);
+int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
+void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
+#endif /* _NET_BOND_ALB_H */
+
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
new file mode 100644
index 000000000000..ea6546d2c946
--- /dev/null
+++ b/include/net/bond_options.h
@@ -0,0 +1,130 @@
+/*
+ * drivers/net/bond/bond_options.h - bonding options
+ * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _NET_BOND_OPTIONS_H
+#define _NET_BOND_OPTIONS_H
+
+#define BOND_OPT_MAX_NAMELEN 32
+#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
+#define BOND_MODE_ALL_EX(x) (~(x))
+
+/* Option flags:
+ * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting
+ * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting
+ * BOND_OPTFLAG_RAWVAL - the option parses the value itself
+ */
+enum {
+ BOND_OPTFLAG_NOSLAVES = BIT(0),
+ BOND_OPTFLAG_IFDOWN = BIT(1),
+ BOND_OPTFLAG_RAWVAL = BIT(2)
+};
+
+/* Value type flags:
+ * BOND_VALFLAG_DEFAULT - mark the value as default
+ * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max
+ */
+enum {
+ BOND_VALFLAG_DEFAULT = BIT(0),
+ BOND_VALFLAG_MIN = BIT(1),
+ BOND_VALFLAG_MAX = BIT(2)
+};
+
+/* Option IDs, their bit positions correspond to their IDs */
+enum {
+ BOND_OPT_MODE,
+ BOND_OPT_PACKETS_PER_SLAVE,
+ BOND_OPT_XMIT_HASH,
+ BOND_OPT_ARP_VALIDATE,
+ BOND_OPT_ARP_ALL_TARGETS,
+ BOND_OPT_FAIL_OVER_MAC,
+ BOND_OPT_ARP_INTERVAL,
+ BOND_OPT_ARP_TARGETS,
+ BOND_OPT_DOWNDELAY,
+ BOND_OPT_UPDELAY,
+ BOND_OPT_LACP_RATE,
+ BOND_OPT_MINLINKS,
+ BOND_OPT_AD_SELECT,
+ BOND_OPT_NUM_PEER_NOTIF,
+ BOND_OPT_MIIMON,
+ BOND_OPT_PRIMARY,
+ BOND_OPT_PRIMARY_RESELECT,
+ BOND_OPT_USE_CARRIER,
+ BOND_OPT_ACTIVE_SLAVE,
+ BOND_OPT_QUEUE_ID,
+ BOND_OPT_ALL_SLAVES_ACTIVE,
+ BOND_OPT_RESEND_IGMP,
+ BOND_OPT_LP_INTERVAL,
+ BOND_OPT_SLAVES,
+ BOND_OPT_TLB_DYNAMIC_LB,
+ BOND_OPT_LAST
+};
+
+/* This structure is used for storing option values and for passing option
+ * values when changing an option. The logic when used as an arg is as follows:
+ * - if string != NULL -> parse it, if the opt is RAW type then return it, else
+ * return the parse result
+ * - if string == NULL -> parse value
+ */
+struct bond_opt_value {
+ char *string;
+ u64 value;
+ u32 flags;
+};
+
+struct bonding;
+
+struct bond_option {
+ int id;
+ const char *name;
+ const char *desc;
+ u32 flags;
+
+ /* unsuppmodes is used to denote modes in which the option isn't
+ * supported.
+ */
+ unsigned long unsuppmodes;
+ /* supported values which this option can have, can be a subset of
+ * BOND_OPTVAL_RANGE's value range
+ */
+ const struct bond_opt_value *values;
+
+ int (*set)(struct bonding *bond, const struct bond_opt_value *val);
+};
+
+int __bond_opt_set(struct bonding *bond, unsigned int option,
+ struct bond_opt_value *val);
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
+
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val);
+const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+/* This helper is used to initialize a bond_opt_value structure for parameter
+ * passing. There should be either a valid string or value, but not both.
+ * When value is ULLONG_MAX then string will be used.
+ */
+static inline void __bond_opt_init(struct bond_opt_value *optval,
+ char *string, u64 value)
+{
+ memset(optval, 0, sizeof(*optval));
+ optval->value = ULLONG_MAX;
+ if (value == ULLONG_MAX)
+ optval->string = string;
+ else
+ optval->value = value;
+}
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
+
+void bond_option_arp_ip_targets_clear(struct bonding *bond);
+
+#endif /* _NET_BOND_OPTIONS_H */
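Editor's note - a hedged usage sketch for the option API declared above: initialise a bond_opt_value either from a string or from a raw value and hand it to __bond_opt_set() with RTNL held (or go through bond_opt_tryset_rtnl()). The foo_* wrapper is hypothetical; the macros and __bond_opt_set() come from the header.

    static int foo_set_mode_8023ad(struct bonding *bond)
    {
    	struct bond_opt_value newval;

    	/* string form; bond_opt_initval(&newval, BOND_MODE_8023AD) would work too */
    	bond_opt_initstr(&newval, "802.3ad");

    	return __bond_opt_set(bond, BOND_OPT_MODE, &newval);	/* caller holds RTNL */
    }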
diff --git a/include/net/bonding.h b/include/net/bonding.h
new file mode 100644
index 000000000000..983a94b86b95
--- /dev/null
+++ b/include/net/bonding.h
@@ -0,0 +1,654 @@
+/*
+ * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _NET_BONDING_H
+#define _NET_BONDING_H
+
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/if_bonding.h>
+#include <linux/cpumask.h>
+#include <linux/in6.h>
+#include <linux/netpoll.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/reciprocal_div.h>
+#include <linux/if_link.h>
+
+#include <net/bond_3ad.h>
+#include <net/bond_alb.h>
+#include <net/bond_options.h>
+
+#define DRV_VERSION "3.7.1"
+#define DRV_RELDATE "April 27, 2011"
+#define DRV_NAME "bonding"
+#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
+#define BOND_MAX_ARP_TARGETS 16
+
+#define BOND_DEFAULT_MIIMON 100
+
+/*
+ * Less bad way to call ioctl from within the kernel; this needs to be
+ * done some other way to get the call out of interrupt context.
+ * Needs "ioctl" variable to be supplied by calling context.
+ */
+#define IOCTL(dev, arg, cmd) ({ \
+ int res = 0; \
+ mm_segment_t fs = get_fs(); \
+ set_fs(get_ds()); \
+ res = ioctl(dev, arg, cmd); \
+ set_fs(fs); \
+ res; })
+
+#define BOND_MODE(bond) ((bond)->params.mode)
+
+/* slave list primitives */
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
+
+/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
+#define bond_first_slave(bond) \
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+ NULL)
+#define bond_last_slave(bond) \
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+ NULL)
+
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+ netdev_lower_get_first_private_rcu(bond->dev)
+
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
+
+/**
+ * bond_for_each_slave - iterate over all slaves
+ * @bond: the bond holding this list
+ * @pos: current slave
+ * @iter: list_head * iterator
+ *
+ * Caller must hold RTNL
+ */
+#define bond_for_each_slave(bond, pos, iter) \
+ netdev_for_each_lower_private((bond)->dev, pos, iter)
+
+/* Caller must have rcu_read_lock */
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern atomic_t netpoll_block_tx;
+
+static inline void block_netpoll_tx(void)
+{
+ atomic_inc(&netpoll_block_tx);
+}
+
+static inline void unblock_netpoll_tx(void)
+{
+ atomic_dec(&netpoll_block_tx);
+}
+
+static inline int is_netpoll_tx_blocked(struct net_device *dev)
+{
+ if (unlikely(netpoll_tx_running(dev)))
+ return atomic_read(&netpoll_block_tx);
+ return 0;
+}
+#else
+#define block_netpoll_tx()
+#define unblock_netpoll_tx()
+#define is_netpoll_tx_blocked(dev) (0)
+#endif
+
+struct bond_params {
+ int mode;
+ int xmit_policy;
+ int miimon;
+ u8 num_peer_notif;
+ int arp_interval;
+ int arp_validate;
+ int arp_all_targets;
+ int use_carrier;
+ int fail_over_mac;
+ int updelay;
+ int downdelay;
+ int lacp_fast;
+ unsigned int min_links;
+ int ad_select;
+ char primary[IFNAMSIZ];
+ int primary_reselect;
+ __be32 arp_targets[BOND_MAX_ARP_TARGETS];
+ int tx_queues;
+ int all_slaves_active;
+ int resend_igmp;
+ int lp_interval;
+ int packets_per_slave;
+ int tlb_dynamic_lb;
+ struct reciprocal_value reciprocal_packets_per_slave;
+};
+
+struct bond_parm_tbl {
+ char *modename;
+ int mode;
+};
+
+struct slave {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct bonding *bond; /* our master */
+ int delay;
+ /* all three in jiffies */
+ unsigned long last_link_up;
+ unsigned long last_rx;
+ unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
+ s8 link; /* one of BOND_LINK_XXXX */
+ s8 new_link;
+ u8 backup:1, /* indicates backup slave. Value corresponds with
+ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
+ inactive:1, /* indicates inactive slave */
+ should_notify:1; /* indicates whether the state changed */
+ u8 duplex;
+ u32 original_mtu;
+ u32 link_failure_count;
+ u32 speed;
+ u16 queue_id;
+ u8 perm_hwaddr[ETH_ALEN];
+ struct ad_slave_info *ad_info;
+ struct tlb_slave_info tlb_info;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
+ struct kobject kobj;
+ struct rtnl_link_stats64 slave_stats;
+};
+
+struct bond_up_slave {
+ unsigned int count;
+ struct rcu_head rcu;
+ struct slave *arr[0];
+};
+
+/*
+ * Link pseudo-state only used internally by monitors
+ */
+#define BOND_LINK_NOCHANGE -1
+
+/*
+ * Here are the locking policies for the two bonding locks:
+ * Get rcu_read_lock when reading or RTNL when writing slave list.
+ */
+struct bonding {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct slave __rcu *curr_active_slave;
+ struct slave __rcu *current_arp_slave;
+ struct slave __rcu *primary_slave;
+ struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+ bool force_primary;
+ s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
+ /* mode_lock is used for mode-specific locking needs, currently used by:
+ * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and
+ * bond_3ad_state_machine_handler() concurrently and also
+ * the access to the state machine shared variables.
+ * TLB mode (5) - to sync the use and modifications of its hash table
+ * ALB mode (6) - to sync the use and modifications of its hash table
+ */
+ spinlock_t mode_lock;
+ u8 send_peer_notif;
+ u8 igmp_retrans;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+ char proc_file_name[IFNAMSIZ];
+#endif /* CONFIG_PROC_FS */
+ struct list_head bond_list;
+ u32 rr_tx_counter;
+ struct ad_bond_info ad_info;
+ struct alb_bond_info alb_info;
+ struct bond_params params;
+ struct workqueue_struct *wq;
+ struct delayed_work mii_work;
+ struct delayed_work arp_work;
+ struct delayed_work alb_work;
+ struct delayed_work ad_work;
+ struct delayed_work mcast_work;
+ struct delayed_work slave_arr_work;
+#ifdef CONFIG_DEBUG_FS
+ /* debugging support via debugfs */
+ struct dentry *debug_dir;
+#endif /* CONFIG_DEBUG_FS */
+ struct rtnl_link_stats64 bond_stats;
+};
+
+#define bond_slave_get_rcu(dev) \
+ ((struct slave *) rcu_dereference(dev->rx_handler_data))
+
+#define bond_slave_get_rtnl(dev) \
+ ((struct slave *) rtnl_dereference(dev->rx_handler_data))
+
+struct bond_vlan_tag {
+ __be16 vlan_proto;
+ unsigned short vlan_id;
+};
+
+/**
+ * Returns NULL if the net_device does not belong to any of the bond's slaves
+ *
+ * Caller must hold bond lock for read
+ */
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
+ struct net_device *slave_dev)
+{
+ return netdev_lower_dev_get_private(bond->dev, slave_dev);
+}
+
+static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
+{
+ return slave->bond;
+}
+
+static inline bool bond_should_override_tx_queue(struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+ BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
+}
+
+static inline bool bond_is_lb(const struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_TLB ||
+ BOND_MODE(bond) == BOND_MODE_ALB;
+}
+
+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+{
+ return (BOND_MODE(bond) == BOND_MODE_TLB) &&
+ (bond->params.tlb_dynamic_lb == 0);
+}
+
+static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond)
+{
+ return (BOND_MODE(bond) == BOND_MODE_8023AD ||
+ BOND_MODE(bond) == BOND_MODE_XOR ||
+ bond_is_nondyn_tlb(bond));
+}
+
+static inline bool bond_mode_uses_arp(int mode)
+{
+ return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
+ mode != BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_primary(int mode)
+{
+ return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
+ mode == BOND_MODE_ALB;
+}
+
+static inline bool bond_uses_primary(struct bonding *bond)
+{
+ return bond_mode_uses_primary(BOND_MODE(bond));
+}
+
+static inline bool bond_slave_is_up(struct slave *slave)
+{
+ return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
+}
+
+static inline void bond_set_active_slave(struct slave *slave)
+{
+ if (slave->backup) {
+ slave->backup = 0;
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+ }
+}
+
+static inline void bond_set_backup_slave(struct slave *slave)
+{
+ if (!slave->backup) {
+ slave->backup = 1;
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+ }
+}
+
+static inline void bond_set_slave_state(struct slave *slave,
+ int slave_state, bool notify)
+{
+ if (slave->backup == slave_state)
+ return;
+
+ slave->backup = slave_state;
+ if (notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+ slave->should_notify = 0;
+ } else {
+ if (slave->should_notify)
+ slave->should_notify = 0;
+ else
+ slave->should_notify = 1;
+ }
+}
+
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->link == BOND_LINK_UP)
+ bond_set_active_slave(tmp);
+ else if (tmp->link == BOND_LINK_DOWN)
+ bond_set_backup_slave(tmp);
+ }
+}
+
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
+ tmp->should_notify = 0;
+ }
+ }
+}
+
+static inline int bond_slave_state(struct slave *slave)
+{
+ return slave->backup;
+}
+
+static inline bool bond_is_active_slave(struct slave *slave)
+{
+ return !bond_slave_state(slave);
+}
+
+static inline bool bond_slave_can_tx(struct slave *slave)
+{
+ return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
+ bond_is_active_slave(slave);
+}
+
+#define BOND_PRI_RESELECT_ALWAYS 0
+#define BOND_PRI_RESELECT_BETTER 1
+#define BOND_PRI_RESELECT_FAILURE 2
+
+#define BOND_FOM_NONE 0
+#define BOND_FOM_ACTIVE 1
+#define BOND_FOM_FOLLOW 2
+
+#define BOND_ARP_TARGETS_ANY 0
+#define BOND_ARP_TARGETS_ALL 1
+
+#define BOND_ARP_VALIDATE_NONE 0
+#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
+#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
+#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
+ BOND_ARP_FILTER)
+
+#define BOND_SLAVE_NOTIFY_NOW true
+#define BOND_SLAVE_NOTIFY_LATER false
+
+static inline int slave_do_arp_validate(struct bonding *bond,
+ struct slave *slave)
+{
+ return bond->params.arp_validate & (1 << bond_slave_state(slave));
+}
+
+static inline int slave_do_arp_validate_only(struct bonding *bond)
+{
+ return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
+static inline int bond_is_ip_target_ok(__be32 addr)
+{
+ return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
+}
+
+/* Get the oldest arp which we've received on this slave for bond's
+ * arp_targets.
+ */
+static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
+ struct slave *slave)
+{
+ int i = 1;
+ unsigned long ret = slave->target_last_arp_rx[0];
+
+ for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
+ if (time_before(slave->target_last_arp_rx[i], ret))
+ ret = slave->target_last_arp_rx[i];
+
+ return ret;
+}
+
+static inline unsigned long slave_last_rx(struct bonding *bond,
+ struct slave *slave)
+{
+ if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+ return slave_oldest_target_arp_rx(bond, slave);
+
+ return slave->last_rx;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
+{
+ struct netpoll *np = slave->np;
+
+ if (np)
+ netpoll_send_skb(np, skb);
+}
+#else
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bool notify)
+{
+ if (!bond_is_lb(slave->bond))
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
+ if (!slave->bond->params.all_slaves_active)
+ slave->inactive = 1;
+}
+
+static inline void bond_set_slave_active_flags(struct slave *slave,
+ bool notify)
+{
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
+ slave->inactive = 0;
+}
+
+static inline bool bond_is_slave_inactive(struct slave *slave)
+{
+ return slave->inactive;
+}
+
+static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
+{
+ struct in_device *in_dev;
+ __be32 addr = 0;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+
+ if (in_dev)
+ addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
+ RT_SCOPE_HOST);
+ rcu_read_unlock();
+ return addr;
+}
+
+struct bond_net {
+ struct net *net; /* Associated network namespace */
+ struct list_head dev_list;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_dir;
+#endif
+ struct class_attribute class_attr_bonding_masters;
+};
+
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_create(struct net *net, const char *name);
+int bond_create_sysfs(struct bond_net *net);
+void bond_destroy_sysfs(struct bond_net *net);
+void bond_prepare_sysfs_group(struct bonding *bond);
+int bond_sysfs_slave_add(struct slave *slave);
+void bond_sysfs_slave_del(struct slave *slave);
+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
+int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
+void bond_select_active_slave(struct bonding *bond);
+void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
+void bond_create_debugfs(void);
+void bond_destroy_debugfs(void);
+void bond_debug_register(struct bonding *bond);
+void bond_debug_unregister(struct bonding *bond);
+void bond_debug_reregister(struct bonding *bond);
+const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+const char *bond_slave_link_status(s8 link);
+struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ int level);
+int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
+void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+
+#ifdef CONFIG_PROC_FS
+void bond_create_proc_entry(struct bonding *bond);
+void bond_remove_proc_entry(struct bonding *bond);
+void bond_create_proc_dir(struct bond_net *bn);
+void bond_destroy_proc_dir(struct bond_net *bn);
+#else
+static inline void bond_create_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_create_proc_dir(struct bond_net *bn)
+{
+}
+
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
+{
+}
+#endif
+
+static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+ const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+ struct netdev_hw_addr *ha;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+
+ if (netdev_uc_empty(bond->dev))
+ return false;
+
+ netdev_for_each_uc_addr(ha, bond->dev)
+ if (ether_addr_equal_64bits(mac, ha->addr))
+ return true;
+
+ return false;
+}
+
+/* Check if the ip is present in the arp ip list, or find the first free slot
+ * if ip == 0. Returns -1 if not found, index if found.
+ */
+static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
+{
+ int i;
+
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+ if (targets[i] == ip)
+ return i;
+ else if (targets[i] == 0)
+ break;
+
+ return -1;
+}
+
+/* exported from bond_main.c */
+extern int bond_net_id;
+extern const struct bond_parm_tbl bond_lacp_tbl[];
+extern const struct bond_parm_tbl xmit_hashtype_tbl[];
+extern const struct bond_parm_tbl arp_validate_tbl[];
+extern const struct bond_parm_tbl arp_all_targets_tbl[];
+extern const struct bond_parm_tbl fail_over_mac_tbl[];
+extern const struct bond_parm_tbl pri_reselect_tbl[];
+extern struct bond_parm_tbl ad_select_tbl[];
+
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
+static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+{
+ atomic_long_inc(&dev->tx_dropped);
+ dev_kfree_skb_any(skb);
+}
+
+#endif /* _NET_BONDING_H */
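A quick stand-alone sketch may help make the arp_validate encoding above concrete: the option is a small bitmask indexed by slave state, with an extra filter bit on top. The BOND_STATE_ACTIVE (0) and BOND_STATE_BACKUP (1) values are assumed from the UAPI header <linux/if_bonding.h>; they are not defined in this file.

#include <stdio.h>

/* assumed values, from <linux/if_bonding.h> */
#define BOND_STATE_ACTIVE 0
#define BOND_STATE_BACKUP 1

/* same definitions as in the header above */
#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
#define BOND_ARP_VALIDATE_ALL    (BOND_ARP_VALIDATE_ACTIVE | BOND_ARP_VALIDATE_BACKUP)
#define BOND_ARP_FILTER          (BOND_ARP_VALIDATE_ALL + 1)

int main(void)
{
    int arp_validate = BOND_ARP_VALIDATE_ALL; /* the "all" setting */
    int slave_state = BOND_STATE_BACKUP;      /* slave->backup == 1 */

    /* the test done by slave_do_arp_validate() */
    printf("validate arp on this slave: %s\n",
           arp_validate & (1 << slave_state) ? "yes" : "no");
    /* the test done by slave_do_arp_validate_only() */
    printf("filter-only mode: %s\n",
           arp_validate & BOND_ARP_FILTER ? "yes" : "no");
    return 0;
}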
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0a080c4de275..4ebb816241fa 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4,6 +4,7 @@
* 802.11 device and configuration interface
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -318,9 +319,12 @@ struct ieee80211_supported_band {
/**
* struct vif_params - describes virtual interface parameters
* @use_4addr: use 4-address frames
- * @macaddr: address to use for this virtual interface. This will only
- * be used for non-netdevice interfaces. If this parameter is set
- * to zero address the driver may determine the address as needed.
+ * @macaddr: address to use for this virtual interface.
+ * If this parameter is set to the zero address, the driver may
+ * determine the address as needed.
+ * This feature is only fully supported by drivers that enable the
+ * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
+ * only P2P devices with a specified MAC address.
*/
struct vif_params {
int use_4addr;
@@ -663,6 +667,7 @@ struct cfg80211_acl_data {
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
+ * @smps_mode: SMPS mode
* @inactivity_timeout: time in seconds to determine station's inactivity.
* @p2p_ctwindow: P2P CT Window
* @p2p_opp_ps: P2P opportunistic PS
@@ -681,6 +686,7 @@ struct cfg80211_ap_settings {
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
+ enum nl80211_smps_mode smps_mode;
int inactivity_timeout;
u8 p2p_ctwindow;
bool p2p_opp_ps;
@@ -796,6 +802,22 @@ struct station_parameters {
};
/**
+ * struct station_del_parameters - station deletion parameters
+ *
+ * Used to delete a station entry (or all stations).
+ *
+ * @mac: MAC address of the station to remove or NULL to remove all stations
+ * @subtype: Management frame subtype to use for indicating removal
+ * (10 = Disassociation, 12 = Deauthentication)
+ * @reason_code: Reason code for the Disassociation/Deauthentication frame
+ */
+struct station_del_parameters {
+ const u8 *mac;
+ u8 subtype;
+ u16 reason_code;
+};
+
+/**
* enum cfg80211_station_type - the type of station being modified
* @CFG80211_STA_AP_CLIENT: client of an AP interface
* @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
@@ -1337,6 +1359,16 @@ struct mesh_setup {
};
/**
+ * struct ocb_setup - 802.11p OCB mode setup configuration
+ * @chandef: defines the channel to use
+ *
+ * These parameters are fixed when connecting to the network
+ */
+struct ocb_setup {
+ struct cfg80211_chan_def chandef;
+};
+
+/**
* struct ieee80211_txq_params - TX queue parameters
* @ac: AC identifier
* @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
@@ -1405,6 +1437,10 @@ struct cfg80211_ssid {
* @aborted: (internal) scan request was notified as aborted
* @notified: (internal) scan request was notified as done or aborted
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -1419,6 +1455,9 @@ struct cfg80211_scan_request {
struct wireless_dev *wdev;
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
/* internal */
struct wiphy *wiphy;
unsigned long scan_start;
@@ -1429,6 +1468,17 @@ struct cfg80211_scan_request {
struct ieee80211_channel *channels[0];
};
+static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
+{
+ int i;
+
+ get_random_bytes(buf, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++) {
+ buf[i] &= ~mask[i];
+ buf[i] |= addr[i] & mask[i];
+ }
+}
+
/**
* struct cfg80211_match_set - sets of attributes to match
*
@@ -1462,6 +1512,10 @@ struct cfg80211_match_set {
* @channels: channels to scan
* @min_rssi_thold: for drivers only supporting a single threshold, this
* contains the minimum over all matchsets
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1476,6 +1530,9 @@ struct cfg80211_sched_scan_request {
int n_match_sets;
s32 min_rssi_thold;
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
@@ -1503,12 +1560,14 @@ enum cfg80211_signal_type {
* @tsf: TSF contained in the frame that carried these IEs
* @rcu_head: internal use, for freeing
* @len: length of the IEs
+ * @from_beacon: these IEs are known to come from a beacon
* @data: IE data
*/
struct cfg80211_bss_ies {
u64 tsf;
struct rcu_head rcu_head;
int len;
+ bool from_beacon;
u8 data[];
};
@@ -1605,10 +1664,12 @@ struct cfg80211_auth_request {
*
* @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
* @ASSOC_REQ_DISABLE_VHT: Disable VHT
+ * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
ASSOC_REQ_DISABLE_VHT = BIT(1),
+ ASSOC_REQ_USE_RRM = BIT(2),
};
/**
@@ -1800,6 +1861,7 @@ struct cfg80211_connect_params {
* @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
* @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
* @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
+ * @WIPHY_PARAM_DYN_ACK: dynack has been enabled
*/
enum wiphy_params_flags {
WIPHY_PARAM_RETRY_SHORT = 1 << 0,
@@ -1807,6 +1869,7 @@ enum wiphy_params_flags {
WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
+ WIPHY_PARAM_DYN_ACK = 1 << 5,
};
/*
@@ -1902,6 +1965,7 @@ struct cfg80211_wowlan_tcp {
* @rfkill_release: wake up when rfkill is released
* @tcp: TCP connection establishment/wakeup parameters, see nl80211.h.
* NULL if not configured.
+ * @nd_config: configuration for the scan to be used for net detect wake.
*/
struct cfg80211_wowlan {
bool any, disconnect, magic_pkt, gtk_rekey_failure,
@@ -1910,6 +1974,7 @@ struct cfg80211_wowlan {
struct cfg80211_pkt_pattern *patterns;
struct cfg80211_wowlan_tcp *tcp;
int n_patterns;
+ struct cfg80211_sched_scan_request *nd_config;
};
/**
@@ -1942,6 +2007,35 @@ struct cfg80211_coalesce {
};
/**
+ * struct cfg80211_wowlan_nd_match - information about the match
+ *
+ * @ssid: SSID of the match that triggered the wake up
+ * @n_channels: Number of channels where the match occurred. This
+ * value may be zero if the driver can't report the channels.
+ * @channels: center frequencies of the channels where a match
+ * occurred (in MHz)
+ */
+struct cfg80211_wowlan_nd_match {
+ struct cfg80211_ssid ssid;
+ int n_channels;
+ u32 channels[];
+};
+
+/**
+ * struct cfg80211_wowlan_nd_info - net detect wake up information
+ *
+ * @n_matches: Number of match information instances provided in
+ * @matches. This value may be zero if the driver can't provide
+ * match information.
+ * @matches: Array of pointers to matches containing information about
+ * the matches that triggered the wake up.
+ */
+struct cfg80211_wowlan_nd_info {
+ int n_matches;
+ struct cfg80211_wowlan_nd_match *matches[];
+};
+
+/**
* struct cfg80211_wowlan_wakeup - wakeup report
* @disconnect: woke up by getting disconnected
* @magic_pkt: woke up by receiving magic packet
@@ -1960,6 +2054,7 @@ struct cfg80211_coalesce {
* @tcp_match: TCP wakeup packet received
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
+ * @net_detect: if not %NULL, woke up because of net detect
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
@@ -1969,18 +2064,17 @@ struct cfg80211_wowlan_wakeup {
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
+ struct cfg80211_wowlan_nd_info *net_detect;
};
/**
* struct cfg80211_gtk_rekey_data - rekey data
- * @kek: key encryption key
- * @kck: key confirmation key
- * @replay_ctr: replay counter
+ * @kek: key encryption key (NL80211_KEK_LEN bytes)
+ * @kck: key confirmation key (NL80211_KCK_LEN bytes)
+ * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
*/
struct cfg80211_gtk_rekey_data {
- u8 kek[NL80211_KEK_LEN];
- u8 kck[NL80211_KCK_LEN];
- u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+ const u8 *kek, *kck, *replay_ctr;
};
/**
@@ -2125,7 +2219,7 @@ struct cfg80211_qos_map {
* @stop_ap: Stop being an AP, including stopping beaconing.
*
* @add_station: Add a new station.
- * @del_station: Remove a station; @mac may be NULL to remove all stations.
+ * @del_station: Remove a station
* @change_station: Modify a given station. Note that flags changes are not much
* validated in cfg80211, in particular the auth/assoc/authorized flags
* might come to the driver in invalid combinations -- make sure to check
@@ -2139,6 +2233,8 @@ struct cfg80211_qos_map {
* @change_mpath: change a given mesh path
* @get_mpath: get a mesh path for the given parameters
* @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @get_mpp: get a mesh proxy path for the given parameters
+ * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx
* @join_mesh: join the mesh network with the specified parameters
* (invoked with the wireless_dev mutex held)
* @leave_mesh: leave the current mesh network
@@ -2313,6 +2409,28 @@ struct cfg80211_qos_map {
* @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
* given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
* changes during the lifetime of the BSS.
+ *
+ * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device
+ * with the given parameters; action frame exchange has been handled by
+ * userspace so this just has to modify the TX path to take the TS into
+ * account.
+ * If the admitted time is 0 just validate the parameters to make sure
+ * the session can be created at all; it is valid to always return
+ * success here, but that may result in inefficient behaviour (handshake
+ * with the peer followed by immediate teardown when the addition is later
+ * rejected)
+ * @del_tx_ts: remove an existing TX TS
+ *
+ * @join_ocb: join the OCB network with the specified parameters
+ * (invoked with the wireless_dev mutex held)
+ * @leave_ocb: leave the current OCB network
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
+ * is responsible for continually initiating channel-switching operations
+ * and returning to the base channel for communication with the AP.
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
+ * peers must be on the base channel when the call completes.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2358,7 +2476,7 @@ struct cfg80211_ops {
const u8 *mac,
struct station_parameters *params);
int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac);
+ struct station_del_parameters *params);
int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac,
struct station_parameters *params);
@@ -2378,6 +2496,11 @@ struct cfg80211_ops {
int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *dst, u8 *next_hop,
struct mpath_info *pinfo);
+ int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *dst, u8 *mpp, struct mpath_info *pinfo);
+ int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *dst, u8 *mpp,
+ struct mpath_info *pinfo);
int (*get_mesh_config)(struct wiphy *wiphy,
struct net_device *dev,
struct mesh_config *conf);
@@ -2389,6 +2512,10 @@ struct cfg80211_ops {
const struct mesh_setup *setup);
int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev);
+ int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev,
+ struct ocb_setup *setup);
+ int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev);
+
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
@@ -2553,6 +2680,20 @@ struct cfg80211_ops {
int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_chan_def *chandef);
+
+ int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer, u8 user_prio,
+ u16 admitted_time);
+ int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer);
+
+ int (*tdls_channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef);
+ void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr);
};
/*
@@ -2727,6 +2868,7 @@ struct ieee80211_txrx_stypes {
* @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request
* @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure
* @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release
+ * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection
*/
enum wiphy_wowlan_support_flags {
WIPHY_WOWLAN_ANY = BIT(0),
@@ -2737,6 +2879,7 @@ enum wiphy_wowlan_support_flags {
WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5),
WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6),
WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7),
+ WIPHY_WOWLAN_NET_DETECT = BIT(8),
};
struct wiphy_wowlan_tcp_support {
@@ -2755,6 +2898,11 @@ struct wiphy_wowlan_tcp_support {
* @pattern_max_len: maximum length of each pattern
* @pattern_min_len: minimum length of each pattern
* @max_pkt_offset: maximum Rx packet offset
+ * @max_nd_match_sets: maximum number of matchsets for net-detect,
+ * similar, but not necessarily identical, to max_match_sets for
+ * scheduled scans.
+ * See &struct cfg80211_sched_scan_request.@match_sets for more
+ * details.
* @tcp: TCP wakeup support information
*/
struct wiphy_wowlan_support {
@@ -2763,6 +2911,7 @@ struct wiphy_wowlan_support {
int pattern_max_len;
int pattern_min_len;
int max_pkt_offset;
+ int max_nd_match_sets;
const struct wiphy_wowlan_tcp_support *tcp;
};
@@ -3138,6 +3287,23 @@ static inline const char *wiphy_name(const struct wiphy *wiphy)
}
/**
+ * wiphy_new_nm - create a new wiphy for use with cfg80211
+ *
+ * @ops: The configuration operations for this device
+ * @sizeof_priv: The size of the private area to allocate
+ * @requested_name: Request a particular name.
+ * NULL is a valid value and means to use the default phy%d naming.
+ *
+ * Create a new wiphy and associate the given operations with it.
+ * @sizeof_priv bytes are allocated for private use.
+ *
+ * Return: A pointer to the new wiphy. This pointer must be
+ * assigned to each netdev's ieee80211_ptr for proper operation.
+ */
+struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ const char *requested_name);
+
+/**
* wiphy_new - create a new wiphy for use with cfg80211
*
* @ops: The configuration operations for this device
@@ -3149,7 +3315,11 @@ static inline const char *wiphy_name(const struct wiphy *wiphy)
* Return: A pointer to the new wiphy. This pointer must be
* assigned to each netdev's ieee80211_ptr for proper operation.
*/
-struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
+static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops,
+ int sizeof_priv)
+{
+ return wiphy_new_nm(ops, sizeof_priv, NULL);
+}
/**
* wiphy_register - register a wiphy with cfg80211
@@ -3765,11 +3935,25 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
}
/**
- * cfg80211_inform_bss - inform cfg80211 of a new BSS
+ * enum cfg80211_bss_frame_type - frame type that the BSS data came from
+ * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
+ * from a beacon or probe response
+ * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
+ * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ */
+enum cfg80211_bss_frame_type {
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ CFG80211_BSS_FTYPE_BEACON,
+ CFG80211_BSS_FTYPE_PRESP,
+};
+
+/**
+ * cfg80211_inform_bss_width - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
* @rx_channel: The channel the frame was received on
* @scan_width: width of the control channel
+ * @ftype: frame type (if known)
* @bssid: the BSSID of the BSS
* @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
* @capability: the capability field sent by the peer
@@ -3789,6 +3973,7 @@ struct cfg80211_bss * __must_check
cfg80211_inform_bss_width(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum nl80211_bss_scan_width scan_width,
+ enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp);
@@ -3796,12 +3981,13 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
+ enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp)
{
return cfg80211_inform_bss_width(wiphy, rx_channel,
- NL80211_BSS_CHAN_WIDTH_20,
+ NL80211_BSS_CHAN_WIDTH_20, ftype,
bssid, tsf, capability,
beacon_interval, ie, ielen, signal,
gfp);
@@ -3902,6 +4088,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* moves to cfg80211 in this call
* @buf: authentication frame (header + body)
* @len: length of the frame data
+ * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a.
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -3910,7 +4097,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
struct cfg80211_bss *bss,
- const u8 *buf, size_t len);
+ const u8 *buf, size_t len,
+ int uapsd_queues);
/**
* cfg80211_assoc_timeout - notification of timed out association
@@ -4412,7 +4600,6 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in enum nl80211_rxmgmt_flags
- * @gfp: context flags
*
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
@@ -4423,7 +4610,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* driver is responsible for rejecting the frame.
*/
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
- const u8 *buf, size_t len, u32 flags, gfp_t gfp);
+ const u8 *buf, size_t len, u32 flags);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
@@ -4456,33 +4643,6 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
gfp_t gfp);
/**
- * cfg80211_radar_event - radar detection event
- * @wiphy: the wiphy
- * @chandef: chandef for the current channel
- * @gfp: context flags
- *
- * This function is called when a radar is detected on the current chanenl.
- */
-void cfg80211_radar_event(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef, gfp_t gfp);
-
-/**
- * cfg80211_cac_event - Channel availability check (CAC) event
- * @netdev: network device
- * @chandef: chandef for the current channel
- * @event: type of event
- * @gfp: context flags
- *
- * This function is called when a Channel availability check (CAC) is finished
- * or aborted. This must be called to notify the completion of a CAC process,
- * also by full-MAC drivers.
- */
-void cfg80211_cac_event(struct net_device *netdev,
- const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
-
-
-/**
* cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
* @dev: network device
* @peer: peer's MAC address
@@ -4510,6 +4670,42 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
/**
+ * cfg80211_cqm_beacon_loss_notify - beacon loss event
+ * @dev: network device
+ * @gfp: context flags
+ *
+ * Notify userspace about beacon loss from the connected AP.
+ */
+void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
+
+/**
+ * cfg80211_radar_event - radar detection event
+ * @wiphy: the wiphy
+ * @chandef: chandef for the current channel
+ * @gfp: context flags
+ *
+ * This function is called when a radar is detected on the current channel.
+ */
+void cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef, gfp_t gfp);
+
+/**
+ * cfg80211_cac_event - Channel availability check (CAC) event
+ * @netdev: network device
+ * @chandef: chandef for the current channel
+ * @event: type of event
+ * @gfp: context flags
+ *
+ * This function is called when a Channel availability check (CAC) is finished
+ * or aborted. This must be called to notify the completion of a CAC process,
+ * also by full-MAC drivers.
+ */
+void cfg80211_cac_event(struct net_device *netdev,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_radar_event event, gfp_t gfp);
+
+
+/**
* cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
* @dev: network device
* @bssid: BSSID of AP (to avoid races)
@@ -4612,6 +4808,20 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef);
+/**
+ * cfg80211_ch_switch_started_notify - notify channel switch start
+ * @dev: the device on which the channel switch started
+ * @chandef: the future channel definition
+ * @count: the number of TBTTs until the channel switch happens
+ *
+ * Inform userspace about the channel switch that has just
+ * started, so that it can take appropriate action (e.g. starting a
+ * channel switch on other vifs), if necessary.
+ */
+void cfg80211_ch_switch_started_notify(struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u8 count);
+
/**
* ieee80211_operating_class_to_band - convert operating class to band
*
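The @mac_addr/@mac_addr_mask semantics above (implemented by get_random_mask_addr()) boil down to: mask bits set to 1 are copied from the template address, mask bits set to 0 are randomised. A small user-space model, with illustrative addresses and rand() standing in for get_random_bytes(), shows the effect:

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* same masking rule as get_random_mask_addr() above */
static void random_mask_addr(unsigned char *buf, const unsigned char *addr,
                             const unsigned char *mask)
{
    int i;

    for (i = 0; i < ETH_ALEN; i++) {
        buf[i] = rand() & 0xff; /* stand-in for get_random_bytes() */
        buf[i] &= ~mask[i];
        buf[i] |= addr[i] & mask[i];
    }
}

int main(void)
{
    /* keep the locally-administered OUI 02:11:22, randomise the rest */
    const unsigned char addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0, 0, 0 };
    const unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0, 0, 0 };
    unsigned char out[ETH_ALEN];
    int i;

    random_mask_addr(out, addr, mask);
    for (i = 0; i < ETH_ALEN; i++)
        printf("%02x%c", out[i], i == ETH_ALEN - 1 ? '\n' : ':');
    return 0;
}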
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
new file mode 100644
index 000000000000..7f713acfa106
--- /dev/null
+++ b/include/net/cfg802154.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ */
+
+#ifndef __NET_CFG802154_H
+#define __NET_CFG802154_H
+
+#include <linux/ieee802154.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/bug.h>
+
+#include <net/nl802154.h>
+
+struct wpan_phy;
+
+struct cfg802154_ops {
+ struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
+ const char *name,
+ int type);
+ void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
+ struct net_device *dev);
+ int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
+ const char *name,
+ enum nl802154_iftype type,
+ __le64 extended_addr);
+ int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
+ int (*set_pan_id)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le16 pan_id);
+ int (*set_short_addr)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le16 short_addr);
+ int (*set_backoff_exponent)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, u8 min_be,
+ u8 max_be);
+ int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ u8 max_csma_backoffs);
+ int (*set_max_frame_retries)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ s8 max_frame_retries);
+ int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool mode);
+};
+
+struct wpan_phy {
+ struct mutex pib_lock;
+
+ /* If multiple wpan_phys are registered and you're handed e.g.
+ * a regular netdev with assigned ieee802154_ptr, you won't
+ * know whether it points to a wpan_phy your driver has registered
+ * or not. Assign this to something global to your driver to
+ * help determine whether you own this wpan_phy or not.
+ */
+ const void *privid;
+
+ /*
+ * This is a PIB according to 802.15.4-2011.
+ * We do not provide timing-related variables, as they
+ * aren't used outside of driver
+ */
+ u8 current_channel;
+ u8 current_page;
+ u32 channels_supported[IEEE802154_MAX_PAGE + 1];
+ s8 transmit_power;
+ u8 cca_mode;
+
+ __le64 perm_extended_addr;
+
+ s32 cca_ed_level;
+
+ /* PHY-dependent MAC PIB values */
+
+ /* 802.15.4 acronym: Tdsym in usec */
+ u8 symbol_duration;
+ /* lifs and sifs periods timing */
+ u16 lifs_period;
+ u16 sifs_period;
+
+ struct device dev;
+
+ char priv[0] __aligned(NETDEV_ALIGN);
+};
+
+struct wpan_dev {
+ struct wpan_phy *wpan_phy;
+ int iftype;
+
+ /* the remainder of this struct should be private to cfg802154 */
+ struct list_head list;
+ struct net_device *netdev;
+
+ u32 identifier;
+
+ /* MAC PIB */
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 extended_addr;
+
+ /* MAC BSN field */
+ u8 bsn;
+ /* MAC DSN field */
+ u8 dsn;
+
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+
+ bool promiscuous_mode;
+};
+
+#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
+
+struct wpan_phy *
+wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
+static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
+{
+ phy->dev.parent = dev;
+}
+
+int wpan_phy_register(struct wpan_phy *phy);
+void wpan_phy_unregister(struct wpan_phy *phy);
+void wpan_phy_free(struct wpan_phy *phy);
+/* Same semantics as for class_for_each_device */
+int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data);
+
+static inline void *wpan_phy_priv(struct wpan_phy *phy)
+{
+ BUG_ON(!phy);
+ return &phy->priv;
+}
+
+struct wpan_phy *wpan_phy_find(const char *str);
+
+static inline void wpan_phy_put(struct wpan_phy *phy)
+{
+ put_device(&phy->dev);
+}
+
+static inline const char *wpan_phy_name(struct wpan_phy *phy)
+{
+ return dev_name(&phy->dev);
+}
+
+#endif /* __NET_CFG802154_H */
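For orientation, here is a hedged sketch of how a driver might allocate and register a wpan_phy using only the calls declared above; my_ops, struct my_priv and my_probe() are illustrative names, not part of this header, and error handling is kept minimal.

#include <net/cfg802154.h>

/* illustrative per-device state and (empty) ops table */
struct my_priv {
    int irq;
};

static const struct cfg802154_ops my_ops;

static int my_probe(struct device *parent)
{
    struct wpan_phy *phy;
    struct my_priv *priv;
    int err;

    phy = wpan_phy_new(&my_ops, sizeof(*priv));
    if (!phy)
        return -ENOMEM;

    priv = wpan_phy_priv(phy);
    priv->irq = -1;

    /* parent the phy so the sysfs hierarchy reflects the real device */
    wpan_phy_set_dev(phy, parent);

    err = wpan_phy_register(phy);
    if (err)
        wpan_phy_free(phy);
    return err;
}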
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 87cb1903640d..e339a9513e29 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,9 +122,7 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
- __be32 diff[] = { ~from, to };
-
- *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
+ *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
}
/* Implements RFC 1624 (Incremental Internet Checksum)
@@ -153,4 +151,20 @@ static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
(__force __be32)to, pseudohdr);
}
+static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
+{
+ __sum16 *psum = (__sum16 *)(ptr + offset);
+ __wsum delta;
+
+ /* Subtract out checksum up to start */
+ csum = csum_sub(csum, csum_partial(ptr, start, 0));
+
+ /* Set derived checksum in packet */
+ delta = csum_sub(csum_fold(csum), *psum);
+ *psum = csum_fold(csum);
+
+ return delta;
+}
+
#endif
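Both the csum_replace4() rewrite and remcsum_adjust() rely on incremental one's-complement updates in the spirit of RFC 1624. The following stand-alone sketch (user-space C with plain 16-bit sums, not the kernel's __wsum helpers) shows that patching a checksum in place agrees with recomputing it from scratch:

#include <stdint.h>
#include <stdio.h>

/* one's-complement sum over 16-bit words, returned as the value you
 * would place in a checksum field */
static uint16_t csum16(const uint16_t *buf, int n)
{
    uint32_t sum = 0;

    while (n--)
        sum += *buf++;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return ~sum & 0xffff;
}

int main(void)
{
    uint16_t pkt[4] = { 0x1234, 0xabcd, 0x0001, 0x0002 };
    uint16_t old = csum16(pkt, 4);
    uint16_t m = pkt[1], m_new = 0x5678;
    uint32_t sum;
    uint16_t inc;

    pkt[1] = m_new;

    /* RFC 1624 eq. 3: HC' = ~(~HC + ~m + m') */
    sum = (uint16_t)~old;
    sum += (uint16_t)~m + m_new;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    inc = ~sum & 0xffff;

    printf("recomputed %#06x, incremental %#06x\n", csum16(pkt, 4), inc);
    return 0;
}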
diff --git a/include/net/codel.h b/include/net/codel.h
index fe0eab32ce76..aeee28081245 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t;
static inline codel_time_t codel_get_time(void)
{
- u64 ns = ktime_to_ns(ktime_get());
+ u64 ns = ktime_get_ns();
return ns >> CODEL_SHIFT;
}
diff --git a/include/net/compat.h b/include/net/compat.h
index 3b603b199c01..42a9c8431177 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,9 +40,8 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-int verify_compat_iovec(struct msghdr *, struct iovec *,
- struct sockaddr_storage *, int);
+ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+ struct sockaddr __user **, struct iovec **);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6efce384451e..ed3c34bbb67a 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -15,6 +15,18 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/ethtool.h>
+
+enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = 0,
+ DSA_TAG_PROTO_DSA,
+ DSA_TAG_PROTO_TRAILER,
+ DSA_TAG_PROTO_EDSA,
+ DSA_TAG_PROTO_BRCM,
+};
#define DSA_MAX_SWITCHES 4
#define DSA_MAX_PORTS 12
@@ -23,9 +35,18 @@ struct dsa_chip_data {
/*
* How to access the switch configuration registers.
*/
- struct device *mii_bus;
+ struct device *host_dev;
int sw_addr;
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
+
+ /* Device tree node pointer for this specific switch chip
+ * used during switch setup in case additional properties
+ * and resources needs to be used
+ */
+ struct device_node *of_node;
+
/*
* The names of the switch's ports. Use "cpu" to
* designate the switch port that the cpu is connected to,
@@ -34,6 +55,7 @@ struct dsa_chip_data {
* or any other string to indicate this is a physical port.
*/
char *port_names[DSA_MAX_PORTS];
+ struct device_node *port_dn[DSA_MAX_PORTS];
/*
* An array (with nr_chips elements) of which element [a]
@@ -59,6 +81,8 @@ struct dsa_platform_data {
struct dsa_chip_data *chip;
};
+struct packet_type;
+
struct dsa_switch_tree {
/*
* Configuration data for the platform device that owns
@@ -71,7 +95,11 @@ struct dsa_switch_tree {
* protocol to use.
*/
struct net_device *master_netdev;
- __be16 tag_protocol;
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev);
+ enum dsa_tag_protocol tag_protocol;
/*
* The switch and port to which the CPU is attached.
@@ -110,15 +138,24 @@ struct dsa_switch {
struct dsa_switch_driver *drv;
/*
- * Reference to mii bus to use.
+ * Reference to host device to use.
*/
- struct mii_bus *master_mii_bus;
+ struct device *master_dev;
+
+#ifdef CONFIG_NET_DSA_HWMON
+ /*
+ * Hardware monitoring information
+ */
+ char hwmon_name[IFNAMSIZ + 8];
+ struct device *hwmon_dev;
+#endif
/*
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
u32 phys_port_mask;
+ u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
struct net_device *ports[DSA_MAX_PORTS];
};
@@ -147,15 +184,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
struct dsa_switch_driver {
struct list_head list;
- __be16 tag_protocol;
+ enum dsa_tag_protocol tag_protocol;
int priv_size;
/*
* Probing and setup.
*/
- char *(*probe)(struct mii_bus *bus, int sw_addr);
+ char *(*probe)(struct device *host_dev, int sw_addr);
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
+ u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
* Access to the switch's PHY registers.
@@ -170,37 +208,86 @@ struct dsa_switch_driver {
void (*poll_link)(struct dsa_switch *ds);
/*
+ * Link state adjustment (called from libphy)
+ */
+ void (*adjust_link)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev);
+ void (*fixed_link_update)(struct dsa_switch *ds, int port,
+ struct fixed_phy_status *st);
+
+ /*
* ethtool hardware statistics.
*/
void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
void (*get_ethtool_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
int (*get_sset_count)(struct dsa_switch *ds);
+
+ /*
+ * ethtool Wake-on-LAN
+ */
+ void (*get_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+ int (*set_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+
+ /*
+ * Suspend and resume
+ */
+ int (*suspend)(struct dsa_switch *ds);
+ int (*resume)(struct dsa_switch *ds);
+
+ /*
+ * Port enable/disable
+ */
+ int (*port_enable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+ void (*port_disable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+
+ /*
+ * EEE settings
+ */
+ int (*set_eee)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev,
+ struct ethtool_eee *e);
+ int (*get_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
+
+#ifdef CONFIG_NET_DSA_HWMON
+ /* Hardware monitoring */
+ int (*get_temp)(struct dsa_switch *ds, int *temp);
+ int (*get_temp_limit)(struct dsa_switch *ds, int *temp);
+ int (*set_temp_limit)(struct dsa_switch *ds, int temp);
+ int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm);
+#endif
+
+ /* EEPROM access */
+ int (*get_eeprom_len)(struct dsa_switch *ds);
+ int (*get_eeprom)(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ /*
+ * Register access.
+ */
+ int (*get_regs_len)(struct dsa_switch *ds, int port);
+ void (*get_regs)(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *p);
};
void register_switch_driver(struct dsa_switch_driver *type);
void unregister_switch_driver(struct dsa_switch_driver *type);
+struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
static inline void *ds_to_priv(struct dsa_switch *ds)
{
return (void *)(ds + 1);
}
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
-{
- return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
- return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+ return dst->rcv != NULL;
}
-
#endif
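As a rough sketch of the reworked driver interface (device-based probe(), enum dsa_tag_protocol), the skeleton below registers a do-nothing switch driver. The foo_* names are illustrative; a real driver also fills in the PHY register access, link handling and ethtool hooks listed above.

#include <linux/module.h>
#include <net/dsa.h>

static char *foo_dsa_probe(struct device *host_dev, int sw_addr)
{
    /* poke the device behind host_dev here; return a name string
     * when a supported switch answers at sw_addr, NULL otherwise */
    return "example switch";
}

static int foo_dsa_setup(struct dsa_switch *ds)
{
    /* reset the chip, configure CPU and user ports */
    return 0;
}

static int foo_dsa_set_addr(struct dsa_switch *ds, u8 *addr)
{
    /* program the switch MAC address used in pause frames etc. */
    return 0;
}

static struct dsa_switch_driver foo_dsa_driver = {
    .tag_protocol = DSA_TAG_PROTO_NONE,
    .probe        = foo_dsa_probe,
    .setup        = foo_dsa_setup,
    .set_addr     = foo_dsa_set_addr,
};

static int __init foo_dsa_init(void)
{
    register_switch_driver(&foo_dsa_driver);
    return 0;
}
module_init(foo_dsa_init);

static void __exit foo_dsa_exit(void)
{
    unregister_switch_driver(&foo_dsa_driver);
}
module_exit(foo_dsa_exit);

MODULE_LICENSE("GPL");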
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 2f26dfb8450e..1f99a1de0e4f 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -63,7 +63,7 @@ static inline void dst_entries_add(struct dst_ops *dst, int val)
static inline int dst_entries_init(struct dst_ops *dst)
{
- return percpu_counter_init(&dst->pcpuc_entries, 0);
+ return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
}
static inline void dst_entries_destroy(struct dst_ops *dst)
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 6667a054763a..7ee2df083542 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -27,7 +27,19 @@ struct flow_keys {
u8 ip_proto;
};
-bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
-__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+ void *data, __be16 proto, int nhoff, int hlen);
+static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+ return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
+}
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
u32 flow_hash_from_keys(struct flow_keys *keys);
+unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
+ __be16 protocol);
#endif
diff --git a/include/net/fou.h b/include/net/fou.h
new file mode 100644
index 000000000000..19b8a0c62a98
--- /dev/null
+++ b/include/net/fou.h
@@ -0,0 +1,19 @@
+#ifndef __NET_FOU_H
+#define __NET_FOU_H
+
+#include <linux/skbuff.h>
+
+#include <net/flow.h>
+#include <net/gue.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+size_t fou_encap_hlen(struct ip_tunnel_encap *e);
+size_t gue_encap_hlen(struct ip_tunnel_encap *e);
+
+int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+
+#endif
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ea4271dceff0..cbafa3768d48 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,6 +6,11 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+struct gnet_stats_basic_cpu {
+ struct gnet_stats_basic_packed bstats;
+ struct u64_stats_sync syncp;
+};
+
struct gnet_dump {
spinlock_t * lock;
struct sk_buff * skb;
@@ -27,21 +32,29 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d);
int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
+void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est64 *r);
-int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue __percpu *cpu_q,
+ struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index af10c2cf8a1d..84125088c309 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -31,6 +31,9 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @mcast_bind: a socket bound to the given multicast group (which
+ * is given as the offset into the groups array)
+ * @mcast_unbind: a socket was unbound from the given multicast group
* @attrbuf: buffer to store parsed attributes
* @family_list: family list
* @mcgrps: multicast groups used by this family (private)
@@ -53,6 +56,8 @@ struct genl_family {
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*mcast_bind)(struct net *net, int group);
+ void (*mcast_unbind)(struct net *net, int group);
struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops; /* private */
const struct genl_multicast_group *mcgrps; /* private */
@@ -395,11 +400,11 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
}
static inline int genl_has_listeners(struct genl_family *family,
- struct sock *sk, unsigned int group)
+ struct net *net, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
- return netlink_has_listeners(sk, group);
+ return netlink_has_listeners(net->genl_sock, group);
}
#endif /* __NET_GENERIC_NETLINK_H */
diff --git a/include/net/geneve.h b/include/net/geneve.h
new file mode 100644
index 000000000000..112132cf8e2e
--- /dev/null
+++ b/include/net/geneve.h
@@ -0,0 +1,97 @@
+#ifndef __NET_GENEVE_H
+#define __NET_GENEVE_H 1
+
+#ifdef CONFIG_INET
+#include <net/udp_tunnel.h>
+#endif
+
+
+/* Geneve Header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Virtual Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Length Options |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Option Header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Option Class | Type |R|R|R| Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Option Data |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+struct geneve_opt {
+ __be16 opt_class;
+ u8 type;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 length:5;
+ u8 r3:1;
+ u8 r2:1;
+ u8 r1:1;
+#else
+ u8 r1:1;
+ u8 r2:1;
+ u8 r3:1;
+ u8 length:5;
+#endif
+ u8 opt_data[];
+};
+
+#define GENEVE_CRIT_OPT_TYPE (1 << 7)
+
+struct genevehdr {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 opt_len:6;
+ u8 ver:2;
+ u8 rsvd1:6;
+ u8 critical:1;
+ u8 oam:1;
+#else
+ u8 ver:2;
+ u8 opt_len:6;
+ u8 oam:1;
+ u8 critical:1;
+ u8 rsvd1:6;
+#endif
+ __be16 proto_type;
+ u8 vni[3];
+ u8 rsvd2;
+ struct geneve_opt options[];
+};
+
+#ifdef CONFIG_INET
+struct geneve_sock;
+
+typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
+
+struct geneve_sock {
+ struct hlist_node hlist;
+ geneve_rcv_t *rcv;
+ void *rcv_data;
+ struct work_struct del_work;
+ struct socket *sock;
+ struct rcu_head rcu;
+ atomic_t refcnt;
+ struct udp_offload udp_offloads;
+};
+
+#define GENEVE_VER 0
+#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
+
+struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
+ geneve_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6);
+
+void geneve_sock_release(struct geneve_sock *vs);
+
+int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
+ __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool xnet);
+#endif /*ifdef CONFIG_INET */
+
+#endif /*ifdef __NET_GENEVE_H */
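A hedged sketch of how an encapsulation driver might use geneve_sock_add(); the my_* names are illustrative, 6081 is simply the IANA-assigned GENEVE port, and the ERR_PTR() return convention on failure is an assumption about the implementation, not something this header states.

#include <linux/err.h>
#include <net/geneve.h>

struct my_geneve_priv {
    struct geneve_sock *gs;
};

static void my_geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
{
    /* gs->rcv_data is the pointer handed to geneve_sock_add() below;
     * decapsulation and delivery of the inner frame would go here. */
    kfree_skb(skb);
}

static int my_geneve_open(struct net *net, struct my_geneve_priv *priv)
{
    struct geneve_sock *gs;

    /* assumed: geneve_sock_add() returns an ERR_PTR() on failure */
    gs = geneve_sock_add(net, htons(6081), my_geneve_rcv, priv,
                         false, false);
    if (IS_ERR(gs))
        return PTR_ERR(gs);

    priv->gs = gs;
    return 0;
}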
diff --git a/include/net/gue.h b/include/net/gue.h
new file mode 100644
index 000000000000..3f28ec7f1c7f
--- /dev/null
+++ b/include/net/gue.h
@@ -0,0 +1,116 @@
+#ifndef __NET_GUE_H
+#define __NET_GUE_H
+
+/* Definitions for the GUE header, standard and private flags, lengths
+ * of optional fields are below.
+ *
+ * Diagram of GUE header:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|C| Hlen | Proto/ctype | Standard flags |P|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Fields (optional) ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Private flags (optional, P bit is set) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Private fields (optional) ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C bit indicates control message when set, data message when unset.
+ * For a control message, proto/ctype is interpreted as a type of
+ * control message. For data messages, proto/ctype is the IP protocol
+ * of the next header.
+ *
+ * P bit indicates private flags field is present. The private flags
+ * may refer to options placed after this field.
+ */
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen:5,
+ control:1,
+ version:2;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:2,
+ control:1,
+ hlen:5;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 proto_ctype;
+ __u16 flags;
+ };
+ __u32 word;
+ };
+};
+
+/* Standard flags in GUE header */
+
+#define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */
+#define GUE_LEN_PRIV 4
+
+#define GUE_FLAGS_ALL (GUE_FLAG_PRIV)
+
+/* Private flags in the private option extension */
+
+#define GUE_PFLAG_REMCSUM htonl(1 << 31)
+#define GUE_PLEN_REMCSUM 4
+
+#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM)
+
+/* Functions to compute options length corresponding to flags.
+ * If we ever have a lot of flags this can potentially be
+ * converted to a more optimized algorithm (table lookup
+ * for instance).
+ */
+static inline size_t guehdr_flags_len(__be16 flags)
+{
+ return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0);
+}
+
+static inline size_t guehdr_priv_flags_len(__be32 flags)
+{
+ return 0;
+}
+
+/* Validate standard and private flags. Returns non-zero (meaning invalid)
+ * if there are unknown standard or private flags, or if the options length for
+ * the flags exceeds the options length specified in hlen of the GUE header.
+ */
+static inline int validate_gue_flags(struct guehdr *guehdr,
+ size_t optlen)
+{
+ size_t len;
+ __be32 flags = guehdr->flags;
+
+ if (flags & ~GUE_FLAGS_ALL)
+ return 1;
+
+ len = guehdr_flags_len(flags);
+ if (len > optlen)
+ return 1;
+
+ if (flags & GUE_FLAG_PRIV) {
+ /* Private flags are last four bytes accounted in
+ * guehdr_flags_len
+ */
+ flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+
+ if (flags & ~GUE_PFLAGS_ALL)
+ return 1;
+
+ len += guehdr_priv_flags_len(flags);
+ if (len > optlen)
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif
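A minimal userspace sketch of the option-length bookkeeping performed by guehdr_flags_len() and validate_gue_flags() above, using host-order flag words for readability (the kernel operates on __be16/__be32 fields taken straight from the packet); the optlen values in main() are hypothetical.

#include <stddef.h>
#include <stdio.h>

#define FLAG_PRIV      (1u << 0)    /* mirrors GUE_FLAG_PRIV */
#define LEN_PRIV       4            /* mirrors GUE_LEN_PRIV */
#define FLAGS_ALL      (FLAG_PRIV)
#define PFLAG_REMCSUM  (1u << 31)   /* mirrors GUE_PFLAG_REMCSUM */
#define PFLAGS_ALL     (PFLAG_REMCSUM)

/* Returns 0 when the flags fit inside optlen (hlen * 4 in a real header),
 * non-zero when an unknown flag is set or the options would overflow.
 */
static int check_gue_flags(unsigned int flags, unsigned int pflags,
                           size_t optlen)
{
        size_t len = 0;

        if (flags & ~FLAGS_ALL)
                return 1;                /* unknown standard flag */

        if (flags & FLAG_PRIV) {
                len += LEN_PRIV;         /* room for the private flags word */
                if (len > optlen)
                        return 1;
                if (pflags & ~PFLAGS_ALL)
                        return 1;        /* unknown private flag */
                /* guehdr_priv_flags_len() reports 0 extra bytes for now,
                 * so nothing further is added to len here.
                 */
        }
        return len > optlen;
}

int main(void)
{
        printf("%d\n", check_gue_flags(FLAG_PRIV, PFLAG_REMCSUM, 8)); /* 0 */
        printf("%d\n", check_gue_flags(FLAG_PRIV, PFLAG_REMCSUM, 0)); /* 1 */
        return 0;
}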
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 3b53c8e405e4..83bb8a73d23c 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -27,10 +23,10 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
-#include <net/ieee802154.h>
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <linux/ieee802154.h>
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -427,8 +423,6 @@ struct ieee802154_mlme_ops {
/* The fields below are required. */
- struct wpan_phy *(*get_phy)(const struct net_device *dev);
-
/*
* FIXME: these should become the part of PIB/MIB interface.
* However we still don't have IB interface of any kind
@@ -438,16 +432,6 @@ struct ieee802154_mlme_ops {
u8 (*get_dsn)(const struct net_device *dev);
};
-/* The IEEE 802.15.4 standard defines 2 type of the devices:
- * - FFD - full functionality device
- * - RFD - reduce functionality device
- *
- * So 2 sets of mlme operations are needed
- */
-struct ieee802154_reduced_mlme_ops {
- struct wpan_phy *(*get_phy)(const struct net_device *dev);
-};
-
static inline struct ieee802154_mlme_ops *
ieee802154_mlme_ops(const struct net_device *dev)
{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d07b1a64b4e7..98e5f9578f86 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -35,7 +35,6 @@ enum {
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
INET6_IFADDR_STATE_ERRDAD,
- INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
@@ -147,7 +146,6 @@ struct ifacaddr6 {
struct ifacaddr6 *aca_next;
int aca_users;
atomic_t aca_refcnt;
- spinlock_t aca_lock;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
};
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index ae0613544308..9201afe083fa 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -80,7 +80,8 @@ static inline struct sock *__inet6_lookup(struct net *net,
static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
const __be16 sport,
- const __be16 dport)
+ const __be16 dport,
+ int iif)
{
struct sock *sk = skb_steal_sock(skb);
@@ -90,7 +91,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
&ipv6_hdr(skb)->saddr, sport,
&ipv6_hdr(skb)->daddr, ntohs(dport),
- inet6_iif(skb));
+ iif);
}
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
@@ -98,4 +99,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
const struct in6_addr *daddr, const __be16 dport,
const int dif);
#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_family == AF_INET6) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
+ (!(__sk)->sk_bound_dev_if || \
+ ((__sk)->sk_bound_dev_if == (__dif))) && \
+ net_eq(sock_net(__sk), (__net)))
+
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index fe7994c48b75..b2828a06a5a6 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
unsigned short type, unsigned char protocol,
struct net *net);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5fbe6568c3cf..848e85cb5c61 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
#endif
}
+static inline unsigned long
+inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
+ unsigned long max_when)
+{
+ u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
+
+ return (unsigned long)min_t(u64, when, max_when);
+}
+
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
struct request_sock *inet_csk_search_req(const struct sock *sk,
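The inet_csk_rto_backoff() helper added above doubles the retransmission timeout once per backoff step and clamps the result to max_when; doing the shift in 64 bits keeps a large backoff from wrapping before the clamp is applied. A small userspace sketch of the same arithmetic, with made-up jiffies values:

/* Clamped exponential backoff: rto << backoff computed in 64 bits so a
 * large shift cannot overflow, then bounded by max_when. All values here
 * are hypothetical jiffies counts used only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long rto_backoff(unsigned long rto, unsigned int backoff,
                                 unsigned long max_when)
{
        uint64_t when = (uint64_t)rto << backoff;

        return when > max_when ? max_when : (unsigned long)when;
}

int main(void)
{
        unsigned long rto = 200, max_when = 120000;   /* example jiffies */

        for (unsigned int b = 0; b <= 12; b += 3)
                printf("backoff %2u -> %lu\n", b, rto_backoff(rto, b, max_when));
        return 0;
}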
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 65a8855e99fe..8d1765577acc 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -151,7 +151,7 @@ static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
- percpu_counter_init(&nf->mem, 0);
+ percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 01d590ee5e7e..80479abddf73 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -61,7 +61,6 @@ struct inet_peer {
struct inet_peer_base {
struct inet_peer __rcu *root;
seqlock_t lock;
- u32 flush_seq;
int total;
};
diff --git a/include/net/ip.h b/include/net/ip.h
index db4a771b9ef3..0bb620702929 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -180,8 +180,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- __be32 saddr, const struct ip_reply_arg *arg,
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+ const struct ip_options *sopt,
+ __be32 daddr, __be32 saddr,
+ const struct ip_reply_arg *arg,
unsigned int len);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
@@ -229,8 +231,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
}
#endif
-extern int sysctl_ip_nonlocal_bind;
-
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
@@ -364,6 +364,14 @@ static inline void inet_set_txhash(struct sock *sk)
sk->sk_txhash = flow_hash_from_keys(&keys);
}
+static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
@@ -505,7 +513,14 @@ int ip_forward(struct sk_buff *skb);
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag);
-int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+
+int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
+ const struct ip_options *sopt);
+static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
+{
+ return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
+}
+
void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
@@ -542,6 +557,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
+bool icmp_global_allow(void);
+extern int sysctl_icmp_msgs_per_sec;
+extern int sysctl_icmp_msgs_burst;
+
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif
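The inet_gro_compute_pseudo() helper added above feeds the IPv4 pseudo-header (addresses, protocol, L4 length) into the checksum used when validating GRO'd TCP/UDP packets. The sketch below shows the same pseudo-header summation as plain one's-complement arithmetic on host-order values; it is an illustration rather than the kernel's csum_tcpudp_nofold(), and the addresses are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* One's-complement addition of a 16-bit word with end-around carry. */
static uint32_t csum_add(uint32_t sum, uint16_t word)
{
        sum += word;
        return (sum & 0xffff) + (sum >> 16);
}

/* IPv4 pseudo-header sum over saddr, daddr, protocol and L4 length,
 * all given in host byte order for readability.
 */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint8_t proto, uint16_t l4len)
{
        uint32_t sum = 0;

        sum = csum_add(sum, saddr >> 16);
        sum = csum_add(sum, saddr & 0xffff);
        sum = csum_add(sum, daddr >> 16);
        sum = csum_add(sum, daddr & 0xffff);
        sum = csum_add(sum, proto);          /* zero byte + protocol */
        sum = csum_add(sum, l4len);
        return (uint16_t)sum;
}

int main(void)
{
        /* 192.0.2.1 -> 192.0.2.2, TCP (6), 40-byte segment: example only */
        printf("0x%04x\n", pseudo_hdr_sum(0xc0000201, 0xc0000202, 6, 40));
        return 0;
}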
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 55236cb71174..1a49b73f7f6e 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0));
}
+static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index cf485f9aa563..8eea35d32a75 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -64,7 +64,7 @@ struct fib6_node {
__u16 fn_bit; /* bit key */
__u16 fn_flags;
- __u32 fn_sernum;
+ int fn_sernum;
struct rt6_info *rr_ptr;
};
@@ -202,15 +202,25 @@ static inline void ip6_rt_put(struct rt6_info *rt)
dst_release(&rt->dst);
}
-struct fib6_walker_t {
+enum fib6_walk_state {
+#ifdef CONFIG_IPV6_SUBTREES
+ FWS_S,
+#endif
+ FWS_L,
+ FWS_R,
+ FWS_C,
+ FWS_U
+};
+
+struct fib6_walker {
struct list_head lh;
struct fib6_node *root, *node;
struct rt6_info *leaf;
- unsigned char state;
- unsigned char prune;
+ enum fib6_walk_state state;
+ bool prune;
unsigned int skip;
unsigned int count;
- int (*func)(struct fib6_walker_t *);
+ int (*func)(struct fib6_walker *);
void *args;
};
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index a5593dab6af7..9326c41c2d7f 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -65,7 +65,8 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t);
void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9922093f575e..09a819ee2151 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -65,7 +65,8 @@ struct fnhe_hash_bucket {
struct fib_nh_exception __rcu *chain;
};
-#define FNHE_HASH_SIZE 2048
+#define FNHE_HASH_SHIFT 11
+#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT)
#define FNHE_RECLAIM_DEPTH 5
struct fib_nh {
@@ -87,7 +88,7 @@ struct fib_nh {
int nh_saddr_genid;
struct rtable __rcu * __percpu *nh_pcpu_rth_output;
struct rtable __rcu *nh_rth_input;
- struct fnhe_hash_bucket *nh_exceptions;
+ struct fnhe_hash_bucket __rcu *nh_exceptions;
};
/*
@@ -200,8 +201,8 @@ void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
-#define TABLE_LOCAL_INDEX 0
-#define TABLE_MAIN_INDEX 1
+#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
+#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1))
static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 8dd8cab88b87..25a59eb388a6 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -10,6 +10,7 @@
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
+#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#if IS_ENABLED(CONFIG_IPV6)
@@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm {
};
#endif
+struct ip_tunnel_encap {
+ __u16 type;
+ __u16 flags;
+ __be16 sport;
+ __be16 dport;
+};
+
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
@@ -56,13 +64,18 @@ struct ip_tunnel {
/* These four fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
__u32 o_seqno; /* The last output seqno */
- int hlen; /* Precalculated header length */
+ int tun_hlen; /* Precalculated header length */
int mlink;
struct ip_tunnel_dst __percpu *dst_cache;
struct ip_tunnel_parm parms;
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ struct ip_tunnel_encap encap;
+
+ int hlen; /* tun_hlen + encap_hlen */
+
/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
@@ -73,15 +86,18 @@ struct ip_tunnel {
struct gro_cells gro_cells;
};
-#define TUNNEL_CSUM __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING __cpu_to_be16(0x02)
-#define TUNNEL_KEY __cpu_to_be16(0x04)
-#define TUNNEL_SEQ __cpu_to_be16(0x08)
-#define TUNNEL_STRICT __cpu_to_be16(0x10)
-#define TUNNEL_REC __cpu_to_be16(0x20)
-#define TUNNEL_VERSION __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
+#define TUNNEL_CSUM __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING __cpu_to_be16(0x02)
+#define TUNNEL_KEY __cpu_to_be16(0x04)
+#define TUNNEL_SEQ __cpu_to_be16(0x08)
+#define TUNNEL_STRICT __cpu_to_be16(0x10)
+#define TUNNEL_REC __cpu_to_be16(0x20)
+#define TUNNEL_VERSION __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
+#define TUNNEL_OAM __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
+#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
struct tnl_ptk_info {
__be16 flags;
@@ -101,6 +117,22 @@ struct ip_tunnel_net {
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
};
+struct ip_tunnel_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+};
+
+#define MAX_IPTUN_ENCAP_OPS 8
+
+extern const struct ip_tunnel_encap_ops __rcu *
+ iptun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -114,6 +146,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
@@ -131,6 +165,8 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
+int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ struct ip_tunnel_encap *ipencap);
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
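The iptun_encaps[] array and ip_tunnel_encap_add_ops()/ip_tunnel_encap_del_ops() added above let an encapsulation provider (FOU/GUE in this series) plug extra header handling into ip_tunnel_xmit() via ip_tunnel_encap(). The following is a hedged sketch of what a provider module might look like; MY_ENCAP_TYPE and the callback bodies are placeholders, and only struct ip_tunnel_encap_ops and the add/del helpers come from this header.

#include <linux/module.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>

#define MY_ENCAP_TYPE 1 /* hypothetical slot, must be < MAX_IPTUN_ENCAP_OPS */

static size_t my_encap_hlen(struct ip_tunnel_encap *e)
{
        return sizeof(struct udphdr);   /* bytes this encap will prepend */
}

static int my_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                           u8 *protocol, struct flowi4 *fl4)
{
        /* A real provider pushes its outer header onto the skb here and
         * updates *protocol and the flowi4 ports used for route lookup.
         */
        return 0;
}

static const struct ip_tunnel_encap_ops my_encap_ops = {
        .encap_hlen     = my_encap_hlen,
        .build_header   = my_build_header,
};

static int __init my_encap_init(void)
{
        return ip_tunnel_encap_add_ops(&my_encap_ops, MY_ENCAP_TYPE);
}
module_init(my_encap_init);

static void __exit my_encap_exit(void)
{
        ip_tunnel_encap_del_ops(&my_encap_ops, MY_ENCAP_TYPE);
}
module_exit(my_encap_exit);

MODULE_LICENSE("GPL");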
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 624a8a54806d..615b20b58545 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1,6 +1,5 @@
-/*
- * IP Virtual Server
- * data structure and functionality definitions
+/* IP Virtual Server
+ * data structure and functionality definitions
*/
#ifndef _NET_IP_VS_H
@@ -12,7 +11,7 @@
#include <linux/list.h> /* for struct list_head */
#include <linux/spinlock.h> /* for struct rwlock_t */
-#include <linux/atomic.h> /* for struct atomic_t */
+#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -30,15 +29,13 @@
#endif
#include <net/net_namespace.h> /* Netw namespace */
-/*
- * Generic access of ipvs struct
- */
+/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net* net)
{
return net->ipvs;
}
-/*
- * Get net ptr from skb in traffic cases
+
+/* Get net ptr from skb in traffic cases
* use skb_sknet when call is from userland (ioctl or netlink)
*/
static inline struct net *skb_net(const struct sk_buff *skb)
@@ -90,8 +87,8 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
return &init_net;
#endif
}
-/*
- * This one needed for single_open_net since net is stored directly in
+
+/* This one needed for single_open_net since net is stored directly in
* private not as a struct i.e. seq_file_net can't be used.
*/
static inline struct net *seq_file_single_net(struct seq_file *seq)
@@ -108,7 +105,7 @@ extern int ip_vs_conn_tab_size;
struct ip_vs_iphdr {
__u32 len; /* IPv4 simply where L4 starts
- IPv6 where L4 Transport Header starts */
+ * IPv6 where L4 Transport Header starts */
__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
__s16 protocol;
__s32 flags;
@@ -304,16 +301,11 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
#define LeaveFunction(level) do {} while (0)
#endif
-
-/*
- * The port number of FTP service (in network order).
- */
+/* The port number of FTP service (in network order). */
#define FTPPORT cpu_to_be16(21)
#define FTPDATA cpu_to_be16(20)
-/*
- * TCP State Values
- */
+/* TCP State Values */
enum {
IP_VS_TCP_S_NONE = 0,
IP_VS_TCP_S_ESTABLISHED,
@@ -329,25 +321,19 @@ enum {
IP_VS_TCP_S_LAST
};
-/*
- * UDP State Values
- */
+/* UDP State Values */
enum {
IP_VS_UDP_S_NORMAL,
IP_VS_UDP_S_LAST,
};
-/*
- * ICMP State Values
- */
+/* ICMP State Values */
enum {
IP_VS_ICMP_S_NORMAL,
IP_VS_ICMP_S_LAST,
};
-/*
- * SCTP State Values
- */
+/* SCTP State Values */
enum ip_vs_sctp_states {
IP_VS_SCTP_S_NONE,
IP_VS_SCTP_S_INIT1,
@@ -366,21 +352,18 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
-/*
- * Delta sequence info structure
- * Each ip_vs_conn has 2 (output AND input seq. changes).
- * Only used in the VS/NAT.
+/* Delta sequence info structure
+ * Each ip_vs_conn has 2 (output AND input seq. changes).
+ * Only used in the VS/NAT.
*/
struct ip_vs_seq {
__u32 init_seq; /* Add delta from this seq */
__u32 delta; /* Delta in sequence numbers */
__u32 previous_delta; /* Delta in sequence numbers
- before last resized pkt */
+ * before last resized pkt */
};
-/*
- * counters per cpu
- */
+/* counters per cpu */
struct ip_vs_counters {
__u32 conns; /* connections scheduled */
__u32 inpkts; /* incoming packets */
@@ -388,17 +371,13 @@ struct ip_vs_counters {
__u64 inbytes; /* incoming bytes */
__u64 outbytes; /* outgoing bytes */
};
-/*
- * Stats per cpu
- */
+/* Stats per cpu */
struct ip_vs_cpu_stats {
struct ip_vs_counters ustats;
struct u64_stats_sync syncp;
};
-/*
- * IPVS statistics objects
- */
+/* IPVS statistics objects */
struct ip_vs_estimator {
struct list_head list;
@@ -491,9 +470,7 @@ struct ip_vs_protocol {
void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};
-/*
- * protocol data per netns
- */
+/* protocol data per netns */
struct ip_vs_proto_data {
struct ip_vs_proto_data *next;
struct ip_vs_protocol *pp;
@@ -520,9 +497,7 @@ struct ip_vs_conn_param {
__u8 pe_data_len;
};
-/*
- * IP_VS structure allocated for each dynamically scheduled connection
- */
+/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
struct hlist_node c_list; /* hashed list heads */
/* Protocol, addresses and port numbers */
@@ -535,6 +510,7 @@ struct ip_vs_conn {
union nf_inet_addr daddr; /* destination address */
volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
+ __u16 daf; /* Address family of the dest */
#ifdef CONFIG_NET_NS
struct net *net; /* Name space */
#endif
@@ -560,17 +536,18 @@ struct ip_vs_conn {
struct ip_vs_dest *dest; /* real server */
atomic_t in_pkts; /* incoming packet counter */
- /* packet transmitter for different forwarding methods. If it
- mangles the packet, it must return NF_DROP or better NF_STOLEN,
- otherwise this must be changed to a sk_buff **.
- NF_ACCEPT can be returned when destination is local.
+ /* Packet transmitter for different forwarding methods. If it
+ * mangles the packet, it must return NF_DROP or better NF_STOLEN,
+ * otherwise this must be changed to a sk_buff **.
+ * NF_ACCEPT can be returned when destination is local.
*/
int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
/* Note: we can group the following members into a structure,
- in order to save more space, and the following members are
- only used in VS/NAT anyway */
+ * in order to save more space, and the following members are
+ * only used in VS/NAT anyway
+ */
struct ip_vs_app *app; /* bound ip_vs_app object */
void *app_data; /* Application private data */
struct ip_vs_seq in_seq; /* incoming seq. struct */
@@ -583,9 +560,7 @@ struct ip_vs_conn {
struct rcu_head rcu_head;
};
-/*
- * To save some memory in conn table when name space is disabled.
- */
+/* To save some memory in conn table when name space is disabled. */
static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
{
#ifdef CONFIG_NET_NS
@@ -594,6 +569,7 @@ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
return &init_net;
#endif
}
+
static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
{
#ifdef CONFIG_NET_NS
@@ -611,13 +587,12 @@ static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
#endif
}
-/*
- * Extended internal versions of struct ip_vs_service_user and
- * ip_vs_dest_user for IPv6 support.
+/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
+ * for IPv6 support.
*
- * We need these to conveniently pass around service and destination
- * options, but unfortunately, we also need to keep the old definitions to
- * maintain userspace backwards compatibility for the setsockopt interface.
+ * We need these to conveniently pass around service and destination
+ * options, but unfortunately, we also need to keep the old definitions to
+ * maintain userspace backwards compatibility for the setsockopt interface.
*/
struct ip_vs_service_user_kern {
/* virtual service addresses */
@@ -648,12 +623,15 @@ struct ip_vs_dest_user_kern {
/* thresholds for active connections */
u32 u_threshold; /* upper threshold */
u32 l_threshold; /* lower threshold */
+
+ /* Address family of addr */
+ u16 af;
};
/*
- * The information about the virtual service offered to the net
- * and the forwarding entries
+ * The information about the virtual service offered to the net and the
+ * forwarding entries.
*/
struct ip_vs_service {
struct hlist_node s_list; /* for normal service table */
@@ -693,9 +671,8 @@ struct ip_vs_dest_dst {
struct rcu_head rcu_head;
};
-/*
- * The real server destination forwarding entry
- * with ip address, port number, and so on.
+/* The real server destination forwarding entry with ip address, port number,
+ * and so on.
*/
struct ip_vs_dest {
struct list_head n_list; /* for the dests in the service */
@@ -734,10 +711,7 @@ struct ip_vs_dest {
unsigned int in_rs_table:1; /* we are in rs_table */
};
-
-/*
- * The scheduler object
- */
+/* The scheduler object */
struct ip_vs_scheduler {
struct list_head n_list; /* d-linked list head */
char *name; /* scheduler name */
@@ -777,9 +751,7 @@ struct ip_vs_pe {
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
};
-/*
- * The application module object (a.k.a. app incarnation)
- */
+/* The application module object (a.k.a. app incarnation) */
struct ip_vs_app {
struct list_head a_list; /* member in app list */
int type; /* IP_VS_APP_TYPE_xxx */
@@ -795,16 +767,14 @@ struct ip_vs_app {
atomic_t usecnt; /* usage counter */
struct rcu_head rcu_head;
- /*
- * output hook: Process packet in inout direction, diff set for TCP.
+ /* output hook: Process packet in inout direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
struct sk_buff *, int *diff);
- /*
- * input hook: Process packet in outin direction, diff set for TCP.
+ /* input hook: Process packet in outin direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
@@ -863,9 +833,7 @@ struct ipvs_master_sync_state {
struct netns_ipvs {
int gen; /* Generation */
int enable; /* enable like nf_hooks do */
- /*
- * Hash table: for real service lookups
- */
+ /* Hash table: for real service lookups */
#define IP_VS_RTAB_BITS 4
#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
@@ -899,7 +867,7 @@ struct netns_ipvs {
struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
#endif
/* ip_vs_conn */
- atomic_t conn_count; /* connection counter */
+ atomic_t conn_count; /* connection counter */
/* ip_vs_ctl */
struct ip_vs_stats tot_stats; /* Statistics & est. */
@@ -986,6 +954,10 @@ struct netns_ipvs {
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
/* net name space ptr */
struct net *net; /* Needed by timer routines */
+ /* Number of heterogeneous destinations, needed because heterogeneous
+ * destinations are not supported when synchronization is enabled.
+ */
+ unsigned int mixed_address_family_dests;
};
#define DEFAULT_SYNC_THRESHOLD 3
@@ -1139,9 +1111,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
#endif
-/*
- * IPVS core functions
- * (from ip_vs_core.c)
+/* IPVS core functions
+ * (from ip_vs_core.c)
*/
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
@@ -1149,11 +1120,9 @@ void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_APP_TYPE_FTP 1
-/*
- * ip_vs_conn handling functions
- * (from ip_vs_conn.c)
+/* ip_vs_conn handling functions
+ * (from ip_vs_conn.c)
*/
-
enum {
IP_VS_DIR_INPUT = 0,
IP_VS_DIR_OUTPUT,
@@ -1210,7 +1179,7 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
-struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
+struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr,
__be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark);
@@ -1284,9 +1253,7 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
-/*
- * IPVS netns init & cleanup functions
- */
+/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct net *net);
int ip_vs_control_net_init(struct net *net);
int ip_vs_protocol_net_init(struct net *net);
@@ -1301,9 +1268,8 @@ void ip_vs_estimator_net_cleanup(struct net *net);
void ip_vs_sync_net_cleanup(struct net *net);
void ip_vs_service_net_cleanup(struct net *net);
-/*
- * IPVS application functions
- * (from ip_vs_app.c)
+/* IPVS application functions
+ * (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
@@ -1323,9 +1289,7 @@ int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
-/*
- * Use a #define to avoid all of module.h just for these trivial ops
- */
+/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe) \
if (pe && pe->module) \
__module_get(pe->module);
@@ -1334,9 +1298,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
if (pe && pe->module) \
module_put(pe->module);
-/*
- * IPVS protocol functions (from ip_vs_proto.c)
- */
+/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
@@ -1354,9 +1316,8 @@ extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;
-/*
- * Registering/unregistering scheduler functions
- * (from ip_vs_sched.c)
+/* Registering/unregistering scheduler functions
+ * (from ip_vs_sched.c)
*/
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
@@ -1375,10 +1336,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
-
-/*
- * IPVS control data and functions (from ip_vs_ctl.c)
- */
+/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;
@@ -1396,8 +1354,9 @@ void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
-ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
- __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
+ const union nf_inet_addr *daddr, __be16 dport,
+ const union nf_inet_addr *vaddr, __be16 vport,
__u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
@@ -1418,26 +1377,21 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
kfree(dest);
}
-/*
- * IPVS sync daemon data and function prototypes
- * (from ip_vs_sync.c)
+/* IPVS sync daemon data and function prototypes
+ * (from ip_vs_sync.c)
*/
int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
int stop_sync_thread(struct net *net, int state);
void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-/*
- * IPVS rate estimator prototypes (from ip_vs_est.c)
- */
+/* IPVS rate estimator prototypes (from ip_vs_est.c) */
void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
struct ip_vs_stats *stats);
-/*
- * Various IPVS packet transmitters (from ip_vs_xmit.c)
- */
+/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1468,12 +1422,10 @@ int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
#endif
#ifdef CONFIG_SYSCTL
-/*
- * This is a simple mechanism to ignore packets when
- * we are loaded. Just set ip_vs_drop_rate to 'n' and
- * we start to drop 1/rate of the packets
+/* This is a simple mechanism to ignore packets when
+ * we are loaded. Just set ip_vs_drop_rate to 'n' and
+ * we start to drop 1/rate of the packets
*/
-
static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
if (!ipvs->drop_rate)
@@ -1487,9 +1439,7 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
-/*
- * ip_vs_fwd_tag returns the forwarding tag of the connection
- */
+/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
@@ -1548,9 +1498,7 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
return csum_partial(diff, sizeof(diff), oldsum);
}
-/*
- * Forget current conntrack (unconfirmed) and attach notrack entry
- */
+/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -1567,9 +1515,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
}
#ifdef CONFIG_IP_VS_NFCT
-/*
- * Netfilter connection tracking
- * (from ip_vs_nfct.c)
+/* Netfilter connection tracking
+ * (from ip_vs_nfct.c)
*/
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
@@ -1608,14 +1555,12 @@ static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
-/* CONFIG_IP_VS_NFCT */
-#endif
+#endif /* CONFIG_IP_VS_NFCT */
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
- /*
- * We think the overhead of processing active connections is 256
+ /* We think the overhead of processing active connections is 256
* times higher than that of inactive connections in average. (This
* 256 times might not be accurate, we will change it later) We
* use the following formula to estimate the overhead now:
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a2db816e8461..4292929392b0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,6 +121,7 @@ struct frag_hdr {
/* sysctls */
extern int sysctl_mld_max_msf;
+extern int sysctl_mld_qrv;
#define _DEVINC(net, statname, modifier, idev, field) \
({ \
@@ -287,7 +288,8 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
-bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
+ const struct inet6_skb_parm *opt);
static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
@@ -669,6 +671,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
+void ipv6_proxy_select_ident(struct sk_buff *skb);
+
int ip6_dst_hoplimit(struct dst_entry *dst);
static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 0143180fecc9..e5cff6811b30 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -42,6 +42,9 @@ struct ipxhdr {
struct ipx_address ipx_source __packed;
};
+/* From af_ipx.c */
+extern int sysctl_ipx_pprop_broadcasting;
+
static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
{
return (struct ipxhdr *)skb_transport_header(skb);
@@ -147,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
unsigned char *node);
void ipxrtr_del_routes(struct ipx_interface *intrfc);
int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct iovec *iov, size_t len, int noblock);
+ struct msghdr *msg, size_t len, int noblock);
int ipxrtr_route_skb(struct sk_buff *skb);
struct ipx_route *ipxrtr_lookup(__be32 net);
int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index a059465101ff..92c8fb575213 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -55,16 +55,6 @@ typedef __u32 magic_t;
#endif
#ifdef CONFIG_IRDA_DEBUG
-
-extern unsigned int irda_debug;
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#define IRDA_DEBUG_LEVEL 0
-
-#define IRDA_DEBUG(n, args...) \
-do { if (irda_debug >= (n)) \
- printk(KERN_DEBUG args); \
-} while (0)
#define IRDA_ASSERT(expr, func) \
do { if(!(expr)) { \
printk( "Assertion failed! %s:%s:%d %s\n", \
@@ -72,15 +62,10 @@ do { if(!(expr)) { \
func } } while (0)
#define IRDA_ASSERT_LABEL(label) label
#else
-#define IRDA_DEBUG(n, args...) do { } while (0)
#define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0)
#define IRDA_ASSERT_LABEL(label)
#endif /* CONFIG_IRDA_DEBUG */
-#define IRDA_WARNING(args...) do { if (net_ratelimit()) printk(KERN_WARNING args); } while (0)
-#define IRDA_MESSAGE(args...) do { if (net_ratelimit()) printk(KERN_INFO args); } while (0)
-#define IRDA_ERROR(args...) do { if (net_ratelimit()) printk(KERN_ERR args); } while (0)
-
/*
* Magic numbers used by Linux-IrDA. Random numbers which must be unique to
* give the best protection
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index fb4b76d5d7f1..6f23e820618c 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -303,7 +303,7 @@ static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
if (!self || self->magic != LAP_MAGIC)
return;
- IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
+ pr_debug("next LAP state = %s\n", irlap_state[state]);
*/
self->state = state;
}
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
index 42713c931d1f..2d9cd0007cba 100644
--- a/include/net/irda/parameters.h
+++ b/include/net/irda/parameters.h
@@ -71,17 +71,17 @@ typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi,
PV_TYPE type, PI_HANDLER func);
typedef struct {
- PI_HANDLER func; /* Handler for this parameter identifier */
+ const PI_HANDLER func; /* Handler for this parameter identifier */
PV_TYPE type; /* Data type for this parameter */
} pi_minor_info_t;
typedef struct {
- pi_minor_info_t *pi_minor_call_table;
+ const pi_minor_info_t *pi_minor_call_table;
int len;
} pi_major_info_t;
typedef struct {
- pi_major_info_t *tables;
+ const pi_major_info_t *tables;
int len;
__u8 pi_mask;
int pi_major_offset;
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
index be95b9262801..aab0f427edb5 100644
--- a/include/net/lib80211.h
+++ b/include/net/lib80211.h
@@ -32,11 +32,6 @@
#include <linux/timer.h>
#include <linux/seq_file.h>
-/* print_ssid() is intended to be used in debug (and possibly error)
- * messages. It should never be used for passing ssid to user space. */
-const char *print_ssid(char *buf, const char *ssid, u8 ssid_len);
-#define DECLARE_SSID_BUF(var) char var[IEEE80211_MAX_SSID_LEN * 4 + 1] __maybe_unused
-
#define NUM_WEP_KEYS 4
enum {
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 0e79cfba4b3b..48f3f891b2f9 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -35,8 +35,8 @@
struct llc_conn_state_trans {
llc_conn_ev_t ev;
u8 next_state;
- llc_conn_ev_qfyr_t *ev_qualifiers;
- llc_conn_action_t *ev_actions;
+ const llc_conn_ev_qfyr_t *ev_qualifiers;
+ const llc_conn_action_t *ev_actions;
};
struct llc_conn_state {
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index 567c681f1f3e..c4359e203013 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -19,7 +19,7 @@
struct llc_sap_state_trans {
llc_sap_ev_t ev;
u8 next_state;
- llc_sap_action_t *ev_actions;
+ const llc_sap_action_t *ev_actions;
};
struct llc_sap_state {
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index dae2e24616e1..58d719ddaa60 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4,6 +4,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -262,6 +263,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
* note that this is only called when it changes after the channel
* context had been assigned.
+ * @BSS_CHANGED_OCB: OCB join status changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -286,6 +288,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_P2P_PS = 1<<19,
BSS_CHANGED_BEACON_INFO = 1<<20,
BSS_CHANGED_BANDWIDTH = 1<<21,
+ BSS_CHANGED_OCB = 1<<22,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -738,7 +741,8 @@ struct ieee80211_tx_info {
u8 ampdu_ack_len;
u8 ampdu_len;
u8 antenna;
- void *status_driver_data[21 / sizeof(void *)];
+ u16 tx_time;
+ void *status_driver_data[19 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -878,6 +882,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* subframes share the same sequence number. Reported subframes can be
* either regular MSDU or singly A-MSDUs. Subframes must not be
* interleaved with other frames.
+ * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
+ * radiotap data in the skb->data (before the frame) as described by
+ * the &struct ieee80211_vendor_radiotap.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -907,6 +914,7 @@ enum mac80211_rx_flags {
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
RX_FLAG_AMSDU_MORE = BIT(30),
+ RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -978,6 +986,39 @@ struct ieee80211_rx_status {
};
/**
+ * struct ieee80211_vendor_radiotap - vendor radiotap data information
+ * @present: presence bitmap for this vendor namespace
+ * (this could be extended in the future if any vendor needs more
+ * bits, the radiotap spec does allow for that)
+ * @align: radiotap vendor namespace alignment. This defines the needed
+ * alignment for the @data field below, not for the vendor namespace
+ * description itself (which has a fixed 2-byte alignment)
+ * Must be a power of two, and be set to at least 1!
+ * @oui: radiotap vendor namespace OUI
+ * @subns: radiotap vendor sub namespace
+ * @len: radiotap vendor sub namespace skip length, if alignment is done
+ * then that's added to this, i.e. this is only the length of the
+ * @data field.
+ * @pad: number of bytes of padding after the @data, this exists so that
+ * the skb data alignment can be preserved even if the data has odd
+ * length
+ * @data: the actual vendor namespace data
+ *
+ * This struct, including the vendor data, goes into the skb->data before
+ * the 802.11 header. It's split up in mac80211 using the align/oui/subns
+ * data.
+ */
+struct ieee80211_vendor_radiotap {
+ u32 present;
+ u8 align;
+ u8 oui[3];
+ u8 subns;
+ u8 pad;
+ u16 len;
+ u8 data[];
+} __packed;
+
+/**
* enum ieee80211_conf_flags - configuration flags
*
* Flags to define PHY configuration options
@@ -1116,6 +1157,8 @@ struct ieee80211_conf {
* Function (TSF) timer when the frame containing the channel switch
* announcement was received. This is simply the rx.mactime parameter
* the driver passed into mac80211.
+ * @device_timestamp: arbitrary timestamp for the device, this is the
+ * rx.device_timestamp parameter the driver passed to mac80211.
* @block_tx: Indicates whether transmission must be blocked before the
* scheduled channel switch, as indicated by the AP.
* @chandef: the new channel to switch to
@@ -1123,6 +1166,7 @@ struct ieee80211_conf {
*/
struct ieee80211_channel_switch {
u64 timestamp;
+ u32 device_timestamp;
bool block_tx;
struct cfg80211_chan_def chandef;
u8 count;
@@ -1226,7 +1270,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key.
+ * particular key. Setting this flag does not necessarily mean that SKBs
+ * will have sufficient tailroom for ICV or MIC.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
@@ -1238,7 +1283,9 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
+ * not necessarily mean that SKBs will have sufficient tailroom for ICV or
+ * MIC.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
@@ -1405,7 +1452,7 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @wme: indicates whether the STA supports WME. Only valid during AP-mode.
+ * @wme: indicates whether the STA supports QoS/WME.
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1419,6 +1466,8 @@ struct ieee80211_sta_rates {
* @smps_mode: current SMPS mode (off, static or dynamic)
* @rates: rate control selection table
* @tdls: indicates whether the STA is a TDLS peer
+ * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
+ * valid if the STA is a TDLS peer in the first place.
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1434,6 +1483,7 @@ struct ieee80211_sta {
enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
+ bool tdls_initiator;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1533,16 +1583,6 @@ struct ieee80211_tx_control {
* @IEEE80211_HW_MFP_CAPABLE:
* Hardware supports management frame protection (MFP, IEEE 802.11w).
*
- * @IEEE80211_HW_SUPPORTS_STATIC_SMPS:
- * Hardware supports static spatial multiplexing powersave,
- * ie. can turn off all but one chain even on HT connections
- * that should be using more chains.
- *
- * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS:
- * Hardware supports dynamic spatial multiplexing powersave,
- * ie. can turn off all but one chain and then wake the rest
- * up as required after, for example, rts/cts handshake.
- *
* @IEEE80211_HW_SUPPORTS_UAPSD:
* Hardware supports Unscheduled Automatic Power Save Delivery
* (U-APSD) in managed mode. The mode is configured with
@@ -1582,6 +1622,10 @@ struct ieee80211_tx_control {
* a virtual monitor interface when monitor interfaces are the only
* active interfaces.
*
+ * @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to
+ * be created. It is expected user-space will create vifs as
+ * desired (and thus have them named as desired).
+ *
* @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
* queue mapping in order to use different queues (not just one per AC)
* for different virtual interfaces. See the doc section on HW queue
@@ -1606,6 +1650,9 @@ struct ieee80211_tx_control {
* is not enabled the default action is to disconnect when getting the
* CSA frame.
*
+ * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
+ * or tailroom of TX skbs without copying them first.
+ *
* @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
*/
@@ -1625,8 +1672,8 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
- IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
+ IEEE80211_HW_NO_AUTO_VIF = 1<<15,
+ /* free slot */
IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
@@ -1639,7 +1686,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
- /* bit 29 unused */
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29,
IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30,
};
@@ -1780,6 +1827,31 @@ struct ieee80211_scan_request {
};
/**
+ * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters
+ *
+ * @sta: peer this TDLS channel-switch request/response came from
+ * @chandef: channel referenced in a TDLS channel-switch request
+ * @action_code: see &enum ieee80211_tdls_actioncode
+ * @status: channel-switch response status
+ * @timestamp: time at which the frame was received
+ * @switch_time: switch-timing parameter received in the frame
+ * @switch_timeout: switch-timing parameter received in the frame
+ * @tmpl_skb: TDLS switch-channel response template
+ * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb
+ */
+struct ieee80211_tdls_ch_sw_params {
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def *chandef;
+ u8 action_code;
+ u32 status;
+ u32 timestamp;
+ u16 switch_time;
+ u16 switch_timeout;
+ struct sk_buff *tmpl_skb;
+ u32 ch_sw_tm_ie;
+};
+
+/**
* wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy
*
* @wiphy: the &struct wiphy which we want to query
@@ -2379,6 +2451,22 @@ enum ieee80211_roc_type {
};
/**
+ * enum ieee80211_reconfig_complete_type - reconfig type
+ *
+ * This enum is used by the reconfig_complete() callback to indicate what
+ * reconfiguration type was completed.
+ *
+ * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type
+ * (also due to resume() callback returning 1)
+ * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless
+ * of wowlan configuration)
+ */
+enum ieee80211_reconfig_type {
+ IEEE80211_RECONFIG_TYPE_RESTART,
+ IEEE80211_RECONFIG_TYPE_SUSPEND,
+};
+
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -2534,7 +2622,9 @@ enum ieee80211_roc_type {
*
* @sw_scan_start: Notifier function that is called just before a software scan
* is started. Can be NULL, if the driver doesn't need this notification.
- * The callback can sleep.
+ * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR,
+ * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it
+ * can use this parameter. The callback can sleep.
*
* @sw_scan_complete: Notifier function that is called just after a
* software scan finished. Can be NULL, if the driver doesn't need
@@ -2605,6 +2695,9 @@ enum ieee80211_roc_type {
* uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
* otherwise the rate control algorithm is notified directly.
* Must be atomic.
+ * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This
+ * is only used if the configured rate control algorithm actually uses
+ * the new rate table API, and is therefore optional. Must be atomic.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
@@ -2666,7 +2759,9 @@ enum ieee80211_roc_type {
*
* @set_coverage_class: Set slot time for given coverage class as specified
* in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
- * accordingly. This callback is not required and may sleep.
+ * accordingly; a coverage class of -1 enables the ACK timeout
+ * estimation algorithm (dynack). To disable dynack, set a valid
+ * coverage class value. This callback is not required and may sleep.
*
* @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
* be %NULL. The callback can sleep.
@@ -2811,11 +2906,11 @@ enum ieee80211_roc_type {
* disabled/enabled via @bss_info_changed.
* @stop_ap: Stop operation on the AP interface.
*
- * @restart_complete: Called after a call to ieee80211_restart_hw(), when the
- * reconfiguration has completed. This can help the driver implement the
- * reconfiguration step. Also called when reconfiguring because the
- * driver's resume function returned 1, as this is just like an "inline"
- * hardware restart. This callback may sleep.
+ * @reconfig_complete: Called after a call to ieee80211_restart_hw() and
+ * during resume, when the reconfiguration has completed.
+ * This can help the driver implement the reconfiguration step (and
+ * indicate mac80211 is ready to receive frames).
+ * This callback may sleep.
*
* @ipv6_addr_change: IPv6 address assignment on the given interface changed.
* Currently, this is only called for managed or P2P client interfaces.
@@ -2831,6 +2926,13 @@ enum ieee80211_roc_type {
* transmitted and then call ieee80211_csa_finish().
* If the CSA count starts as zero or 1, this function will not be called,
* since there won't be any time to beacon before the switch anyway.
+ * @pre_channel_switch: This is an optional callback that is called
+ * before a channel switch procedure is started (i.e. when a STA
+ * gets a CSA or a userspace-initiated channel-switch), allowing
+ * the driver to prepare for the channel switch.
+ * @post_channel_switch: This is an optional callback that is called
+ * after a channel switch procedure is completed, allowing the
+ * driver to go back to a normal configuration.
*
* @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
* information in bss_conf is set up and the beacon can be retrieved. A
@@ -2840,6 +2942,26 @@ enum ieee80211_roc_type {
* @get_expected_throughput: extract the expected throughput towards the
* specified station. The returned value is expressed in Kbps. It returns 0
* if the RC algorithm does not have proper data to provide.
+ *
+ * @get_txpower: get current maximum tx power (in dBm) based on configuration
+ * and hardware limits.
+ *
+ * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
+ * is responsible for continually initiating channel-switching operations
+ * and returning to the base channel for communication with the AP. The
+ * driver receives a channel-switch request template and the location of
+ * the switch-timing IE within the template as part of the invocation.
+ * The template is valid only within the call, and the driver can
+ * optionally copy the skb for further re-use.
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
+ * peers must be on the base channel when the call completes.
+ * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or
+ * response) has been received from a remote peer. The driver gets
+ * parameters parsed from the incoming frame and may use them to continue
+ * an ongoing channel-switch operation. In addition, a channel-switch
+ * response template is provided, together with the location of the
+ * switch-timing IE within the template. The skb can only be used within
+ * the function call.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -2899,8 +3021,11 @@ struct ieee80211_ops {
struct ieee80211_scan_ies *ies);
int (*sched_scan_stop)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
- void (*sw_scan_start)(struct ieee80211_hw *hw);
- void (*sw_scan_complete)(struct ieee80211_hw *hw);
+ void (*sw_scan_start)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr);
+ void (*sw_scan_complete)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int (*get_stats)(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
@@ -2934,6 +3059,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 changed);
+ void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int (*conf_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params);
@@ -2950,7 +3078,7 @@ struct ieee80211_ops {
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
- void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
@@ -2961,6 +3089,7 @@ struct ieee80211_ops {
void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
void (*channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
@@ -3027,7 +3156,8 @@ struct ieee80211_ops {
int n_vifs,
enum ieee80211_chanctx_switch_mode mode);
- void (*restart_complete)(struct ieee80211_hw *hw);
+ void (*reconfig_complete)(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
#if IS_ENABLED(CONFIG_IPV6)
void (*ipv6_addr_change)(struct ieee80211_hw *hw,
@@ -3037,14 +3167,54 @@ struct ieee80211_ops {
void (*channel_switch_beacon)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef);
+ int (*pre_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
+
+ int (*post_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
+ int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm);
+
+ int (*tdls_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+ void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
};
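As a rough illustration of the template-lifetime rule described in the @tdls_channel_switch documentation above, a driver could keep its own copy of the request template before returning; the foo_priv structure and its members below are hypothetical:

static int foo_tdls_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta, u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct foo_priv *priv = hw->priv;	/* hypothetical driver data */

	/* the template skb is only valid within this call, so copy it
	 * if the device/firmware needs it after we return
	 */
	priv->tdls_tmpl = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!priv->tdls_tmpl)
		return -ENOMEM;

	priv->tdls_tm_ie_offset = ch_sw_tm_ie;
	/* ... program the channel switch into the device here ... */
	return 0;
}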
/**
- * ieee80211_alloc_hw - Allocate a new hardware device
+ * ieee80211_alloc_hw_nm - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac80211 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee80211_hw; the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ * @requested_name: Requested name for this device.
+ *	NULL is a valid value and means use the default naming (phy%d)
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
+struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ const struct ieee80211_ops *ops,
+ const char *requested_name);
+
+/**
+ * ieee80211_alloc_hw - Allocate a new hardware device
*
* This must be called once for each hardware device. The returned pointer
* must be used to refer to this device when calling other functions.
@@ -3057,8 +3227,12 @@ struct ieee80211_ops {
*
* Return: A pointer to the new hardware device, or %NULL on error.
*/
+static inline
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
- const struct ieee80211_ops *ops);
+ const struct ieee80211_ops *ops)
+{
+ return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL);
+}
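A minimal sketch of the new allocator from a driver's probe path, assuming a hypothetical foo_priv private struct and foo_ops callback table:

	struct ieee80211_hw *hw;

	/* request a specific phy name; passing NULL keeps the phy%d
	 * default, which is exactly what ieee80211_alloc_hw() does
	 */
	hw = ieee80211_alloc_hw_nm(sizeof(struct foo_priv), &foo_ops, "foo-phy");
	if (!hw)
		return -ENOMEM;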
/**
* ieee80211_register_hw - Register hardware device
@@ -3445,6 +3619,26 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_noskb - transmit status callback without skb
+ *
+ * This function can be used as a replacement for ieee80211_tx_status
+ * in drivers that cannot reliably map tx status information back to
+ * specific skbs.
+ *
+ * Calls to this function for a single hardware must be synchronized
+ * against each other. Calls to this function, ieee80211_tx_status_ni()
+ * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @sta: the receiver station to which this packet is sent
+ * (NULL for multicast packets)
+ * @info: tx status information
+ */
+void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info);
+
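For example, a driver whose firmware only reports per-station transmit results (with no way to map them back to a specific skb) might forward them roughly like this; the report structure is invented for the sketch:

	struct ieee80211_tx_info info = {};

	if (report->acked)			/* hypothetical FW report */
		info.flags |= IEEE80211_TX_STAT_ACK;
	info.status.rates[0].idx = report->rate_idx;
	info.status.rates[0].count = report->retries + 1;

	ieee80211_tx_status_noskb(hw, sta, &info);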
+/**
* ieee80211_tx_status_ni - transmit status callback (in process context)
*
* Like ieee80211_tx_status() but can be called in process context.
@@ -3657,7 +3851,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
/**
* ieee80211_probereq_get - retrieve a Probe Request template
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @src_addr: source MAC address
* @ssid: SSID buffer
* @ssid_len: length of SSID
* @tailroom: tailroom to reserve at end of SKB for IEs
@@ -3668,7 +3862,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
* Return: The Probe Request template. %NULL on error.
*/
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+ const u8 *src_addr,
const u8 *ssid, size_t ssid_len,
size_t tailroom);
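With the @vif parameter replaced by a raw source address, a scan-offload path might build the template roughly as follows (scan_addr, ssid, ssid_len, extra_ies and extra_ies_len are placeholders):

	struct sk_buff *skb;

	skb = ieee80211_probereq_get(hw, scan_addr, ssid, ssid_len,
				     extra_ies_len);
	if (!skb)
		return -ENOMEM;
	/* the reserved tailroom takes the request's extra IEs */
	memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);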
@@ -4174,6 +4368,22 @@ void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
void *data);
/**
+ * ieee80211_iterate_stations_atomic - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them.
+ * This function requires the iterator callback function to be atomic.
+ *
+ * @hw: the hardware struct of which the stations should be iterated over
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
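A hedged example of the station iterator; the callback runs in atomic context, so it only does bookkeeping (foo_count_sta is an invented helper):

static void foo_count_sta(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;			/* must not sleep here */
}

	/* ... e.g. in a debugfs or statistics path ... */
	unsigned int n_sta = 0;

	ieee80211_iterate_stations_atomic(hw, foo_count_sta, &n_sta);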
+/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
* Drivers and mac80211 use this to add work onto the mac80211 workqueue.
@@ -4482,6 +4692,14 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
gfp_t gfp);
/**
+ * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @gfp: context flags
+ */
+void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
+
+/**
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
@@ -4639,6 +4857,10 @@ struct rate_control_ops {
void (*free_sta)(void *priv, struct ieee80211_sta *sta,
void *priv_sta);
+ void (*tx_status_noskb)(void *priv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_tx_info *info);
void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb);
@@ -4890,4 +5112,69 @@ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
enum nl80211_tdls_operation oper,
u16 reason_code, gfp_t gfp);
+
+/**
+ * ieee80211_reserve_tid - request to reserve a specific TID
+ *
+ * There is sometimes a need (such as in TDLS) for blocking the driver from
+ * using a specific TID so that the FW can use it for certain operations such
+ * as sending PTI requests. To make sure that the driver doesn't use that TID,
+ * this function must be called as it flushes out packets on this TID and marks
+ * it as blocked, so that any transmit for the station on this TID will be
+ * redirected to the alternative TID in the same AC.
+ *
+ * Note that this function blocks and may call back into the driver, so it
+ * should be called without driver locks held. Also note this function should
+ * only be called from the driver's @sta_state callback.
+ *
+ * @sta: the station to reserve the TID for
+ * @tid: the TID to reserve
+ *
+ * Returns: 0 on success; a negative error code on failure
+ */
+int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid);
+
+/**
+ * ieee80211_unreserve_tid - request to unreserve a specific TID
+ *
+ * Once there is no longer any need for reserving a certain TID, this function
+ * should be called; packets will then no longer have their TID modified to
+ * prevent use of this TID in the driver.
+ *
+ * Note that this function blocks and acquires a lock, so it should be called
+ * without driver locks held. Also note this function should only be called
+ * from the driver's @sta_state callback.
+ *
+ * @sta: the station
+ * @tid: the TID to unreserve
+ */
+void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
+
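Both helpers are intended to be called from the driver's @sta_state callback; a rough sketch, with the TID number chosen arbitrarily for the example:

	int ret;

	/* station reached a state where the firmware needs this TID */
	ret = ieee80211_reserve_tid(sta, 4);	/* TID 4 is only an example */
	if (ret)
		return ret;

	/* ... later, when the firmware is done with it ... */
	ieee80211_unreserve_tid(sta, 4);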
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ *
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
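For example, to split a buffer so that the element IDs allowed before some later element go into the first part (the ID list below is purely illustrative):

	static const u8 before_ht[] = {
		WLAN_EID_SSID,
		WLAN_EID_SUPP_RATES,
		WLAN_EID_EXT_SUPP_RATES,
	};
	size_t split;

	split = ieee80211_ie_split(ies, ielen, before_ht,
				   ARRAY_SIZE(before_ht), 0);
	/* ies[0..split) go into the first part, ies[split..ielen) follow */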
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 2e67cdd19cdc..c823d910b46c 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -12,14 +12,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef NET_MAC802154_H
#define NET_MAC802154_H
#include <net/af_ieee802154.h>
+#include <linux/ieee802154.h>
#include <linux/skbuff.h>
/* General MAC frame format:
@@ -35,13 +33,13 @@
*/
/* indicates that the Short Address changed */
-#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001
+#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001
/* indicates that the IEEE Address changed */
-#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002
+#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002
/* indicates that the PAN ID changed */
-#define IEEE802515_AFILT_PANID_CHANGED 0x00000004
+#define IEEE802154_AFILT_PANID_CHANGED 0x00000004
/* indicates that PAN Coordinator status changed */
-#define IEEE802515_AFILT_PANC_CHANGED 0x00000008
+#define IEEE802154_AFILT_PANC_CHANGED 0x00000008
struct ieee802154_hw_addr_filt {
__le16 pan_id; /* Each independent PAN selects a unique
@@ -55,7 +53,14 @@ struct ieee802154_hw_addr_filt {
u8 pan_coord;
};
-struct ieee802154_dev {
+struct ieee802154_vif {
+ int type;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+struct ieee802154_hw {
/* filled by the driver */
int extra_tx_headroom;
u32 flags;
@@ -65,6 +70,7 @@ struct ieee802154_dev {
struct ieee802154_hw_addr_filt hw_filt;
void *priv;
struct wpan_phy *phy;
+ size_t vif_data_size;
};
/* Checksum is in hardware and is omitted from a packet
@@ -76,28 +82,43 @@ struct ieee802154_dev {
* however, so you are advised to review these flags carefully.
*/
-/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
-#define IEEE802154_HW_OMIT_CKSUM 0x00000001
+/* Indicates that the xmitter will add FCS on its own. */
+#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001
/* Indicates that receiver will autorespond with ACK frames. */
-#define IEEE802154_HW_AACK 0x00000002
+#define IEEE802154_HW_AACK 0x00000002
/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER 0x00000004
+#define IEEE802154_HW_TXPOWER 0x00000004
/* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT 0x00000008
+#define IEEE802154_HW_LBT 0x00000008
/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE 0x00000010
+#define IEEE802154_HW_CCA_MODE 0x00000010
/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
+#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
* settings. */
-#define IEEE802154_HW_CSMA_PARAMS 0x00000040
+#define IEEE802154_HW_CSMA_PARAMS 0x00000040
/* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES 0x00000080
+#define IEEE802154_HW_FRAME_RETRIES 0x00000080
+/* Indicates that transceiver will support hardware address filter setting. */
+#define IEEE802154_HW_AFILT 0x00000100
+/* Indicates that transceiver will support promiscuous mode setting. */
+#define IEEE802154_HW_PROMISCUOUS 0x00000200
+/* Indicates that receiver omits FCS. */
+#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400
+/* Indicates that receiver will not filter frames with bad checksum. */
+#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800
+
+/* Indicates that receiver omits FCS and xmitter will add FCS on its own. */
+#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
+ IEEE802154_HW_RX_OMIT_CKSUM)
/* This groups the most common CSMA support fields into one. */
#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \
IEEE802154_HW_CCA_ED_LEVEL | \
- IEEE802154_HW_CSMA_PARAMS | \
+ IEEE802154_HW_CSMA_PARAMS)
+
+/* This groups the most common ARET support fields into one. */
+#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \
IEEE802154_HW_FRAME_RETRIES)
/* struct ieee802154_ops - callbacks from mac802154 to the driver
@@ -112,12 +133,24 @@ struct ieee802154_dev {
* stop: Handler that 802.15.4 module calls for device cleanup.
* This function is called after the last interface is removed.
*
- * xmit: Handler that 802.15.4 module calls for each transmitted frame.
+ * xmit_sync:
+ * Handler that 802.15.4 module calls for each transmitted frame.
+ *	skb contains the buffer starting from the IEEE 802.15.4 header.
+ * The low-level driver should send the frame based on available
+ * configuration. This is called by a workqueue and useful for
+ * synchronous 802.15.4 drivers.
+ * This function should return zero or negative errno.
+ *
+ * WARNING:
+ *	This will be deprecated soon. We no longer accept drivers with
+ *	synchronous xmit callbacks.
+ *
+ * xmit_async:
+ * Handler that 802.15.4 module calls for each transmitted frame.
 *	skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration.
- * This function should return zero or negative errno. Called with
- * pib_lock held.
+ * This function should return zero or negative errno.
*
* ed: Handler that 802.15.4 module calls for Energy Detection.
* This function should place the value for detected energy
@@ -159,40 +192,75 @@ struct ieee802154_dev {
* set_frame_retries
* Sets the retransmission attempt limit. Called with pib_lock held.
* Returns either zero, or negative errno.
+ *
+ * set_promiscuous_mode
+ *	Enables or disables promiscuous mode.
*/
struct ieee802154_ops {
struct module *owner;
- int (*start)(struct ieee802154_dev *dev);
- void (*stop)(struct ieee802154_dev *dev);
- int (*xmit)(struct ieee802154_dev *dev,
- struct sk_buff *skb);
- int (*ed)(struct ieee802154_dev *dev, u8 *level);
- int (*set_channel)(struct ieee802154_dev *dev,
- int page,
- int channel);
- int (*set_hw_addr_filt)(struct ieee802154_dev *dev,
- struct ieee802154_hw_addr_filt *filt,
+ int (*start)(struct ieee802154_hw *hw);
+ void (*stop)(struct ieee802154_hw *hw);
+ int (*xmit_sync)(struct ieee802154_hw *hw,
+ struct sk_buff *skb);
+ int (*xmit_async)(struct ieee802154_hw *hw,
+ struct sk_buff *skb);
+ int (*ed)(struct ieee802154_hw *hw, u8 *level);
+ int (*set_channel)(struct ieee802154_hw *hw, u8 page,
+ u8 channel);
+ int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
unsigned long changed);
- int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr);
- int (*set_txpower)(struct ieee802154_dev *dev, int db);
- int (*set_lbt)(struct ieee802154_dev *dev, bool on);
- int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode);
- int (*set_cca_ed_level)(struct ieee802154_dev *dev,
+ int (*set_txpower)(struct ieee802154_hw *hw, int db);
+ int (*set_lbt)(struct ieee802154_hw *hw, bool on);
+ int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode);
+ int (*set_cca_ed_level)(struct ieee802154_hw *hw,
s32 level);
- int (*set_csma_params)(struct ieee802154_dev *dev,
+ int (*set_csma_params)(struct ieee802154_hw *hw,
u8 min_be, u8 max_be, u8 retries);
- int (*set_frame_retries)(struct ieee802154_dev *dev,
+ int (*set_frame_retries)(struct ieee802154_hw *hw,
s8 retries);
+ int (*set_promiscuous_mode)(struct ieee802154_hw *hw,
+ const bool on);
};
-/* Basic interface to register ieee802154 device */
-struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
-void ieee802154_free_device(struct ieee802154_dev *dev);
-int ieee802154_register_device(struct ieee802154_dev *dev);
-void ieee802154_unregister_device(struct ieee802154_dev *dev);
+/**
+ * ieee802154_be64_to_le64 - copies and converts be64 to le64
+ * @le64_dst: le64 destination pointer
+ * @be64_src: be64 source pointer
+ */
+static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
+{
+ __le64 tmp = (__force __le64)swab64p(be64_src);
+
+ memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+}
-void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb,
+/**
+ * ieee802154_le64_to_be64 - copies and converts le64 to be64
+ * @be64_dst: be64 destination pointer
+ * @le64_src: le64 source pointer
+ */
+static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
+{
+ __be64 tmp = (__force __be64)swab64p(le64_src);
+
+ memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+}
+
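A small usage sketch for the conversion helpers: turning a device's big-endian EUI-64 (as read from hardware registers) into the little-endian extended address used by the stack; the register buffer is hypothetical:

	__le64 ext_addr;
	u8 reg[IEEE802154_EXTENDED_ADDR_LEN];	/* raw big-endian bytes from HW */

	/* assume the 8 address bytes were already read into reg[] */
	ieee802154_be64_to_le64(&ext_addr, reg);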
+/* Basic interface to register ieee802154 device */
+struct ieee802154_hw *
+ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+void ieee802154_free_hw(struct ieee802154_hw *hw);
+int ieee802154_register_hw(struct ieee802154_hw *hw);
+void ieee802154_unregister_hw(struct ieee802154_hw *hw);
+
+void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
+void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
+void ieee802154_wake_queue(struct ieee802154_hw *hw);
+void ieee802154_stop_queue(struct ieee802154_hw *hw);
+void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+ bool ifs_handling);
+
#endif /* NET_MAC802154_H */
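To tie the renamed API together, a hedged sketch of how a driver would register against the new ieee802154_hw interface; all foo_* names and the private struct are invented:

static const struct ieee802154_ops foo_ops = {
	.owner		= THIS_MODULE,
	.start		= foo_start,
	.stop		= foo_stop,
	.xmit_async	= foo_xmit_async,	/* preferred over xmit_sync */
	.ed		= foo_ed,
	.set_channel	= foo_set_channel,
};

	struct ieee802154_hw *hw;

	hw = ieee802154_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return -ENOMEM;
	hw->extra_tx_headroom = 0;
	return ieee802154_register_hw(hw);

On TX completion the async path would then call ieee802154_xmit_complete(hw, skb, false) from the driver's interrupt handler.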
diff --git a/include/net/mld.h b/include/net/mld.h
index faa1d161bf24..01d751303498 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -88,12 +88,15 @@ struct mld2_query {
#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+#define MLD_EXP_MIN_LIMIT 32768UL
+#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
+
static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
{
/* RFC3810, 5.1.3. Maximum Response Code */
unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
- if (mc_mrc < 32768) {
+ if (mc_mrc < MLD_EXP_MIN_LIMIT) {
ret = mc_mrc;
} else {
unsigned long mc_man, mc_exp;
diff --git a/include/net/mpls.h b/include/net/mpls.h
new file mode 100644
index 000000000000..5b3b5addfb08
--- /dev/null
+++ b/include/net/mpls.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_H
+#define _NET_MPLS_H 1
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#define MPLS_HLEN 4
+
+static inline bool eth_p_mpls(__be16 eth_type)
+{
+ return eth_type == htons(ETH_P_MPLS_UC) ||
+ eth_type == htons(ETH_P_MPLS_MC);
+}
+
+/*
+ * For non-MPLS skbs this will correspond to the network header.
+ * For MPLS skbs it will be before the network_header as the MPLS
+ * label stack lies between the end of the mac header and the network
+ * header. That is, for MPLS skbs the end of the mac header
+ * is the top of the MPLS label stack.
+ */
+static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+{
+ return skb_mac_header(skb) + skb->mac_len;
+}
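A hedged illustration of reading the top label stack entry via the helper, assuming the standard RFC 3032 layout and <asm/unaligned.h> for the unaligned load:

	if (eth_p_mpls(skb->protocol)) {
		u32 lse = get_unaligned_be32(skb_mpls_header(skb));
		u32 label = lse >> 12;			/* bits 31..12 */
		bool bos = lse & (1 << 8);		/* bottom-of-stack bit */
		u8 ttl = lse & 0xff;			/* bits 7..0 */

		/* ... use label/bos/ttl ... */
	}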
+#endif
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 47f425464f84..76f708486aae 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -69,7 +69,7 @@ struct neigh_parms {
struct net *net;
#endif
struct net_device *dev;
- struct neigh_parms *next;
+ struct list_head list;
int (*neigh_setup)(struct neighbour *);
void (*neigh_cleanup)(struct neighbour *);
struct neigh_table *tbl;
@@ -190,7 +190,6 @@ struct neigh_hash_table {
struct neigh_table {
- struct neigh_table *next;
int family;
int entry_size;
int key_len;
@@ -203,6 +202,7 @@ struct neigh_table {
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
+ struct list_head parms_list;
int gc_interval;
int gc_thresh1;
int gc_thresh2;
@@ -219,6 +219,13 @@ struct neigh_table {
struct pneigh_entry **phash_buckets;
};
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_DN_TABLE = 2,
+ NEIGH_NR_TABLES,
+};
+
static inline int neigh_parms_family(struct neigh_parms *p)
{
return p->tbl->family;
@@ -239,8 +246,8 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
-void neigh_table_init(struct neigh_table *tbl);
-int neigh_table_clear(struct neigh_table *tbl);
+void neigh_table_init(int index, struct neigh_table *tbl);
+int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
@@ -373,7 +380,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
return 0;
}
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int seq, hh_alen;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e0d64667a4b3..2e8756b8c775 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -26,6 +26,7 @@
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+#include <linux/ns_common.h>
struct user_namespace;
struct proc_dir_entry;
@@ -60,7 +61,7 @@ struct net {
struct user_namespace *user_ns; /* Owning user namespace */
- unsigned int proc_inum;
+ struct ns_common ns;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
diff --git a/include/net/netdma.h b/include/net/netdma.h
deleted file mode 100644
index 8ba8ce284eeb..000000000000
--- a/include/net/netdma.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef NETDMA_H
-#define NETDMA_H
-#ifdef CONFIG_NET_DMA
-#include <linux/dmaengine.h>
-#include <linux/skbuff.h>
-
-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
- struct sk_buff *skb, int offset, struct iovec *to,
- size_t len, struct dma_pinned_list *pinned_list);
-
-#endif /* CONFIG_NET_DMA */
-#endif /* NETDMA_H */
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
new file mode 100644
index 000000000000..2aa6048a55c1
--- /dev/null
+++ b/include/net/netfilter/br_netfilter.h
@@ -0,0 +1,6 @@
+#ifndef _BR_NETFILTER_H_
+#define _BR_NETFILTER_H_
+
+void br_netfilter_enable(void);
+
+#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
new file mode 100644
index 000000000000..a9c001c646da
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -0,0 +1,14 @@
+#ifndef _NF_NAT_MASQUERADE_IPV4_H_
+#define _NF_NAT_MASQUERADE_IPV4_H_
+
+#include <net/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct nf_nat_range *range,
+ const struct net_device *out);
+
+void nf_nat_masquerade_ipv4_register_notifier(void);
+void nf_nat_masquerade_ipv4_unregister_notifier(void);
+
+#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 931fbf812171..03e928a55229 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -1,128 +1,23 @@
#ifndef _IPV4_NF_REJECT_H
#define _IPV4_NF_REJECT_H
+#include <linux/skbuff.h>
#include <net/ip.h>
-#include <net/tcp.h>
-#include <net/route.h>
-#include <net/dst.h>
+#include <net/icmp.h>
static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
{
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
-/* Send RST reply */
-static void nf_send_reset(struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- const struct iphdr *oiph;
- struct iphdr *niph;
- const struct tcphdr *oth;
- struct tcphdr _otcph, *tcph;
-
- /* IP header checks: fragment. */
- if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
- return;
-
- oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
- sizeof(_otcph), &_otcph);
- if (oth == NULL)
- return;
-
- /* No RST for RST. */
- if (oth->rst)
- return;
-
- if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
- return;
-
- /* Check checksum */
- if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
- return;
- oiph = ip_hdr(oldskb);
-
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
- if (!nskb)
- return;
-
- skb_reserve(nskb, LL_MAX_HEADER);
-
- skb_reset_network_header(nskb);
- niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
- niph->version = 4;
- niph->ihl = sizeof(struct iphdr) / 4;
- niph->tos = 0;
- niph->id = 0;
- niph->frag_off = htons(IP_DF);
- niph->protocol = IPPROTO_TCP;
- niph->check = 0;
- niph->saddr = oiph->daddr;
- niph->daddr = oiph->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- memset(tcph, 0, sizeof(*tcph));
- tcph->source = oth->dest;
- tcph->dest = oth->source;
- tcph->doff = sizeof(struct tcphdr) / 4;
-
- if (oth->ack)
- tcph->seq = oth->ack_seq;
- else {
- tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
- oldskb->len - ip_hdrlen(oldskb) -
- (oth->doff << 2));
- tcph->ack = 1;
- }
-
- tcph->rst = 1;
- tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
- niph->daddr, 0);
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum_start = (unsigned char *)tcph - nskb->head;
- nskb->csum_offset = offsetof(struct tcphdr, check);
-
- /* ip_route_me_harder expects skb->dst to be set */
- skb_dst_set_noref(nskb, skb_dst(oldskb));
-
- nskb->protocol = htons(ETH_P_IP);
- if (ip_route_me_harder(nskb, RTN_UNSPEC))
- goto free_nskb;
-
- niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
-
- /* "Never happens" */
- if (nskb->len > dst_mtu(skb_dst(nskb)))
- goto free_nskb;
-
- nf_ct_attach(nskb, oldskb);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
- /* If we use ip_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- goto free_nskb;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip_local_out(nskb);
-
- return;
-
- free_nskb:
- kfree_skb(nskb);
-}
+void nf_send_reset(struct sk_buff *oldskb, int hook);
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *_oth, int hook);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __be16 protocol, int ttl);
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+ const struct tcphdr *oth);
#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
new file mode 100644
index 000000000000..0a13396cd390
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -0,0 +1,10 @@
+#ifndef _NF_NAT_MASQUERADE_IPV6_H_
+#define _NF_NAT_MASQUERADE_IPV6_H_
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ const struct net_device *out);
+void nf_nat_masquerade_ipv6_register_notifier(void);
+void nf_nat_masquerade_ipv6_unregister_notifier(void);
+
+#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 710d17ed70b4..23216d48abf9 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -1,11 +1,7 @@
#ifndef _IPV6_NF_REJECT_H
#define _IPV6_NF_REJECT_H
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_checksum.h>
-#include <linux/netfilter_ipv6.h>
+#include <linux/icmpv6.h>
static inline void
nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
@@ -17,155 +13,16 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
}
-/* Send RST reply */
-static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- struct tcphdr otcph, *tcph;
- unsigned int otcplen, hh_len;
- int tcphoff, needs_ack;
- const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
- struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE 0x0U
- const __u8 tclass = DEFAULT_TOS_VALUE;
- struct dst_entry *dst = NULL;
- u8 proto;
- __be16 frag_off;
- struct flowi6 fl6;
-
- if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
- (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
- pr_debug("addr is not unicast.\n");
- return;
- }
-
- proto = oip6h->nexthdr;
- tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
-
- if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
- pr_debug("Cannot get TCP header.\n");
- return;
- }
-
- otcplen = oldskb->len - tcphoff;
-
- /* IP header checks: fragment, too short. */
- if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
- pr_debug("proto(%d) != IPPROTO_TCP, "
- "or too short. otcplen = %d\n",
- proto, otcplen);
- return;
- }
-
- if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
- BUG();
-
- /* No RST for RST. */
- if (otcph.rst) {
- pr_debug("RST is set\n");
- return;
- }
-
- /* Check checksum. */
- if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
- pr_debug("TCP checksum is invalid\n");
- return;
- }
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.saddr = oip6h->daddr;
- fl6.daddr = oip6h->saddr;
- fl6.fl6_sport = otcph.dest;
- fl6.fl6_dport = otcph.source;
- security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst == NULL || dst->error) {
- dst_release(dst);
- return;
- }
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
- if (IS_ERR(dst))
- return;
-
- hh_len = (dst->dev->hard_header_len + 15)&~15;
- nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
- + sizeof(struct tcphdr) + dst->trailer_len,
- GFP_ATOMIC);
-
- if (!nskb) {
- net_dbg_ratelimited("cannot alloc skb\n");
- dst_release(dst);
- return;
- }
-
- skb_dst_set(nskb, dst);
-
- skb_reserve(nskb, hh_len + dst->header_len);
-
- skb_put(nskb, sizeof(struct ipv6hdr));
- skb_reset_network_header(nskb);
- ip6h = ipv6_hdr(nskb);
- ip6_flow_hdr(ip6h, tclass, 0);
- ip6h->hop_limit = ip6_dst_hoplimit(dst);
- ip6h->nexthdr = IPPROTO_TCP;
- ip6h->saddr = oip6h->daddr;
- ip6h->daddr = oip6h->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- /* Truncate to length (no data) */
- tcph->doff = sizeof(struct tcphdr)/4;
- tcph->source = otcph.dest;
- tcph->dest = otcph.source;
-
- if (otcph.ack) {
- needs_ack = 0;
- tcph->seq = otcph.ack_seq;
- tcph->ack_seq = 0;
- } else {
- needs_ack = 1;
- tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
- + otcplen - (otcph.doff<<2));
- tcph->seq = 0;
- }
-
- /* Reset flags */
- ((u_int8_t *)tcph)[13] = 0;
- tcph->rst = 1;
- tcph->ack = needs_ack;
- tcph->window = 0;
- tcph->urg_ptr = 0;
- tcph->check = 0;
-
- /* Adjust TCP checksum */
- tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
- &ipv6_hdr(nskb)->daddr,
- sizeof(struct tcphdr), IPPROTO_TCP,
- csum_partial(tcph,
- sizeof(struct tcphdr), 0));
-
- nf_ct_attach(nskb, oldskb);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
- /* If we use ip6_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- nskb->protocol = htons(ETH_P_IPV6);
- ip6h->payload_len = htons(sizeof(struct tcphdr));
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- return;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip6_local_out(nskb);
-}
+void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
+
+const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *otcph,
+ unsigned int *otcplen, int hook);
+struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __be16 protocol, int hoplimit);
+void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ const struct tcphdr *oth, unsigned int otcplen);
#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 37252f71a380..f0daed2b54d1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -92,12 +92,18 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- /* If we were expected by an expectation, this will be it */
- struct nf_conn *master;
-
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
+#ifdef CONFIG_NET_NS
+ struct net *ct_net;
+#endif
+ /* all members below initialized via memset */
+ u8 __nfct_init_offset[0];
+
+ /* If we were expected by an expectation, this will be it */
+ struct nf_conn *master;
+
#if defined(CONFIG_NF_CONNTRACK_MARK)
u_int32_t mark;
#endif
@@ -108,9 +114,6 @@ struct nf_conn {
/* Extensions */
struct nf_ct_ext *ext;
-#ifdef CONFIG_NET_NS
- struct net *ct_net;
-#endif
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;
@@ -242,7 +245,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
static inline struct nf_conn *nf_ct_untracked_get(void)
{
- return &__raw_get_cpu_var(nf_conntrack_untracked);
+ return raw_cpu_ptr(&nf_conntrack_untracked);
}
void nf_ct_untracked_status_or(unsigned long bits);
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index cc0c18827602..f2f0fa3bb150 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -72,7 +72,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
return ret;
}
-int
+void
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index adc1fa3dd7ab..cdc920b4c4c2 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -38,8 +38,8 @@ struct nf_conntrack_l3proto {
const struct nf_conntrack_tuple *orig);
/* Print out the per-protocol part of the tuple. */
- int (*print_tuple)(struct seq_file *s,
- const struct nf_conntrack_tuple *);
+ void (*print_tuple)(struct seq_file *s,
+ const struct nf_conntrack_tuple *);
/*
* Called before tracking.
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 4c8d573830b7..1f7061313d54 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -56,11 +56,11 @@ struct nf_conntrack_l4proto {
u_int8_t pf, unsigned int hooknum);
/* Print out the per-protocol part of the tuple. Return like seq_* */
- int (*print_tuple)(struct seq_file *s,
- const struct nf_conntrack_tuple *);
+ void (*print_tuple)(struct seq_file *s,
+ const struct nf_conntrack_tuple *);
/* Print out the private part of the conntrack. */
- int (*print_conntrack)(struct seq_file *s, struct nf_conn *);
+ void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
/* Return the array of timeouts for this protocol. */
unsigned int *(*get_timeouts)(struct net *net);
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index a71dd333ac68..344b1ab19220 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -32,10 +32,8 @@ struct nf_conn_nat {
struct hlist_node bysource;
struct nf_conn *ct;
union nf_conntrack_nat_help help;
-#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
- defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
+ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
int masq_index;
#endif
};
@@ -68,8 +66,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
struct nf_conn_nat *nat,
const struct net_device *out)
{
-#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
+ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
return nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
nat->masq_index != out->ifindex;
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 5a2919b2e09a..340c013795a4 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum);
+
+unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum, unsigned int hdrlen);
+unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
new file mode 100644
index 000000000000..73b729543309
--- /dev/null
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -0,0 +1,12 @@
+#ifndef _NF_NAT_REDIRECT_H_
+#define _NF_NAT_REDIRECT_H_
+
+unsigned int
+nf_nat_redirect_ipv4(struct sk_buff *skb,
+ const struct nf_nat_ipv4_multi_range_compat *mr,
+ unsigned int hooknum);
+unsigned int
+nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ unsigned int hooknum);
+
+#endif /* _NF_NAT_REDIRECT_H_ */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c4d86198d3d6..3ae969e3acf0 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -241,6 +241,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @dtype: data type (verdict or numeric type defined by userspace)
* @size: maximum set size
* @nelems: number of elements
+ * @policy: set parameterization (see enum nft_set_policies)
* @ops: set ops
* @flags: set flags
* @klen: key length
@@ -255,6 +256,7 @@ struct nft_set {
u32 dtype;
u32 size;
u32 nelems;
+ u16 policy;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
u16 flags;
@@ -394,14 +396,12 @@ struct nft_rule {
/**
* struct nft_trans - nf_tables object update in transaction
*
- * @rcu_head: rcu head to defer release of transaction data
* @list: used internally
* @msg_type: message type
* @ctx: transaction context
* @data: internal information related to the transaction
*/
struct nft_trans {
- struct rcu_head rcu_head;
struct list_head list;
int msg_type;
struct nft_ctx ctx;
@@ -528,6 +528,9 @@ enum nft_chain_type {
NFT_CHAIN_T_MAX
};
+int nft_chain_validate_dependency(const struct nft_chain *chain,
+ enum nft_chain_type type);
+
struct nft_stats {
u64 bytes;
u64 pkts;
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
new file mode 100644
index 000000000000..511fb79f6dad
--- /dev/null
+++ b/include/net/netfilter/nf_tables_bridge.h
@@ -0,0 +1,7 @@
+#ifndef _NET_NF_TABLES_BRIDGE_H
+#define _NET_NF_TABLES_BRIDGE_H
+
+int nft_bridge_iphdr_validate(struct sk_buff *skb);
+int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
+
+#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
new file mode 100644
index 000000000000..e2a518b60e19
--- /dev/null
+++ b/include/net/netfilter/nft_masq.h
@@ -0,0 +1,19 @@
+#ifndef _NFT_MASQ_H_
+#define _NFT_MASQ_H_
+
+struct nft_masq {
+ u32 flags;
+};
+
+extern const struct nla_policy nft_masq_policy[];
+
+int nft_masq_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data);
+
+#endif /* _NFT_MASQ_H_ */
diff --git a/include/net/netfilter/nft_redir.h b/include/net/netfilter/nft_redir.h
new file mode 100644
index 000000000000..a2d67546afab
--- /dev/null
+++ b/include/net/netfilter/nft_redir.h
@@ -0,0 +1,21 @@
+#ifndef _NFT_REDIR_H_
+#define _NFT_REDIR_H_
+
+struct nft_redir {
+ enum nft_registers sreg_proto_min:8;
+ enum nft_registers sreg_proto_max:8;
+ u16 flags;
+};
+
+extern const struct nla_policy nft_redir_policy[];
+
+int nft_redir_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data);
+
+#endif /* _NFT_REDIR_H_ */
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 36b0da2d55bb..60fa1530006b 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -14,12 +14,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
-void nft_reject_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt);
-
-void nft_reject_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt);
+int nft_reject_icmp_code(u8 code);
+int nft_reject_icmpv6_code(u8 code);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 6c1076275aaa..64158353ecb2 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -431,7 +431,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
/**
* nlmsg_put - Add a new netlink message to an skb
* @skb: socket buffer to store message in
- * @portid: netlink process id
+ * @portid: netlink PORTID of requesting application
* @seq: sequence number of message
* @type: message type
* @payload: length of message payload
@@ -1185,4 +1185,14 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
#define nla_for_each_nested(pos, nla, rem) \
nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
+/**
+ * nla_is_last - Test if attribute is last in stream
+ * @nla: attribute to test
+ * @rem: bytes remaining in stream
+ */
+static inline bool nla_is_last(const struct nlattr *nla, int rem)
+{
+ return nla->nla_len == rem;
+}
+
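One hedged use of the helper: while walking an attribute stream, enforce that a particular attribute type only appears as the final attribute (the head/len variables and FOO_ATTR_TERMINAL type are illustrative):

	struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, head, len, rem) {
		if (nla_type(nla) == FOO_ATTR_TERMINAL &&	/* invented type */
		    !nla_is_last(nla, rem))
			return -EINVAL;	/* must be the last attribute */
	}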
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index aec5e12f9f19..24945cefc4fd 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -76,6 +76,7 @@ struct netns_ipv4 {
int sysctl_tcp_ecn;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_nonlocal_bind;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index eade27adecf3..69ae41f2098c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -76,7 +76,7 @@ struct netns_ipv6 {
#endif
#endif
atomic_t dev_addr_genid;
- atomic_t rt_genid;
+ atomic_t fib6_sernum;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 3492434baf88..730d82ad6ee5 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -13,6 +13,19 @@ struct ctl_table_header;
struct xfrm_policy_hash {
struct hlist_head *table;
unsigned int hmask;
+ u8 dbits4;
+ u8 sbits4;
+ u8 dbits6;
+ u8 sbits6;
+};
+
+struct xfrm_policy_hthresh {
+ struct work_struct work;
+ seqlock_t lock;
+ u8 lbits4;
+ u8 rbits4;
+ u8 lbits6;
+ u8 rbits6;
};
struct netns_xfrm {
@@ -37,10 +50,11 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
- struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2];
- struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
+ struct hlist_head policy_inexact[XFRM_POLICY_MAX];
+ struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
+ struct xfrm_policy_hthresh policy_hthresh;
struct sock *nlsk;
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
index d9a5cf7ac1c4..0ae101eef0f4 100644
--- a/include/net/nfc/digital.h
+++ b/include/net/nfc/digital.h
@@ -225,6 +225,19 @@ struct nfc_digital_dev {
u8 curr_protocol;
u8 curr_rf_tech;
u8 curr_nfc_dep_pni;
+ u8 did;
+
+ u8 local_payload_max;
+ u8 remote_payload_max;
+
+ struct sk_buff *chaining_skb;
+ struct digital_data_exch *data_exch;
+
+ int atn_count;
+ int nack_count;
+
+ struct sk_buff *saved_skb;
+ unsigned int saved_skb_len;
u16 target_fsc;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 7ee8f4cc610b..14bd0e1c47fa 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -57,10 +57,14 @@ struct nfc_hci_ops {
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+ int (*se_io)(struct nfc_hci_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
};
/* Pipes */
#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81
#define NFC_HCI_LINK_MGMT_PIPE 0x00
#define NFC_HCI_ADMIN_PIPE 0x01
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index fbfa4e471abb..e7257a4653b4 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -2,6 +2,7 @@
* The NFC Controller Interface is the communication protocol between an
* NFC Controller (NFCC) and a Device Host (DH).
*
+ * Copyright (C) 2014 Marvell International Ltd.
* Copyright (C) 2011 Texas Instruments, Inc.
*
* Written by Ilan Elias <ilane@ti.com>
@@ -27,6 +28,8 @@
#ifndef __NCI_H
#define __NCI_H
+#include <net/nfc/nfc.h>
+
/* NCI constants */
#define NCI_MAX_NUM_MAPPING_CONFIGS 10
#define NCI_MAX_NUM_RF_CONFIGS 10
@@ -65,19 +68,20 @@
#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
-#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06
+#define NCI_NFC_V_PASSIVE_POLL_MODE 0x06
#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
-#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86
+
+#define NCI_RF_TECH_MODE_LISTEN_MASK 0x80
/* NCI RF Technologies */
#define NCI_NFC_RF_TECHNOLOGY_A 0x00
#define NCI_NFC_RF_TECHNOLOGY_B 0x01
#define NCI_NFC_RF_TECHNOLOGY_F 0x02
-#define NCI_NFC_RF_TECHNOLOGY_15693 0x03
+#define NCI_NFC_RF_TECHNOLOGY_V 0x03
/* NCI Bit Rates */
#define NCI_NFC_BIT_RATE_106 0x00
@@ -87,6 +91,7 @@
#define NCI_NFC_BIT_RATE_1695 0x04
#define NCI_NFC_BIT_RATE_3390 0x05
#define NCI_NFC_BIT_RATE_6780 0x06
+#define NCI_NFC_BIT_RATE_26 0x20
/* NCI RF Protocols */
#define NCI_RF_PROTOCOL_UNKNOWN 0x00
@@ -95,6 +100,7 @@
#define NCI_RF_PROTOCOL_T3T 0x03
#define NCI_RF_PROTOCOL_ISO_DEP 0x04
#define NCI_RF_PROTOCOL_NFC_DEP 0x05
+#define NCI_RF_PROTOCOL_T5T 0x06
/* NCI RF Interfaces */
#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
@@ -104,6 +110,17 @@
/* NCI Configuration Parameter Tags */
#define NCI_PN_ATR_REQ_GEN_BYTES 0x29
+#define NCI_LN_ATR_RES_GEN_BYTES 0x61
+#define NCI_LA_SEL_INFO 0x32
+#define NCI_LF_PROTOCOL_TYPE 0x50
+#define NCI_LF_CON_BITR_F 0x54
+
+/* NCI Configuration Parameters masks */
+#define NCI_LA_SEL_INFO_ISO_DEP_MASK 0x20
+#define NCI_LA_SEL_INFO_NFC_DEP_MASK 0x40
+#define NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK 0x02
+#define NCI_LF_CON_BITR_F_212 0x02
+#define NCI_LF_CON_BITR_F_424 0x04
/* NCI Reset types */
#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
@@ -312,20 +329,31 @@ struct nci_core_intf_error_ntf {
struct rf_tech_specific_params_nfca_poll {
__u16 sens_res;
__u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
- __u8 nfcid1[10];
+ __u8 nfcid1[NFC_NFCID1_MAXSIZE];
__u8 sel_res_len; /* 0 or 1 Bytes */
__u8 sel_res;
} __packed;
struct rf_tech_specific_params_nfcb_poll {
__u8 sensb_res_len;
- __u8 sensb_res[12]; /* 11 or 12 Bytes */
+ __u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; /* 11 or 12 Bytes */
} __packed;
struct rf_tech_specific_params_nfcf_poll {
__u8 bit_rate;
__u8 sensf_res_len;
- __u8 sensf_res[18]; /* 16 or 18 Bytes */
+ __u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; /* 16 or 18 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcv_poll {
+ __u8 res_flags;
+ __u8 dsfid;
+ __u8 uid[NFC_ISO15693_UID_MAXSIZE]; /* 8 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcf_listen {
+ __u8 local_nfcid2_len;
+ __u8 local_nfcid2[NFC_NFCID2_MAXSIZE]; /* 0 or 8 Bytes */
} __packed;
struct nci_rf_discover_ntf {
@@ -338,6 +366,7 @@ struct nci_rf_discover_ntf {
struct rf_tech_specific_params_nfca_poll nfca_poll;
struct rf_tech_specific_params_nfcb_poll nfcb_poll;
struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll nfcv_poll;
} rf_tech_specific_params;
__u8 ntf_type;
@@ -356,7 +385,12 @@ struct activation_params_nfcb_poll_iso_dep {
struct activation_params_poll_nfc_dep {
__u8 atr_res_len;
- __u8 atr_res[63];
+ __u8 atr_res[NFC_ATR_RES_MAXSIZE - 2]; /* ATR_RES from byte 3 */
+};
+
+struct activation_params_listen_nfc_dep {
+ __u8 atr_req_len;
+ __u8 atr_req[NFC_ATR_REQ_MAXSIZE - 2]; /* ATR_REQ from byte 3 */
};
struct nci_rf_intf_activated_ntf {
@@ -372,6 +406,8 @@ struct nci_rf_intf_activated_ntf {
struct rf_tech_specific_params_nfca_poll nfca_poll;
struct rf_tech_specific_params_nfcb_poll nfcb_poll;
struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll nfcv_poll;
+ struct rf_tech_specific_params_nfcf_listen nfcf_listen;
} rf_tech_specific_params;
__u8 data_exch_rf_tech_and_mode;
@@ -383,6 +419,7 @@ struct nci_rf_intf_activated_ntf {
struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
struct activation_params_poll_nfc_dep poll_nfc_dep;
+ struct activation_params_listen_nfc_dep listen_nfc_dep;
} activation_params;
} __packed;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 1f9a0f5272fe..9e51bb4d841e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -4,6 +4,7 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2013 Intel Corporation. All rights reserved.
+ * Copyright (C) 2014 Marvell International Ltd.
*
* Written by Ilan Elias <ilane@ti.com>
*
@@ -49,6 +50,8 @@ enum nci_state {
NCI_W4_ALL_DISCOVERIES,
NCI_W4_HOST_SELECT,
NCI_POLL_ACTIVE,
+ NCI_LISTEN_ACTIVE,
+ NCI_LISTEN_SLEEP,
};
/* NCI timeouts */
@@ -64,10 +67,17 @@ enum nci_state {
struct nci_dev;
struct nci_ops {
- int (*open)(struct nci_dev *ndev);
- int (*close)(struct nci_dev *ndev);
- int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
- int (*setup)(struct nci_dev *ndev);
+ int (*open)(struct nci_dev *ndev);
+ int (*close)(struct nci_dev *ndev);
+ int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
+ int (*setup)(struct nci_dev *ndev);
+ __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
+ int (*discover_se)(struct nci_dev *ndev);
+ int (*disable_se)(struct nci_dev *ndev, u32 se_idx);
+ int (*enable_se)(struct nci_dev *ndev, u32 se_idx);
+ int (*se_io)(struct nci_dev *ndev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
};
#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 6c583e244de2..12adb817c27a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2014 Marvell International Ltd.
*
* Authors:
* Lauro Ramos Venancio <lauro.venancio@openbossa.org>
@@ -87,6 +88,7 @@ struct nfc_ops {
#define NFC_TARGET_IDX_ANY -1
#define NFC_MAX_GT_LEN 48
#define NFC_ATR_RES_GT_OFFSET 15
+#define NFC_ATR_REQ_GT_OFFSET 14
/**
 * struct nfc_target - NFC target description
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b23548e04098..6dbd406ca41b 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -1,126 +1,122 @@
+#ifndef __NL802154_H
+#define __NL802154_H
/*
- * nl802154.h
+ * 802.15.4 netlink interface public header
*
- * Copyright (C) 2007, 2008, 2009 Siemens AG
+ * Copyright 2014 Alexander Aring <aar@pengutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
-#ifndef IEEE802154_NL_H
-#define IEEE802154_NL_H
+#define NL802154_GENL_NAME "nl802154"
-struct net_device;
-struct ieee802154_addr;
+enum nl802154_commands {
+/* don't change the order or add anything between, this is ABI! */
+/* currently we don't ship this file via uapi, so ignore the above */
+ NL802154_CMD_UNSPEC,
-/**
- * ieee802154_nl_assoc_indic - Notify userland of an association request.
- * @dev: The network device on which this association request was
- * received.
- * @addr: The address of the device requesting association.
- * @cap: The capability information field from the device.
- *
- * This informs a userland coordinator of a device requesting to
- * associate with the PAN controlled by the coordinator.
- *
- * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document.
- */
-int ieee802154_nl_assoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr, u8 cap);
-
-/**
- * ieee802154_nl_assoc_confirm - Notify userland of association.
- * @dev: The device which has completed association.
- * @short_addr: The short address assigned to the device.
- * @status: The status of the association.
- *
- * Inform userland of the result of an association request. If the
- * association request included asking the coordinator to allocate
- * a short address then it is returned in @short_addr.
- *
- * Note: This is in section 7.3.2 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_assoc_confirm(struct net_device *dev,
- __le16 short_addr, u8 status);
-
-/**
- * ieee802154_nl_disassoc_indic - Notify userland of disassociation.
- * @dev: The device on which disassociation was indicated.
- * @addr: The device which is disassociating.
- * @reason: The reason for the disassociation.
- *
- * Inform userland that a device has disassociated from the network.
- *
- * Note: This is in section 7.3.3 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_disassoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr, u8 reason);
-
-/**
- * ieee802154_nl_disassoc_confirm - Notify userland of disassociation
- * completion.
- * @dev: The device on which disassociation was ordered.
- * @status: The result of the disassociation.
- *
- * Inform userland of the result of requesting that a device
- * disassociate, or the result of requesting that we disassociate from
- * a PAN managed by another coordinator.
- *
- * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_disassoc_confirm(struct net_device *dev,
- u8 status);
-
-/**
- * ieee802154_nl_scan_confirm - Notify userland of completion of scan.
- * @dev: The device which was instructed to scan.
- * @status: The status of the scan operation.
- * @scan_type: What type of scan was performed.
- * @unscanned: Any channels that the device was unable to scan.
- * @edl: The energy levels (if a passive scan).
- *
- *
- * Note: This is in section 7.1.11 of the IEEE 802.15.4 document.
- * Note: This API does not permit the return of an active scan result.
- */
-int ieee802154_nl_scan_confirm(struct net_device *dev,
- u8 status, u8 scan_type, u32 unscanned, u8 page,
- u8 *edl/*, struct list_head *pan_desc_list */);
-
-/**
- * ieee802154_nl_beacon_indic - Notify userland of a received beacon.
- * @dev: The device on which a beacon was received.
- * @panid: The PAN of the coordinator.
- * @coord_addr: The short address of the coordinator on that PAN.
- *
- * Note: This is in section 7.1.5 of the IEEE 802.15.4 document.
- * Note: This API does not provide extended information such as what
- * channel the PAN is on or what the LQI of the beacon frame was on
- * receipt.
- * Note: This API cannot indicate a beacon frame for a coordinator
- * operating in long addressing mode.
- */
-int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid,
- __le16 coord_addr);
+ NL802154_CMD_GET_WPAN_PHY, /* can dump */
+ NL802154_CMD_SET_WPAN_PHY,
+ NL802154_CMD_NEW_WPAN_PHY,
+ NL802154_CMD_DEL_WPAN_PHY,
-/**
- * ieee802154_nl_start_confirm - Notify userland of completion of start.
- * @dev: The device which was instructed to scan.
- * @status: The status of the scan operation.
- *
- * Note: This is in section 7.1.14 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_start_confirm(struct net_device *dev, u8 status);
+ NL802154_CMD_GET_INTERFACE, /* can dump */
+ NL802154_CMD_SET_INTERFACE,
+ NL802154_CMD_NEW_INTERFACE,
+ NL802154_CMD_DEL_INTERFACE,
+
+ NL802154_CMD_SET_CHANNEL,
+
+ NL802154_CMD_SET_PAN_ID,
+ NL802154_CMD_SET_SHORT_ADDR,
+
+ NL802154_CMD_SET_TX_POWER,
+ NL802154_CMD_SET_CCA_MODE,
+ NL802154_CMD_SET_CCA_ED_LEVEL,
+
+ NL802154_CMD_SET_MAX_FRAME_RETRIES,
+
+ NL802154_CMD_SET_BACKOFF_EXPONENT,
+ NL802154_CMD_SET_MAX_CSMA_BACKOFFS,
+
+ NL802154_CMD_SET_LBT_MODE,
+
+ /* add new commands above here */
+
+ /* used to define NL802154_CMD_MAX below */
+ __NL802154_CMD_AFTER_LAST,
+ NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
+};
+
+enum nl802154_attrs {
+/* don't change the order or add anything between, this is ABI! */
+/* currently we don't ship this file via uapi, so ignore the above */
+ NL802154_ATTR_UNSPEC,
+
+ NL802154_ATTR_WPAN_PHY,
+ NL802154_ATTR_WPAN_PHY_NAME,
+
+ NL802154_ATTR_IFINDEX,
+ NL802154_ATTR_IFNAME,
+ NL802154_ATTR_IFTYPE,
+
+ NL802154_ATTR_WPAN_DEV,
+
+ NL802154_ATTR_PAGE,
+ NL802154_ATTR_CHANNEL,
+
+ NL802154_ATTR_PAN_ID,
+ NL802154_ATTR_SHORT_ADDR,
+
+ NL802154_ATTR_TX_POWER,
+
+ NL802154_ATTR_CCA_MODE,
+ NL802154_ATTR_CCA_MODE3_AND,
+ NL802154_ATTR_CCA_ED_LEVEL,
+
+ NL802154_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_ATTR_MAX_BE,
+ NL802154_ATTR_MIN_BE,
+ NL802154_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_ATTR_LBT_MODE,
+
+ NL802154_ATTR_GENERATION,
+
+ NL802154_ATTR_CHANNELS_SUPPORTED,
+ NL802154_ATTR_SUPPORTED_CHANNEL,
+
+ NL802154_ATTR_EXTENDED_ADDR,
+
+ /* add attributes here, update the policy in nl802154.c */
+
+ __NL802154_ATTR_AFTER_LAST,
+ NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_iftype {
+ /* for backwards compatibility TODO */
+ NL802154_IFTYPE_UNSPEC = -1,
+
+ NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_MONITOR,
+ NL802154_IFTYPE_COORD,
+
+ /* keep last */
+ NUM_NL802154_IFTYPES,
+ NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
+};
-#endif
+#endif /* __NL802154_H */
diff --git a/include/net/ping.h b/include/net/ping.h
index 026479b61a2d..f074060bc5de 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -82,7 +82,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-void ping_rcv(struct sk_buff *skb);
+bool ping_rcv(struct sk_buff *skb);
#ifdef CONFIG_PROC_FS
struct ping_seq_afinfo {
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 6da46dcf1049..bc49967e1a68 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -20,11 +20,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
- unsigned long old_cl;
-
- old_cl = *clp;
- *clp = cl;
- return old_cl;
+ return xchg(clp, cl);
}
static inline unsigned long
@@ -137,7 +133,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, bool ovr);
-void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -170,6 +166,7 @@ struct tcf_ematch {
unsigned int datalen;
u16 matchid;
u16 flags;
+ struct net *net;
};
static inline int tcf_em_is_container(struct tcf_ematch *em)
@@ -233,12 +230,11 @@ struct tcf_ematch_tree {
struct tcf_ematch_ops {
int kind;
int datalen;
- int (*change)(struct tcf_proto *, void *,
+ int (*change)(struct net *net, void *,
int, struct tcf_ematch *);
int (*match)(struct sk_buff *, struct tcf_ematch *,
struct tcf_pkt_info *);
- void (*destroy)(struct tcf_proto *,
- struct tcf_ematch *);
+ void (*destroy)(struct tcf_ematch *);
int (*dump)(struct sk_buff *, struct tcf_ematch *);
struct module *owner;
struct list_head link;
@@ -248,7 +244,7 @@ int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
struct tcf_ematch_tree *);
-void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
struct tcf_pkt_info *);
@@ -305,7 +301,7 @@ struct tcf_ematch_tree {
};
#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
-#define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0)
+#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ec030cd76616..27a33833ff4a 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -50,7 +50,7 @@ typedef long psched_tdiff_t;
static inline psched_time_t psched_get_time(void)
{
- return PSCHED_NS2TICKS(ktime_to_ns(ktime_get()));
+ return PSCHED_NS2TICKS(ktime_get_ns());
}
static inline psched_tdiff_t
@@ -65,12 +65,12 @@ struct qdisc_watchdog {
};
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires)
{
- qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+ qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
}
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
@@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock);
+ spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index dad7ab20a8cb..b776d72d84be 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -136,6 +136,17 @@ struct regulatory_request {
* otherwise initiating radiation is not allowed. This will enable the
* relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
* option
+ * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
+ * all interfaces on this wiphy reside on allowed channels. If this flag
+ * is not set, upon a regdomain change, the interfaces are given a grace
+ * period (currently 60 seconds) to disconnect or move to an allowed
+ * channel. Interfaces on forbidden channels are forcibly disconnected.
+ * Currently these types of interfaces are supported for enforcement:
+ * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
+ * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
+ * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
+ * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
+ * includes any modes unsupported for enforcement checking.
*/
enum ieee80211_regulatory_flags {
REGULATORY_CUSTOM_REG = BIT(0),
@@ -144,6 +155,7 @@ enum ieee80211_regulatory_flags {
REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
+ REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
};
struct ieee80211_freq_range {
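
For the driver side, the new flag documented above is simply OR-ed into the wiphy's regulatory flags during setup; a minimal sketch, assuming the usual cfg80211 field name:

#include <net/cfg80211.h>

/* illustrative: opt this wiphy out of forced disconnects after a regdomain
 * change (see REGULATORY_IGNORE_STALE_KICKOFF above)
 */
static void foo_init_regulatory(struct wiphy *wiphy)
{
	wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
}
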
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 620e086c0cbe..3d282cbb66bf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -6,6 +6,8 @@
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
+#include <linux/percpu.h>
+#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -58,6 +60,7 @@ struct Qdisc {
* multiqueue device.
*/
#define TCQ_F_WARN_NONWC (1 << 16)
+#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -83,9 +86,15 @@ struct Qdisc {
*/
unsigned long state;
struct sk_buff_head q;
- struct gnet_stats_basic_packed bstats;
+ union {
+ struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ } __packed;
unsigned int __state;
- struct gnet_stats_queue qstats;
+ union {
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ } __packed;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
@@ -111,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
+static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+{
+ return qdisc->flags & TCQ_F_ONETXQUEUE;
+}
+
+static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Non-BQL migrated drivers will return 0, too. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
@@ -143,7 +167,7 @@ struct Qdisc_class_ops {
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -195,7 +219,6 @@ struct tcf_proto_ops {
void (*destroy)(struct tcf_proto*);
unsigned long (*get)(struct tcf_proto*, u32 handle);
- void (*put)(struct tcf_proto*, unsigned long);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
@@ -212,8 +235,8 @@ struct tcf_proto_ops {
struct tcf_proto {
/* Fast access part */
- struct tcf_proto *next;
- void *root;
+ struct tcf_proto __rcu *next;
+ void __rcu *root;
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -225,6 +248,7 @@ struct tcf_proto {
struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
+ struct rcu_head rcu;
};
struct qdisc_skb_cb {
@@ -260,7 +284,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
- return qdisc->dev_queue->qdisc;
+ struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+ return q;
}
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
@@ -377,7 +403,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
void tcf_destroy(struct tcf_proto *tp);
-void tcf_destroy_chain(struct tcf_proto **fl);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater then index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -385,7 +411,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
struct Qdisc *qdisc;
for (; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
@@ -403,13 +429,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev)
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
unsigned int i;
+
+ rcu_read_lock();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- const struct Qdisc *q = txq->qdisc;
+ const struct Qdisc *q = rcu_dereference(txq->qdisc);
- if (q->q.qlen)
+ if (q->q.qlen) {
+ rcu_read_unlock();
return false;
+ }
}
+ rcu_read_unlock();
return true;
}
@@ -417,9 +448,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != txq->qdisc_sleeping)
+ if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
return true;
}
return false;
@@ -429,9 +461,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != &noop_qdisc)
+ if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
return false;
}
return true;
@@ -477,6 +510,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+ return q->flags & TCQ_F_CPUSTATS;
+}
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
const struct sk_buff *skb)
@@ -485,17 +522,62 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
+static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ struct gnet_stats_basic_cpu *bstats =
+ this_cpu_ptr(sch->cpu_bstats);
+
+ u64_stats_update_begin(&bstats->syncp);
+ bstats_update(&bstats->bstats, skb);
+ u64_stats_update_end(&bstats->syncp);
+}
+
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
bstats_update(&sch->bstats, skb);
}
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+ sch->qstats.drops += count;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ sch->qstats.drops++;
+}
+
+static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+{
+ struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+
+ qstats->drops++;
+}
+
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+ sch->qstats.overlimits++;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list)
{
__skb_queue_tail(list, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -511,7 +593,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
}
@@ -530,7 +612,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
- sch->qstats.backlog -= len;
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
return len;
}
@@ -549,7 +631,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
return skb;
}
@@ -631,14 +713,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
kfree_skb(skb);
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return NET_XMIT_DROP;
}
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
#ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
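
The qdisc_qstats_*() helpers above replace open-coded sch->qstats updates, which is what lets the drop and backlog accounting later switch between the plain and per-CPU variants. A minimal tail-drop enqueue sketched against them (the limit check mirrors the simple FIFO qdiscs; the foo_ name is illustrative):

#include <linux/skbuff.h>
#include <net/sch_generic.h>

static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* __qdisc_enqueue_tail() already does the backlog accounting */
	if (likely(skb_queue_len(&sch->q) < sch->limit))
		return __qdisc_enqueue_tail(skb, sch, &sch->q);

	qdisc_qstats_overlimit(sch);	/* count the overlimit event ... */
	return qdisc_drop(skb, sch);	/* ... then drop: frees skb, bumps drops */
}
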
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index f22538e68245..d4a20d00461c 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -115,7 +115,7 @@ typedef enum {
* analysis of the state functions, but in reality just taken from
 * thin air in the hopes that we don't trigger a kernel panic.
*/
-#define SCTP_MAX_NUM_COMMANDS 14
+#define SCTP_MAX_NUM_COMMANDS 20
typedef union {
void *zero_all; /* Set to NULL to clear the entire union */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 9fbd856e6713..856f01cb51dd 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -426,6 +426,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
asoc->pmtu_pending = 0;
}
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+ return !list_empty(&chunk->list);
+}
+
/* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 7f4eeb340a54..487ef34bbd63 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -219,7 +219,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
const struct sctp_chunk *,
__u32 tsn);
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
- const struct msghdr *, size_t msg_len);
+ struct msghdr *, size_t msg_len);
struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
const struct sctp_chunk *,
const __u8 *,
@@ -248,9 +248,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
int, __be16);
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr);
-int sctp_verify_asconf(const struct sctp_association *asoc,
- struct sctp_paramhdr *param_hdr, void *chunk_end,
- struct sctp_paramhdr **errp);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp);
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf);
int sctp_process_asconf_ack(struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4ff3f67be62c..2bb2fcf5b11f 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -531,7 +531,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
- struct msghdr *, int len);
+ struct iov_iter *);
void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
@@ -647,8 +647,8 @@ struct sctp_chunk {
void sctp_chunk_hold(struct sctp_chunk *);
void sctp_chunk_put(struct sctp_chunk *);
-int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
- struct iovec *data);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+ struct iov_iter *from);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
@@ -1116,7 +1116,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
sctp_scope_t sctp_scope(const union sctp_addr *);
int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
-int sctp_addr_is_valid(const union sctp_addr *addr);
int sctp_is_ep_boundall(struct sock *sk);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index f1f27fdbb0d5..35512ac6dcfb 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -146,19 +146,15 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS(mib, field, addend) \
this_cpu_add(mib->mibs[field], addend)
-/*
- * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
- * to make @ptr a non-percpu pointer.
- */
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
this_cpu_inc(ptr[basefield##PKTS]); \
this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
__this_cpu_inc(ptr[basefield##PKTS]); \
__this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
@@ -168,7 +164,7 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS64_BH(mib, field, addend) \
do { \
- __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \
+ __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[field] += addend; \
u64_stats_update_end(&ptr->syncp); \
@@ -189,8 +185,8 @@ struct linux_xfrm_mib {
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib) *ptr; \
- ptr = __this_cpu_ptr(mib); \
+ __typeof__(*mib) *ptr; \
+ ptr = raw_cpu_ptr((mib)); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend; \
diff --git a/include/net/sock.h b/include/net/sock.h
index b9a5bd0ed9f3..2210fec65669 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,8 +54,8 @@
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/page_counter.h>
#include <linux/memcontrol.h>
-#include <linux/res_counter.h>
#include <linux/static_key.h>
#include <linux/aio.h>
#include <linux/sched.h>
@@ -233,7 +233,6 @@ struct cg_proto;
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_write_queue: Packet sending queue
- * @sk_async_wait_queue: DMA copied packets
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
@@ -274,6 +273,7 @@ struct cg_proto;
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
* @sk_rxhash: flow hash received from netif layer
+ * @sk_incoming_cpu: record cpu processing incoming packets
* @sk_txhash: computed flow hash for use on transmit
* @sk_filter: socket filtering instructions
* @sk_protinfo: private area, net family specific, when not using slab
@@ -351,6 +351,12 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
+ u16 sk_incoming_cpu;
+ /* 16bit hole
+ * Warning: sk_incoming_cpu can be set from softirq,
+ * Do not use this hole without fully understanding possible issues.
+ */
+
__u32 sk_txhash;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
@@ -362,10 +368,6 @@ struct sock {
struct sk_filter __rcu *sk_filter;
struct socket_wq __rcu *sk_wq;
-#ifdef CONFIG_NET_DMA
- struct sk_buff_head sk_async_wait_queue;
-#endif
-
#ifdef CONFIG_XFRM
struct xfrm_policy *sk_policy[2];
#endif
@@ -838,6 +840,11 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return sk->sk_backlog_rcv(sk, skb);
}
+static inline void sk_incoming_cpu_update(struct sock *sk)
+{
+ sk->sk_incoming_cpu = raw_smp_processor_id();
+}
+
static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
@@ -902,6 +909,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
if (!__rc) { \
*(__timeo) = schedule_timeout(*(__timeo)); \
} \
+ sched_annotate_sleep(); \
lock_sock(__sk); \
__rc = __condition; \
__rc; \
@@ -1066,7 +1074,7 @@ enum cg_proto_flags {
};
struct cg_proto {
- struct res_counter memory_allocated; /* Current allocated memory. */
+ struct page_counter memory_allocated; /* Current allocated memory. */
struct percpu_counter sockets_allocated; /* Current number of sockets. */
int memory_pressure;
long sysctl_mem[3];
@@ -1218,34 +1226,26 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
unsigned long amt,
int *parent_status)
{
- struct res_counter *fail;
- int ret;
+ page_counter_charge(&prot->memory_allocated, amt);
- ret = res_counter_charge_nofail(&prot->memory_allocated,
- amt << PAGE_SHIFT, &fail);
- if (ret < 0)
+ if (page_counter_read(&prot->memory_allocated) >
+ prot->memory_allocated.limit)
*parent_status = OVER_LIMIT;
}
static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
unsigned long amt)
{
- res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
-}
-
-static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
-{
- u64 ret;
- ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
- return ret >> PAGE_SHIFT;
+ page_counter_uncharge(&prot->memory_allocated, amt);
}
static inline long
sk_memory_allocated(const struct sock *sk)
{
struct proto *prot = sk->sk_prot;
+
if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return memcg_memory_allocated_read(sk->sk_cgrp);
+ return page_counter_read(&sk->sk_cgrp->memory_allocated);
return atomic_long_read(prot->memory_allocated);
}
@@ -1259,7 +1259,7 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
/* update the root cgroup regardless */
atomic_long_add_return(amt, prot->memory_allocated);
- return memcg_memory_allocated_read(sk->sk_cgrp);
+ return page_counter_read(&sk->sk_cgrp->memory_allocated);
}
return atomic_long_add_return(amt, prot->memory_allocated);
@@ -1574,7 +1574,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
+void sock_efree(struct sk_buff *skb);
+#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
+#else
+#define sock_edemux(skb) sock_efree(skb)
+#endif
int sock_setsockopt(struct socket *sock, int level, int op,
char __user *optval, unsigned int optlen);
@@ -1588,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
/*
@@ -1872,29 +1878,6 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
return 0;
}
-static inline int skb_copy_to_page(struct sock *sk, char __user *from,
- struct sk_buff *skb, struct page *page,
- int off, int copy)
-{
- if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from,
- page_address(page) + off,
- copy, 0, &err);
- if (err)
- return err;
- skb->csum = csum_block_add(skb->csum, csum, skb->len);
- } else if (copy_from_user(page_address(page) + off, from, copy))
- return -EFAULT;
-
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
- sk->sk_wmem_queued += copy;
- sk_mem_charge(sk, copy);
- return 0;
-}
-
/**
* sk_wmem_alloc_get - returns write allocations
* @sk: socket
@@ -2041,6 +2024,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
/*
* Recover an error report and clear atomically
@@ -2193,6 +2177,8 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk->sk_stamp = skb->tstamp;
}
+void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
@@ -2200,33 +2186,27 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
*
* Note : callers should take care of initial *tx_flags value (usually 0)
*/
-void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+{
+ if (unlikely(sk->sk_tsflags))
+ __sock_tx_timestamp(sk, tx_flags);
+ if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
+ *tx_flags |= SKBTX_WIFI_STATUS;
+}
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
* @skb: socket buffer to eat
- * @copied_early: flag indicating whether DMA operations copied this data early
*
* This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok.
*/
-#ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
-{
- __skb_unlink(skb, &sk->sk_receive_queue);
- if (!copied_early)
- __kfree_skb(skb);
- else
- __skb_queue_tail(&sk->sk_async_wait_queue, skb);
-}
-#else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
-#endif
static inline
struct net *sock_net(const struct sock *sk)
@@ -2279,16 +2259,6 @@ bool sk_ns_capable(const struct sock *sk,
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);
-/*
- * Enable debug/info messages
- */
-extern int net_msg_warn;
-#define NETDEBUG(fmt, args...) \
- do { if (net_msg_warn) printk(fmt,##args); } while (0)
-
-#define LIMIT_NETDEBUG(fmt, args...) \
- do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
-
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
new file mode 100644
index 000000000000..8a6d1641fd9b
--- /dev/null
+++ b/include/net/switchdev.h
@@ -0,0 +1,37 @@
+/*
+ * include/net/switchdev.h - Switch device API
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _LINUX_SWITCHDEV_H_
+#define _LINUX_SWITCHDEV_H_
+
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NET_SWITCHDEV
+
+int netdev_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid);
+int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
+
+#else
+
+static inline int netdev_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netdev_switch_port_stp_update(struct net_device *dev,
+ u8 state)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* _LINUX_SWITCHDEV_H_ */
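
Callers are expected to treat -EOPNOTSUPP from these hooks as "no switch behind this port". A hedged sketch of a consumer comparing switch parent IDs (struct netdev_phys_item_id is assumed to carry the id/id_len pair declared in netdevice.h):

#include <linux/string.h>
#include <net/switchdev.h>

/* illustrative: do two netdevs sit behind the same switch ASIC? */
static bool foo_same_switch(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id id_a, id_b;

	if (netdev_switch_parent_id_get(a, &id_a) ||
	    netdev_switch_parent_id_get(b, &id_b))
		return false;		/* -EOPNOTSUPP: not an offloaded port */

	return id_a.id_len == id_b.id_len &&
	       !memcmp(id_a.id, id_b.id, id_a.id_len);
}
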
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
new file mode 100644
index 000000000000..93b70ade1ff3
--- /dev/null
+++ b/include/net/tc_act/tc_vlan.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_VLAN_H
+#define __NET_TC_VLAN_H
+
+#include <net/act_api.h>
+
+#define VLAN_F_POP 0x1
+#define VLAN_F_PUSH 0x2
+
+struct tcf_vlan {
+ struct tcf_common common;
+ int tcfv_action;
+ u16 tcfv_push_vid;
+ __be16 tcfv_push_proto;
+};
+#define to_vlan(a) \
+ container_of(a->priv, struct tcf_vlan, common)
+
+#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 590e01a476ac..f50f29faf76f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -27,7 +27,6 @@
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
-#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
@@ -56,9 +55,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
-/*
+/*
* Never offer a window over 32767 without using window scaling. Some
- * poor stacks do signed 16bit maths!
+ * poor stacks do signed 16bit maths!
*/
#define MAX_TCP_WINDOW 32767U
@@ -71,9 +70,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3
-/* Maximal reordering. */
-#define TCP_MAX_REORDERING 127
-
/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS 16U
@@ -168,7 +164,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/*
* TCP option
*/
-
+
#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_EOL 0 /* End of options */
#define TCPOPT_MSS 2 /* Segment size negotiating */
@@ -253,6 +249,7 @@ extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
+extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
@@ -262,7 +259,6 @@ extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
-extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
@@ -368,7 +364,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
-void tcp_cleanup_rbuf(struct sock *sk, int copied);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
@@ -471,8 +466,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
-struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
- struct ip_options *opt);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -496,17 +490,16 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
-#endif
-
__u32 cookie_init_timestamp(struct request_sock *req);
-bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
- bool *ecn_ok);
+bool cookie_timestamp_decode(struct tcp_options_received *opt);
+bool cookie_ecn_ok(const struct tcp_options_received *opt,
+ const struct net *net, const struct dst_entry *dst);
/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
-#ifdef CONFIG_SYN_COOKIES
+
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
@@ -672,6 +665,12 @@ void tcp_send_window_probe(struct sock *sk);
*/
#define tcp_time_stamp ((__u32)(jiffies))
+static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
+{
+ return skb->skb_mstamp.stamp_jiffies;
+}
+
+
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
#define TCPHDR_FIN 0x01
@@ -690,15 +689,18 @@ void tcp_send_window_probe(struct sock *sk);
* If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
*/
struct tcp_skb_cb {
- union {
- struct inet_skb_parm h4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
-#endif
- } header; /* For incoming frames */
__u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
- __u32 when; /* used to compute rtt's */
+ union {
+ /* Note : tcp_tw_isn is used in input path only
+ * (isn chosen by tcp_timewait_state_process())
+ *
+ * tcp_gso_segs is used in write queue only,
+ * cf tcp_skb_pcount()
+ */
+ __u32 tcp_tw_isn;
+ __u32 tcp_gso_segs;
+ };
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
__u8 sacked; /* State flags for SACK/FACK. */
@@ -714,33 +716,43 @@ struct tcp_skb_cb {
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
/* 1 byte hole */
__u32 ack_seq; /* Sequence number ACK'd */
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header; /* For incoming frames */
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
- *
- * If we receive a SYN packet with these bits set, it means a network is
- * playing bad games with TOS bits. In order to avoid possible false congestion
- * notifications, we disable TCP ECN negociation.
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* This is the variant of inet6_iif() that must be used by TCP,
+ * as TCP moves IP6CB into a different location in skb->cb[]
*/
-static inline void
-TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
- struct net *net)
+static inline int tcp_v6_iif(const struct sk_buff *skb)
{
- const struct tcphdr *th = tcp_hdr(skb);
-
- if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
- INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
- inet_rsk(req)->ecn_ok = 1;
+ return TCP_SKB_CB(skb)->header.h6.iif;
}
+#endif
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->gso_segs;
+ return TCP_SKB_CB(skb)->tcp_gso_segs;
+}
+
+static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
+{
+ TCP_SKB_CB(skb)->tcp_gso_segs = segs;
+}
+
+static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
+{
+ TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}
/* This is valid iff tcp_skb_pcount() > 1. */
@@ -755,8 +767,17 @@ enum tcp_ca_event {
CA_EVENT_CWND_RESTART, /* congestion window restart */
CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
CA_EVENT_LOSS, /* loss timeout */
- CA_EVENT_FAST_ACK, /* in sequence ack */
- CA_EVENT_SLOW_ACK, /* other ack */
+ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
+ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
+ CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
+ CA_EVENT_NON_DELAYED_ACK,
+};
+
+/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
+enum tcp_ca_ack_event_flags {
+ CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
+ CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
+ CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
};
/*
@@ -766,7 +787,10 @@ enum tcp_ca_event {
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
+/* Requires ECN/ECT set on all packets */
+#define TCP_CONG_NEEDS_ECN 0x2
struct tcp_congestion_ops {
struct list_head list;
@@ -785,6 +809,8 @@ struct tcp_congestion_ops {
void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+ /* call when ack arrives (optional) */
+ void (*in_ack_event)(struct sock *sk, u32 flags);
/* new value of cwnd after loss (optional) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
@@ -799,6 +825,7 @@ struct tcp_congestion_ops {
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
@@ -807,14 +834,20 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
-int tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
-extern struct tcp_congestion_ops tcp_init_congestion_ops;
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+static inline bool tcp_ca_needs_ecn(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
+}
+
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1031,12 +1064,6 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
tp->ucopy.len = 0;
tp->ucopy.memory = 0;
skb_queue_head_init(&tp->ucopy.prequeue);
-#ifdef CONFIG_NET_DMA
- tp->ucopy.dma_chan = NULL;
- tp->ucopy.wakeup = 0;
- tp->ucopy.pinned_list = NULL;
- tp->ucopy.dma_cookie = 0;
-#endif
}
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
@@ -1074,16 +1101,16 @@ static inline int tcp_win_from_space(int space)
space - (space>>sysctl_tcp_adv_win_scale);
}
-/* Note: caller must be prepared to deal with negative returns */
+/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
return tcp_win_from_space(sk->sk_rcvbuf -
atomic_read(&sk->sk_rmem_alloc));
-}
+}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk->sk_rcvbuf);
+ return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_openreq_init(struct request_sock *req,
@@ -1646,4 +1673,24 @@ int tcpv4_offload_init(void);
void tcp_v4_init(void);
void tcp_init(void);
+/*
+ * Save and compile IPv4 options, return a pointer to it
+ */
+static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
+{
+ const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
+ struct ip_options_rcu *dopt = NULL;
+
+ if (opt->optlen) {
+ int opt_size = sizeof(*dopt) + opt->optlen;
+
+ dopt = kmalloc(opt_size, GFP_ATOMIC);
+ if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
+ kfree(dopt);
+ dopt = NULL;
+ }
+ }
+ return dopt;
+}
+
#endif /* _TCP_H */
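
The new in_ack_event() hook and CA_ACK_* flags above give congestion modules a single ACK-time callback instead of the old FAST/SLOW_ACK cwnd events. A minimal sketch of a module using it (the foo_ names and the pr_debug are illustrative; the reno helpers are the ones exported by this header):

#include <linux/module.h>
#include <net/tcp.h>

static void foo_in_ack_event(struct sock *sk, u32 flags)
{
	/* a real module would fold window updates / ECE signals into its state */
	if (flags & (CA_ACK_WIN_UPDATE | CA_ACK_ECE))
		pr_debug("foo: ack flags 0x%x\n", flags);
}

static struct tcp_congestion_ops foo_cong_ops __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.in_ack_event	= foo_in_ack_event,
	.name		= "foo",
	.owner		= THIS_MODULE,
};
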
diff --git a/include/net/udp.h b/include/net/udp.h
index 70f941368ace..07f9b70962f6 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len);
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ struct udphdr *uh);
+int udp_gro_complete(struct sk_buff *skb, int nhoff);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, hlen))
+ uh = skb_gro_header_slow(skb, hlen, off);
+
+ return uh;
+}
+
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline void udp_lib_hash(struct sock *sk)
{
@@ -221,7 +239,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features);
+ netdev_features_t features,
+ bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index ffd69cbded35..2a50a70ef587 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -1,6 +1,14 @@
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
+
struct udp_port_cfg {
u8 family;
@@ -26,7 +34,89 @@ struct udp_port_cfg {
use_udp6_rx_checksums:1;
};
-int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
- struct socket **sockp);
+int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+#else
+static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ return 0;
+}
+#endif
+
+static inline int udp_sock_create(struct net *net,
+ struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ if (cfg->family == AF_INET)
+ return udp_sock_create4(net, cfg, sockp);
+
+ if (cfg->family == AF_INET6)
+ return udp_sock_create6(net, cfg, sockp);
+
+ return -EPFNOSUPPORT;
+}
+
+typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+
+struct udp_tunnel_sock_cfg {
+ void *sk_user_data; /* user data used by encap_rcv call back */
+ /* Used for setting up udp_sock fields, see udp.h for details */
+ __u8 encap_type;
+ udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_destroy_t encap_destroy;
+};
+
+/* Setup the given (UDP) sock to receive UDP encapsulated packets */
+void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *sock_cfg);
+
+/* Transmit the skb using UDP encapsulation. */
+int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst,
+ __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
+ __be16 dst_port, bool xnet);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
+ struct sk_buff *skb, struct net_device *dev,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be16 src_port,
+ __be16 dst_port);
+#endif
+
+void udp_tunnel_sock_release(struct socket *sock);
+
+static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
+ bool udp_csum)
+{
+ int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+
+ return iptunnel_handle_offloads(skb, udp_csum, type);
+}
+
+static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
+ skb_shinfo(skb)->gso_type |= uh->check ?
+ SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+}
+
+static inline void udp_tunnel_encap_enable(struct socket *sock)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sock->sk->sk_family == PF_INET6)
+ ipv6_stub->udpv6_encap_enable();
+ else
+#endif
+ udp_encap_enable();
+}
#endif
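
With the create4/create6 split, a tunnel driver keeps one call site and lets udp_sock_create() dispatch on cfg->family. A minimal IPv4-only sketch, assuming only the fields visible above (the address/port members of udp_port_cfg are left at their zero defaults, and the encap_type value is a placeholder):

#include <net/udp_tunnel.h>

static int foo_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver would decapsulate here */
	return 0;		/* 0: consumed */
}

static int foo_open_tunnel(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg port_cfg = { .family = AF_INET };
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type = 1,		/* UDP_ENCAP_* value, assumed */
		.encap_rcv  = foo_encap_rcv,
	};
	int err;

	err = udp_sock_create(net, &port_cfg, sockp);
	if (err < 0)
		return err;

	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}
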
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 2caadabcd07b..ae7c8d1fbcad 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -19,7 +19,9 @@ extern struct udp_table udplite_table;
static __inline__ int udplite_getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
- return memcpy_fromiovecend(to, (struct iovec *) from, offset, len);
+ struct msghdr *msg = from;
+ /* XXX: stripping const */
+ return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
}
/* Designate sk as UDP-Lite socket */
@@ -40,7 +42,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
* checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
* with a zero checksum field are illegal. */
if (uh->check == 0) {
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n");
+ net_dbg_ratelimited("UDPLite: zeroed checksum field\n");
return 1;
}
@@ -52,8 +54,8 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
/*
* Coverage length violates RFC 3828: log and discard silently.
*/
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n",
- cscov, skb->len);
+ net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n",
+ cscov, skb->len);
return 1;
} else if (cscov < skb->len) {
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index d5f59f3fc35d..903461aa5644 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -1,6 +1,9 @@
#ifndef __NET_VXLAN_H
#define __NET_VXLAN_H 1
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
@@ -8,6 +11,12 @@
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+/* VXLAN protocol header */
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+};
+
struct vxlan_sock;
typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
@@ -45,6 +54,35 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ u8 l4_hdr = 0;
+
+ if (!skb->encapsulation)
+ return features;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if ((l4_hdr == IPPROTO_UDP) &&
+ (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
/* IPv6 header + UDP + VXLAN + Ethernet header */
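
vxlan_features_check() above only trims checksum/GSO offloads when the inner packet is not the UDP/VXLAN/Ethernet layout a NIC can parse. A hedged sketch of a driver chaining to it from its feature-check hook (the hook signature is assumed from drivers of this period):

#include <net/vxlan.h>

/* illustrative: restrict advertised offloads for encapsulated skbs */
static netdev_features_t foo_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}
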
diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h
deleted file mode 100644
index 10ab0fc6d4f7..000000000000
--- a/include/net/wpan-phy.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#ifndef WPAN_PHY_H
-#define WPAN_PHY_H
-
-#include <linux/netdevice.h>
-#include <linux/mutex.h>
-#include <linux/bug.h>
-
-/* According to the IEEE 802.15.4 stadard the upper most significant bits of
- * the 32-bit channel bitmaps shall be used as an integer value to specify 32
- * possible channel pages. The lower 27 bits of the channel bit map shall be
- * used as a bit mask to specify channel numbers within a channel page.
- */
-#define WPAN_NUM_CHANNELS 27
-#define WPAN_NUM_PAGES 32
-
-struct wpan_phy {
- struct mutex pib_lock;
-
- /*
- * This is a PIB according to 802.15.4-2011.
- * We do not provide timing-related variables, as they
- * aren't used outside of driver
- */
- u8 current_channel;
- u8 current_page;
- u32 channels_supported[32];
- s8 transmit_power;
- u8 cca_mode;
- u8 min_be;
- u8 max_be;
- u8 csma_retries;
- s8 frame_retries;
-
- bool lbt;
- s32 cca_ed_level;
-
- struct device dev;
- int idx;
-
- struct net_device *(*add_iface)(struct wpan_phy *phy,
- const char *name, int type);
- void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
-
- int (*set_txpower)(struct wpan_phy *phy, int db);
- int (*set_lbt)(struct wpan_phy *phy, bool on);
- int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode);
- int (*set_cca_ed_level)(struct wpan_phy *phy, int level);
- int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be,
- u8 retries);
- int (*set_frame_retries)(struct wpan_phy *phy, s8 retries);
-
- char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
-};
-
-#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
-
-struct wpan_phy *wpan_phy_alloc(size_t priv_size);
-static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
-{
- phy->dev.parent = dev;
-}
-int wpan_phy_register(struct wpan_phy *phy);
-void wpan_phy_unregister(struct wpan_phy *phy);
-void wpan_phy_free(struct wpan_phy *phy);
-/* Same semantics as for class_for_each_device */
-int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data);
-
-static inline void *wpan_phy_priv(struct wpan_phy *phy)
-{
- BUG_ON(!phy);
- return &phy->priv;
-}
-
-struct wpan_phy *wpan_phy_find(const char *str);
-
-static inline void wpan_phy_put(struct wpan_phy *phy)
-{
- put_device(&phy->dev);
-}
-
-static inline const char *wpan_phy_name(struct wpan_phy *phy)
-{
- return dev_name(&phy->dev);
-}
-#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 721e9c3b11bd..dc4865e90fe4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1591,6 +1591,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
+void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 47da53c27ffa..79abb9c71772 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -8,6 +8,7 @@
#include <linux/tracepoint.h>
#include <linux/edac.h>
#include <linux/ktime.h>
+#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>
@@ -173,25 +174,34 @@ TRACE_EVENT(mc_event,
* u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED
*/
-#define aer_correctable_errors \
- {BIT(0), "Receiver Error"}, \
- {BIT(6), "Bad TLP"}, \
- {BIT(7), "Bad DLLP"}, \
- {BIT(8), "RELAY_NUM Rollover"}, \
- {BIT(12), "Replay Timer Timeout"}, \
- {BIT(13), "Advisory Non-Fatal"}
-
-#define aer_uncorrectable_errors \
- {BIT(4), "Data Link Protocol"}, \
- {BIT(12), "Poisoned TLP"}, \
- {BIT(13), "Flow Control Protocol"}, \
- {BIT(14), "Completion Timeout"}, \
- {BIT(15), "Completer Abort"}, \
- {BIT(16), "Unexpected Completion"}, \
- {BIT(17), "Receiver Overflow"}, \
- {BIT(18), "Malformed TLP"}, \
- {BIT(19), "ECRC"}, \
- {BIT(20), "Unsupported Request"}
+#define aer_correctable_errors \
+ {PCI_ERR_COR_RCVR, "Receiver Error"}, \
+ {PCI_ERR_COR_BAD_TLP, "Bad TLP"}, \
+ {PCI_ERR_COR_BAD_DLLP, "Bad DLLP"}, \
+ {PCI_ERR_COR_REP_ROLL, "RELAY_NUM Rollover"}, \
+ {PCI_ERR_COR_REP_TIMER, "Replay Timer Timeout"}, \
+ {PCI_ERR_COR_ADV_NFAT, "Advisory Non-Fatal Error"}, \
+ {PCI_ERR_COR_INTERNAL, "Corrected Internal Error"}, \
+ {PCI_ERR_COR_LOG_OVER, "Header Log Overflow"}
+
+#define aer_uncorrectable_errors \
+ {PCI_ERR_UNC_UND, "Undefined"}, \
+ {PCI_ERR_UNC_DLP, "Data Link Protocol Error"}, \
+ {PCI_ERR_UNC_SURPDN, "Surprise Down Error"}, \
+ {PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"}, \
+ {PCI_ERR_UNC_FCP, "Flow Control Protocol Error"}, \
+ {PCI_ERR_UNC_COMP_TIME, "Completion Timeout"}, \
+ {PCI_ERR_UNC_COMP_ABORT,"Completer Abort"}, \
+ {PCI_ERR_UNC_UNX_COMP, "Unexpected Completion"}, \
+ {PCI_ERR_UNC_RX_OVER, "Receiver Overflow"}, \
+ {PCI_ERR_UNC_MALF_TLP, "Malformed TLP"}, \
+ {PCI_ERR_UNC_ECRC, "ECRC Error"}, \
+ {PCI_ERR_UNC_UNSUP, "Unsupported Request Error"}, \
+ {PCI_ERR_UNC_ACSV, "ACS Violation"}, \
+ {PCI_ERR_UNC_INTN, "Uncorrectable Internal Error"},\
+ {PCI_ERR_UNC_MCBTLP, "MC Blocked TLP"}, \
+ {PCI_ERR_UNC_ATOMEG, "AtomicOp Egress Blocked"}, \
+ {PCI_ERR_UNC_TLPPRE, "TLP Prefix Blocked Error"}
TRACE_EVENT(aer_event,
TP_PROTO(const char *dev_name,
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a2bf41e0bde9..2d83cfd7e6ce 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,11 +38,12 @@
#include <linux/workqueue.h>
struct ib_ucontext;
+struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
size_t length;
- int offset;
+ unsigned long address;
int page_size;
int writable;
int hugetlb;
@@ -50,17 +51,43 @@ struct ib_umem {
struct pid *pid;
struct mm_struct *mm;
unsigned long diff;
+ struct ib_umem_odp *odp_data;
struct sg_table sg_head;
int nmap;
int npages;
};
+/* Returns the offset of the umem start relative to the first page. */
+static inline int ib_umem_offset(struct ib_umem *umem)
+{
+ return umem->address & ((unsigned long)umem->page_size - 1);
+}
+
+/* Returns the address of the first page of an ODP umem. */
+static inline unsigned long ib_umem_start(struct ib_umem *umem)
+{
+ return umem->address - ib_umem_offset(umem);
+}
+
+/* Returns the address of the page after the last one of an ODP umem. */
+static inline unsigned long ib_umem_end(struct ib_umem *umem)
+{
+ return PAGE_ALIGN(umem->address + umem->length);
+}
+
+static inline size_t ib_umem_num_pages(struct ib_umem *umem)
+{
+ return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;
+}
+
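/*
 * Worked example (illustration only, assuming PAGE_SIZE == page_size ==
 * 4096): a umem with address 0x10010 and length 0x2000 yields
 * ib_umem_offset() == 0x10, ib_umem_start() == 0x10000,
 * ib_umem_end() == 0x13000 and ib_umem_num_pages() == 3.
 */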
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
size_t size, int access, int dmasync);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
+int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -73,7 +100,10 @@ static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
-
+static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length) {
+ return -EINVAL;
+}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
new file mode 100644
index 000000000000..3da0b167041b
--- /dev/null
+++ b/include/rdma/ib_umem_odp.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_UMEM_ODP_H
+#define IB_UMEM_ODP_H
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <linux/interval_tree.h>
+
+struct umem_odp_node {
+ u64 __subtree_last;
+ struct rb_node rb;
+};
+
+struct ib_umem_odp {
+ /*
+ * An array of the pages included in the on-demand paging umem.
+ * Indices of pages that are currently not mapped into the device will
+ * contain NULL.
+ */
+ struct page **page_list;
+ /*
+ * An array of the same size as page_list, with DMA addresses mapped
+ * for the pages in page_list. The lower two bits designate
+ * access permissions. See ODP_READ_ALLOWED_BIT and
+ * ODP_WRITE_ALLOWED_BIT.
+ */
+ dma_addr_t *dma_list;
+ /*
+ * The umem_mutex protects the page_list and dma_list fields of an ODP
+ * umem, allowing only a single thread to map/unmap pages. The mutex
+ * also protects access to the mmu notifier counters.
+ */
+ struct mutex umem_mutex;
+ void *private; /* for the HW driver to use. */
+
+ /* When false, use the notifier counter in the ucontext struct. */
+ bool mn_counters_active;
+ int notifiers_seq;
+ int notifiers_count;
+
+ /* A linked list of umems that don't have private mmu notifier
+ * counters yet. */
+ struct list_head no_private_counters;
+ struct ib_umem *umem;
+
+ /* Tree tracking */
+ struct umem_odp_node interval_tree;
+
+ struct completion notifier_completion;
+ int dying;
+};
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
+
+void ib_umem_odp_release(struct ib_umem *umem);
+
+/*
+ * The lower 2 bits of the DMA address signal the R/W permissions for
+ * the entry. To upgrade the permissions, provide the appropriate
+ * bitmask to the map_dma_pages function.
+ *
+ * Be aware that upgrading a mapped address might result in change of
+ * the DMA address for the page.
+ */
+#define ODP_READ_ALLOWED_BIT (1<<0ULL)
+#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
+
+#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
+
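/*
 * Illustrative helper, not part of this patch: split a dma_list entry
 * into the bus address and its write permission. The name is
 * hypothetical; drivers would open-code the equivalent.
 */
static inline dma_addr_t odp_dma_entry_example(dma_addr_t entry, bool *writable)
{
	*writable = !!(entry & ODP_WRITE_ALLOWED_BIT);
	return entry & ODP_DMA_ADDR_MASK;
}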
+int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
+ u64 access_mask, unsigned long current_seq);
+
+void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
+ u64 bound);
+
+void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
+void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);
+typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
+ void *cookie);
+/*
+ * Call the callback on each ib_umem in the range. Returns the logical or of
+ * the return values of the functions called.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
+ umem_call_back cb, void *cookie);
+
+struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
+ u64 start, u64 last);
+struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
+ u64 start, u64 last);
+
+static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
+ unsigned long mmu_seq)
+{
+ /*
+ * This code is strongly based on the KVM code from
+ * mmu_notifier_retry. Should be called with
+ * the relevant locks taken (item->odp_data->umem_mutex
+ * and the ucontext umem_rwsem locked for read).
+ */
+
+ /* Do not allow page faults while the new ib_umem hasn't seen a state
+ * with zero notifiers yet, and doesn't have its own valid set of
+ * private counters. */
+ if (!item->odp_data->mn_counters_active)
+ return 1;
+
+ if (unlikely(item->odp_data->notifiers_count))
+ return 1;
+ if (item->odp_data->notifiers_seq != mmu_seq)
+ return 1;
+ return 0;
+}
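/*
 * Typical use, sketched under assumptions and mirroring the KVM pattern
 * referenced above: sample the notifier sequence, map the pages, then
 * retry if an invalidation raced with the mapping. "odp_fault_example"
 * and its arguments are hypothetical.
 */
static inline int odp_fault_example(struct ib_umem *umem, u64 start, u64 bcnt,
				    u64 access_mask)
{
	unsigned long seq;
	int ret;

	do {
		seq = umem->odp_data->notifiers_seq;
		ret = ib_umem_odp_map_dma_pages(umem, start, bcnt,
						access_mask, seq);
		if (ret < 0)
			return ret;
		mutex_lock(&umem->odp_data->umem_mutex);
		ret = ib_umem_mmu_notifier_retry(umem, seq);
		mutex_unlock(&umem->odp_data->umem_mutex);
	} while (ret);

	return 0;
}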
+
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+static inline int ib_umem_odp_get(struct ib_ucontext *context,
+ struct ib_umem *umem)
+{
+ return -EINVAL;
+}
+
+static inline void ib_umem_odp_release(struct ib_umem *umem) {}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+#endif /* IB_UMEM_ODP_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index ed44cc07a7b3..0d74f1de99aa 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -51,6 +51,7 @@
#include <uapi/linux/if_ether.h>
#include <linux/atomic.h>
+#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
extern struct workqueue_struct *ib_wq;
@@ -123,7 +124,8 @@ enum ib_device_cap_flags {
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
- IB_DEVICE_SIGNATURE_HANDOVER = (1<<30)
+ IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
+ IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
};
enum ib_signature_prot_cap {
@@ -143,6 +145,27 @@ enum ib_atomic_cap {
IB_ATOMIC_GLOB
};
+enum ib_odp_general_cap_bits {
+ IB_ODP_SUPPORT = 1 << 0,
+};
+
+enum ib_odp_transport_cap_bits {
+ IB_ODP_SUPPORT_SEND = 1 << 0,
+ IB_ODP_SUPPORT_RECV = 1 << 1,
+ IB_ODP_SUPPORT_WRITE = 1 << 2,
+ IB_ODP_SUPPORT_READ = 1 << 3,
+ IB_ODP_SUPPORT_ATOMIC = 1 << 4,
+};
+
+struct ib_odp_caps {
+ uint64_t general_caps;
+ struct {
+ uint32_t rc_odp_caps;
+ uint32_t uc_odp_caps;
+ uint32_t ud_odp_caps;
+ } per_transport_caps;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -186,6 +209,7 @@ struct ib_device_attr {
u8 local_ca_ack_delay;
int sig_prot_cap;
int sig_guard_cap;
+ struct ib_odp_caps odp_caps;
};
enum ib_mtu {
@@ -491,20 +515,14 @@ struct ib_mr_init_attr {
u32 flags;
};
-enum ib_signature_type {
- IB_SIG_TYPE_T10_DIF,
-};
-
/**
- * T10-DIF Signature types
- * T10-DIF types are defined by SCSI
- * specifications.
+ * Signature types
+ * IB_SIG_TYPE_NONE: Unprotected.
+ * IB_SIG_TYPE_T10_DIF: Type T10-DIF
*/
-enum ib_t10_dif_type {
- IB_T10DIF_NONE,
- IB_T10DIF_TYPE1,
- IB_T10DIF_TYPE2,
- IB_T10DIF_TYPE3
+enum ib_signature_type {
+ IB_SIG_TYPE_NONE,
+ IB_SIG_TYPE_T10_DIF,
};
/**
@@ -520,24 +538,26 @@ enum ib_t10_dif_bg_type {
/**
* struct ib_t10_dif_domain - Parameters specific for T10-DIF
* domain.
- * @type: T10-DIF type (0|1|2|3)
* @bg_type: T10-DIF block guard type (CRC|CSUM)
* @pi_interval: protection information interval.
* @bg: seed of guard computation.
* @app_tag: application tag of guard block
* @ref_tag: initial guard block reference tag.
- * @type3_inc_reftag: T10-DIF type 3 does not state
- * about the reference tag, it is the user
- * choice to increment it or not.
+ * @ref_remap: Indicate whether the reftag increments on each block
+ * @app_escape: Indicate to skip block check if apptag=0xffff
+ * @ref_escape: Indicate to skip block check if reftag=0xffffffff
+ * @apptag_check_mask: check bitmask of application tag.
*/
struct ib_t10_dif_domain {
- enum ib_t10_dif_type type;
enum ib_t10_dif_bg_type bg_type;
u16 pi_interval;
u16 bg;
u16 app_tag;
u32 ref_tag;
- bool type3_inc_reftag;
+ bool ref_remap;
+ bool app_escape;
+ bool ref_escape;
+ u16 apptag_check_mask;
};
/**
@@ -1077,7 +1097,8 @@ enum ib_access_flags {
IB_ACCESS_REMOTE_READ = (1<<2),
IB_ACCESS_REMOTE_ATOMIC = (1<<3),
IB_ACCESS_MW_BIND = (1<<4),
- IB_ZERO_BASED = (1<<5)
+ IB_ZERO_BASED = (1<<5),
+ IB_ACCESS_ON_DEMAND = (1<<6),
};
struct ib_phys_buf {
@@ -1119,6 +1140,8 @@ struct ib_fmr_attr {
u8 page_shift;
};
+struct ib_umem;
+
struct ib_ucontext {
struct ib_device *device;
struct list_head pd_list;
@@ -1131,6 +1154,24 @@ struct ib_ucontext {
struct list_head xrcd_list;
struct list_head rule_list;
int closing;
+
+ struct pid *tgid;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct rb_root umem_tree;
+ /*
+ * Protects the umem_tree, as well as odp_mrs_count and
+ * mmu notifiers registration.
+ */
+ struct rw_semaphore umem_rwsem;
+ void (*invalidate_range)(struct ib_umem *umem,
+ unsigned long start, unsigned long end);
+
+ struct mmu_notifier mn;
+ atomic_t notifier_count;
+ /* A list of umems that don't have private mmu notifier counters yet. */
+ struct list_head no_private_counters;
+ int odp_mrs_count;
+#endif
};
struct ib_uobject {
@@ -1666,7 +1707,10 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
- return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
+ size_t copy_sz;
+
+ copy_sz = min_t(size_t, len, udata->outlen);
+ return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
}
/**
diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h
deleted file mode 100644
index 30d48f6da228..000000000000
--- a/include/rxrpc/types.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* types.h: Rx types
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_TYPES_H
-#define _LINUX_RXRPC_TYPES_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-
-typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */
-typedef uint32_t rxrpc_serial_t; /* Rx message serial number */
-typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */
-typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
-
-struct rxrpc_call;
-struct rxrpc_connection;
-struct rxrpc_header;
-struct rxrpc_message;
-struct rxrpc_operation;
-struct rxrpc_peer;
-struct rxrpc_service;
-typedef struct rxrpc_timer rxrpc_timer_t;
-struct rxrpc_transport;
-
-typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call);
-typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call);
-typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call);
-
-#endif /* _LINUX_RXRPC_TYPES_H */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 52beadf9a29b..93d14daf0994 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -1105,8 +1105,6 @@ int fc_eh_abort(struct scsi_cmnd *);
int fc_eh_device_reset(struct scsi_cmnd *);
int fc_eh_host_reset(struct scsi_cmnd *);
int fc_slave_alloc(struct scsi_device *);
-int fc_change_queue_depth(struct scsi_device *, int qdepth, int reason);
-int fc_change_queue_type(struct scsi_device *, int tag_type);
/*
* ELS/CT interface
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 728c9ad9feb0..4d1c46aac331 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -378,8 +378,6 @@ struct iscsi_host {
/*
* scsi host template
*/
-extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth,
- int reason);
extern int iscsi_eh_abort(struct scsi_cmnd *sc);
extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ef7872c20da9..dae99d7d2bc0 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,17 +161,12 @@ struct expander_device {
};
/* ---------- SATA device ---------- */
-enum ata_command_set {
- ATA_COMMAND_SET = 0,
- ATAPI_COMMAND_SET = 1,
-};
-
#define ATA_RESP_FIS_SIZE 24
struct sata_device {
- enum ata_command_set command_set;
- struct smp_resp rps_resp; /* report_phy_sata_resp */
- u8 port_no; /* port number, if this is a PM (Port) */
+ unsigned int class;
+ struct smp_resp rps_resp; /* report_phy_sata_resp */
+ u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
struct ata_host ata_host;
@@ -365,12 +360,6 @@ struct asd_sas_phy {
struct scsi_core {
struct Scsi_Host *shost;
- struct mutex task_queue_flush;
- spinlock_t task_queue_lock;
- struct list_head task_queue;
- int task_queue_size;
-
- struct task_struct *queue_thread;
};
struct sas_ha_event {
@@ -422,9 +411,6 @@ struct sas_ha_struct {
struct asd_sas_port **sas_port; /* array of valid pointers, must be set */
int num_phys; /* must be set, gt 0, static */
- /* The class calls this to send a task for execution. */
- int lldd_max_execute_num;
- int lldd_queue_size;
int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
* their siblings when forming wide ports */
@@ -612,7 +598,6 @@ struct sas_ssp_task {
struct sas_task {
struct domain_device *dev;
- struct list_head list;
spinlock_t task_state_lock;
unsigned task_state_flags;
@@ -665,8 +650,7 @@ struct sas_domain_function_template {
int (*lldd_dev_found)(struct domain_device *);
void (*lldd_dev_gone)(struct domain_device *);
- int (*lldd_execute_task)(struct sas_task *, int num,
- gfp_t gfp_flags);
+ int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags);
/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
@@ -700,13 +684,10 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
-int sas_queue_up(struct sas_task *task);
extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
-extern int sas_change_queue_depth(struct scsi_device *, int new_depth,
- int reason);
-extern int sas_change_queue_type(struct scsi_device *, int qt);
+extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *,
struct block_device *,
sector_t capacity, int *hsc);
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index b2e85fdd2ae0..a09cca829082 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Panasas Inc. All rights reserved.
*
* Authors:
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
* Benny Halevy <bhalevy@panasas.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
index 6ca3265a4dca..7a8d2cd30328 100644
--- a/include/scsi/osd_ore.h
+++ b/include/scsi/osd_ore.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2011
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
*
* Public Declarations of the ORE API
*
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index a2594afe05c7..e0ca835e7bf7 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Panasas Inc. All rights reserved.
*
* Authors:
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
* Benny Halevy <bhalevy@panasas.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -496,7 +496,7 @@ struct osd_timestamp {
*/
struct osd_key_identifier {
- u8 id[7]; /* if you know why 7 please email bharrosh@panasas.com */
+ u8 id[7]; /* if you know why 7 please email ooo@electrozaur.com */
} __packed;
/* for osd_capability.format */
diff --git a/include/scsi/osd_sec.h b/include/scsi/osd_sec.h
index f96151c9c9e8..7abeb0f0db30 100644
--- a/include/scsi/osd_sec.h
+++ b/include/scsi/osd_sec.h
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Panasas Inc. All rights reserved.
*
* Authors:
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
* Benny Halevy <bhalevy@panasas.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h
index 91db543a5502..d52aa93a0b2d 100644
--- a/include/scsi/osd_sense.h
+++ b/include/scsi/osd_sense.h
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Panasas Inc. All rights reserved.
*
* Authors:
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
* Benny Halevy <bhalevy@panasas.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
index bd0be7ed4bcf..48e8a165e136 100644
--- a/include/scsi/osd_types.h
+++ b/include/scsi/osd_types.h
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Panasas Inc. All rights reserved.
*
* Authors:
- * Boaz Harrosh <bharrosh@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
* Benny Halevy <bhalevy@panasas.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 261e708010da..8a7f8ad58aac 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -128,8 +128,10 @@ enum scsi_timeouts {
#define MOVE_MEDIUM 0xa5
#define EXCHANGE_MEDIUM 0xa6
#define READ_12 0xa8
+#define SERVICE_ACTION_OUT_12 0xa9
#define WRITE_12 0xaa
-#define READ_MEDIA_SERIAL_NUMBER 0xab
+#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
+#define SERVICE_ACTION_IN_12 0xab
#define WRITE_VERIFY_12 0xae
#define VERIFY_12 0xaf
#define SEARCH_HIGH_12 0xb0
@@ -151,7 +153,9 @@ enum scsi_timeouts {
#define VERIFY_16 0x8f
#define SYNCHRONIZE_CACHE_16 0x91
#define WRITE_SAME_16 0x93
-#define SERVICE_ACTION_IN 0x9e
+#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
+#define SERVICE_ACTION_IN_16 0x9e
+#define SERVICE_ACTION_OUT_16 0x9f
/* values for service action in */
#define SAI_READ_CAPACITY_16 0x10
#define SAI_GET_LBA_STATUS 0x12
@@ -165,8 +169,8 @@ enum scsi_timeouts {
#define MI_REPORT_ALIASES 0x0b
#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
-#define MI_REPORT_PRIORITY 0x0e
-#define MI_REPORT_TIMESTAMP 0x0f
+#define MI_REPORT_PRIORITY 0x0e
+#define MI_REPORT_TIMESTAMP 0x0f
#define MI_MANAGEMENT_PROTOCOL_IN 0x10
/* value for MI_REPORT_TARGET_PGS ext header */
#define MI_EXT_HDR_PARAM_FMT 0x20
@@ -333,6 +337,7 @@ static inline int scsi_status_is_good(int status)
#define TYPE_RBC 0x0e
#define TYPE_OSD 0x11
#define TYPE_ZBC 0x14
+#define TYPE_WLUN 0x1e /* well-known logical unit */
#define TYPE_NO_LUN 0x7f
/* SCSI protocols; these are taken from SPC-3 section 7.5 */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 73f349044941..9fc1aecfc813 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,9 +10,10 @@
#include <scsi/scsi_device.h>
struct Scsi_Host;
-struct scsi_device;
struct scsi_driver;
+#include <scsi/scsi_device.h>
+
/*
* MAX_COMMAND_SIZE is:
* The longest fixed-length SCSI CDB as per the SCSI standard.
@@ -52,6 +53,9 @@ struct scsi_pointer {
volatile int phase;
};
+/* for scmd->flags */
+#define SCMD_TAGGED (1 << 0)
+
struct scsi_cmnd {
struct scsi_device *device;
struct list_head list; /* scsi_cmnd participates in queue lists */
@@ -81,6 +85,7 @@ struct scsi_cmnd {
unsigned char prot_op;
unsigned char prot_type;
+ unsigned char prot_flags;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
@@ -130,6 +135,7 @@ struct scsi_cmnd {
* to be at an address < 16Mb). */
int result; /* Status code from lower level driver */
+ int flags; /* Command flags */
unsigned char tag; /* SCSI-II queued command tag */
};
@@ -157,7 +163,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
+extern int scsi_init_io(struct scsi_cmnd *cmd);
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
@@ -252,6 +258,14 @@ static inline unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd)
return scmd->prot_op;
}
+enum scsi_prot_flags {
+ SCSI_PROT_TRANSFER_PI = 1 << 0,
+ SCSI_PROT_GUARD_CHECK = 1 << 1,
+ SCSI_PROT_REF_CHECK = 1 << 2,
+ SCSI_PROT_REF_INCREMENT = 1 << 3,
+ SCSI_PROT_IP_CHECKSUM = 1 << 4,
+};
+
/*
* The controller usually does not know anything about the target it
* is communicating with. However, when DIX is enabled the controller
@@ -280,6 +294,17 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
return blk_rq_pos(scmd->request);
}
+static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
+{
+ return scmd->device->sector_size;
+}
+
+static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
+{
+ return blk_rq_pos(scmd->request) >>
+ (ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
+}
+
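/*
 * Worked example (illustration only): with a 4096-byte protection
 * interval, ilog2(4096) - 9 == 3, so an I/O starting at 512-byte
 * sector 80 carries reference tag 80 >> 3 == 10, i.e. the tenth
 * 4 KiB block on the device.
 */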
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
@@ -316,17 +341,12 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
{
unsigned int xfer_len = scsi_out(scmd)->length;
- unsigned int prot_op = scsi_get_prot_op(scmd);
- unsigned int sector_size = scmd->device->sector_size;
+ unsigned int prot_interval = scsi_prot_interval(scmd);
- switch (prot_op) {
- case SCSI_PROT_NORMAL:
- case SCSI_PROT_WRITE_STRIP:
- case SCSI_PROT_READ_INSERT:
- return xfer_len;
- }
+ if (scmd->prot_flags & SCSI_PROT_TRANSFER_PI)
+ xfer_len += (xfer_len >> ilog2(prot_interval)) * 8;
- return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
+ return xfer_len;
}
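/*
 * Worked example (illustration only): for a 4096-byte data-out length
 * with SCMD flag SCSI_PROT_TRANSFER_PI set and a 512-byte protection
 * interval, 4096 >> ilog2(512) == 8 intervals, so 8 * 8 == 64 bytes of
 * PI are added and scsi_transfer_length() returns 4160.
 */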
#endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index e89844cc2cd3..7982795df595 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -2,23 +2,27 @@
#define _SCSI_SCSI_DBG_H
struct scsi_cmnd;
+struct scsi_device;
struct scsi_sense_hdr;
extern void scsi_print_command(struct scsi_cmnd *);
-extern void __scsi_print_command(unsigned char *);
-extern void scsi_show_extd_sense(unsigned char, unsigned char);
-extern void scsi_show_sense_hdr(struct scsi_sense_hdr *);
-extern void scsi_print_sense_hdr(const char *, struct scsi_sense_hdr *);
-extern void scsi_cmd_print_sense_hdr(struct scsi_cmnd *, const char *,
- struct scsi_sense_hdr *);
-extern void scsi_print_sense(char *, struct scsi_cmnd *);
-extern void __scsi_print_sense(const char *name,
+extern void __scsi_print_command(const unsigned char *, size_t);
+extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
+ unsigned char, unsigned char);
+extern void scsi_show_sense_hdr(const struct scsi_device *, const char *,
+ const struct scsi_sense_hdr *);
+extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
+ const struct scsi_sense_hdr *);
+extern void scsi_print_sense(const struct scsi_cmnd *);
+extern void __scsi_print_sense(const struct scsi_device *, const char *name,
const unsigned char *sense_buffer,
int sense_len);
-extern void scsi_show_result(int);
-extern void scsi_print_result(struct scsi_cmnd *);
-extern void scsi_print_status(unsigned char);
+extern void scsi_print_result(struct scsi_cmnd *, const char *, int);
+extern const char *scsi_hostbyte_string(int);
+extern const char *scsi_driverbyte_string(int);
+extern const char *scsi_mlreturn_string(int);
extern const char *scsi_sense_key_string(unsigned char);
-extern const char *scsi_extd_sense_format(unsigned char, unsigned char);
+extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
+ const char **);
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 1a0d1842962e..3a4edd1f7dbb 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -141,7 +141,6 @@ struct scsi_device {
unsigned ppr:1; /* Device supports PPR messages */
unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
unsigned simple_tags:1; /* simple queue tag messages are enabled */
- unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
unsigned was_reset:1; /* There was a bus reset on the bus for
* this device */
unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
@@ -174,6 +173,7 @@ struct scsi_device {
unsigned wce_default_on:1; /* Cache is ON by default */
unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
unsigned broken_fua:1; /* Don't set FUA bit */
+ unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
@@ -200,11 +200,6 @@ struct scsi_device {
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
-struct scsi_dh_devlist {
- char *vendor;
- char *model;
-};
-
typedef void (*activate_complete)(void *, int);
struct scsi_device_handler {
/* Used by the infrastructure */
@@ -213,9 +208,8 @@ struct scsi_device_handler {
/* Filled by the hardware handler */
struct module *module;
const char *name;
- const struct scsi_dh_devlist *devlist;
int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
- int (*attach)(struct scsi_device *);
+ struct scsi_dh_data *(*attach)(struct scsi_device *);
void (*detach)(struct scsi_device *);
int (*activate)(struct scsi_device *, activate_complete, void *);
int (*prep_fn)(struct scsi_device *, struct request *);
@@ -227,7 +221,6 @@ struct scsi_dh_data {
struct scsi_device_handler *scsi_dh;
struct scsi_device *sdev;
struct kref kref;
- char buf[0];
};
#define to_scsi_device(d) \
@@ -243,6 +236,15 @@ struct scsi_dh_data {
#define sdev_dbg(sdev, fmt, a...) \
dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)
+/*
+ * like scmd_printk, but the device name is passed in
+ * as a string pointer
+ */
+#define sdev_prefix_printk(l, sdev, p, fmt, a...) \
+ (p) ? \
+ sdev_printk(l, sdev, "[%s] " fmt, p, ##a) : \
+ sdev_printk(l, sdev, fmt, ##a)
+
#define scmd_printk(prefix, scmd, fmt, a...) \
(scmd)->request->rq_disk ? \
sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \
@@ -378,7 +380,7 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
#define __shost_for_each_device(sdev, shost) \
list_for_each_entry((sdev), &((shost)->__devices), siblings)
-extern void scsi_adjust_queue_depth(struct scsi_device *, int, int);
+extern int scsi_change_queue_depth(struct scsi_device *, int);
extern int scsi_track_queue_full(struct scsi_device *, int);
extern int scsi_set_medium_removal(struct scsi_device *, char);
@@ -439,13 +441,13 @@ static inline int scsi_execute_req(struct scsi_device *sdev,
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
extern int scsi_autopm_get_device(struct scsi_device *);
extern void scsi_autopm_put_device(struct scsi_device *);
#else
static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; }
static inline void scsi_autopm_put_device(struct scsi_device *d) {}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
{
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index c2b759809d8a..891a658aa867 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -9,7 +9,6 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_driver {
- struct module *owner;
struct device_driver gendrv;
void (*rescan)(struct device *);
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 06a8790893ef..1e1421b06565 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -27,10 +27,10 @@ struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
u8 additional_length; /* always 0 for fixed sense format */
};
-static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
+static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
{
if (!sshdr)
- return 0;
+ return false;
return (sshdr->response_code & 0x70) == 0x70;
}
@@ -42,12 +42,12 @@ extern void scsi_eh_flush_done_q(struct list_head *done_q);
extern void scsi_report_bus_reset(struct Scsi_Host *, int);
extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
-extern int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
- struct scsi_sense_hdr *sshdr);
-extern int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
- struct scsi_sense_hdr *sshdr);
+extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
+ struct scsi_sense_hdr *sshdr);
+extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
+ struct scsi_sense_hdr *sshdr);
-static inline int scsi_sense_is_deferred(struct scsi_sense_hdr *sshdr)
+static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
{
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
@@ -60,15 +60,7 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-/*
- * Reset request from external source
- */
-#define SCSI_TRY_RESET_DEVICE 1
-#define SCSI_TRY_RESET_BUS 2
-#define SCSI_TRY_RESET_HOST 3
-#define SCSI_TRY_RESET_TARGET 4
-
-extern int scsi_reset_provider(struct scsi_device *, int);
+extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
/* saved state */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ba2034779961..019e66858ce6 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -46,12 +46,6 @@ struct blk_queue_tags;
#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1
-enum {
- SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
- SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
- SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */
-};
-
struct scsi_host_template {
struct module *module;
const char *name;
@@ -195,7 +189,7 @@ struct scsi_host_template {
* Things currently recommended to be handled at this time include:
*
* 1. Setting the device queue depth. Proper setting of this is
- * described in the comments for scsi_adjust_queue_depth.
+ * described in the comments for scsi_change_queue_depth.
* 2. Determining if the device supports the various synchronous
* negotiation protocols. The device struct will already have
* responded to INQUIRY and the results of the standard items
@@ -281,20 +275,7 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* change_queue_depth)(struct scsi_device *, int, int);
-
- /*
- * Fill in this function to allow the changing of tag types
- * (this also allows the enabling/disabling of tag command
- * queueing). An error should only be returned if something
- * went wrong in the driver while trying to set the tag type.
- * If the driver doesn't support the requested tag type, then
- * it should set the closest type it does support without
- * returning an error. Returns the actual tag type set.
- *
- * Status: OPTIONAL
- */
- int (* change_queue_type)(struct scsi_device *, int);
+ int (* change_queue_depth)(struct scsi_device *, int);
/*
* This function determines the BIOS parameters for a given
@@ -422,6 +403,16 @@ struct scsi_host_template {
unsigned char present;
/*
+ * Let the block layer assign tags to all commands.
+ */
+ unsigned use_blk_tags:1;
+
+ /*
+ * Track QUEUE_FULL events and reduce queue depth on demand.
+ */
+ unsigned track_queue_depth:1;
+
+ /*
* This specifies the mode that a LLD supports.
*/
unsigned supported_mode:2;
@@ -451,11 +442,6 @@ struct scsi_host_template {
*/
unsigned skip_settle_delay:1;
- /*
- * True if we are using ordered write support.
- */
- unsigned ordered_tag:1;
-
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
@@ -555,7 +541,7 @@ struct Scsi_Host {
* __devices is protected by the host_lock, but you should
* usually use scsi_device_lookup / shost_for_each_device
* to access it and don't care about locking yourself.
- * In the rare case of beeing in irq context you can use
+ * In the rare case of being in irq context you can use
* their __ prefixed variants with the lock held. NEVER
* access this list directly from a driver.
*/
@@ -606,7 +592,7 @@ struct Scsi_Host {
/*
* These three parameters can be used to allow for wide scsi,
* and for host adapters that support multiple busses
- * The first two should be set to 1 more than the actual max id
+ * The last two should be set to 1 more than the actual max id
* or lun (e.g. 8 for SCSI parallel systems).
*/
unsigned int max_channel;
@@ -638,6 +624,14 @@ struct Scsi_Host {
short unsigned int sg_prot_tablesize;
unsigned int max_sectors;
unsigned long dma_boundary;
+ /*
+ * In scsi-mq mode, the number of hardware queues supported by the LLD.
+ *
+ * Note: it is assumed that each hardware queue has a queue depth of
+ * can_queue. In other words, the total queue depth per host
+ * is nr_hw_queues * can_queue.
+ */
+ unsigned nr_hw_queues;
/*
* Used to assign serial numbers to the cmds.
* Protected by the host lock.
@@ -647,7 +641,6 @@ struct Scsi_Host {
unsigned active_mode:2;
unsigned unchecked_isa_dma:1;
unsigned use_clustering:1;
- unsigned use_blk_tcq:1;
/*
* Host has requested that no further requests come through for the
@@ -662,11 +655,6 @@ struct Scsi_Host {
*/
unsigned reverse_ordering:1;
- /*
- * Ordered write support
- */
- unsigned ordered_tag:1;
-
/* Task mgmt function in progress */
unsigned tmf_in_progress:1;
@@ -680,6 +668,7 @@ struct Scsi_Host {
unsigned no_write_same:1;
unsigned use_blk_mq:1;
+ unsigned use_cmd_list:1;
/*
* Optional work queue to be utilized by the transport
@@ -692,6 +681,9 @@ struct Scsi_Host {
*/
struct workqueue_struct *tmf_work_q;
+ /* The transport requires the LUN bits NOT to be stored in CDB[1] */
+ unsigned no_scsi2_lun_in_cdb:1;
+
/*
* Value host_blocked counts down from
*/
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index b9006848b813..8d19d1d233c3 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -40,9 +40,9 @@ typedef struct scsi_fctargaddress {
unsigned char host_wwn[8]; // include NULL term.
} Scsi_FCTargAddress;
+int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
+ int cmd, bool ndelay);
extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-extern int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
- void __user *arg, int ndelay);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index e64583560701..9708b28bd2aa 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -6,123 +6,26 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
-#define MSG_SIMPLE_TAG 0x20
-#define MSG_HEAD_TAG 0x21
-#define MSG_ORDERED_TAG 0x22
-#define MSG_ACA_TAG 0x24 /* unsupported */
-
#define SCSI_NO_TAG (-1) /* identify no tag in use */
#ifdef CONFIG_BLOCK
-
-/**
- * scsi_get_tag_type - get the type of tag the device supports
- * @sdev: the scsi device
- *
- * Notes:
- * If the drive only supports simple tags, returns MSG_SIMPLE_TAG
- * if it supports all tag types, returns MSG_ORDERED_TAG.
- */
-static inline int scsi_get_tag_type(struct scsi_device *sdev)
-{
- if (!sdev->tagged_supported)
- return 0;
- if (sdev->ordered_tags)
- return MSG_ORDERED_TAG;
- if (sdev->simple_tags)
- return MSG_SIMPLE_TAG;
- return 0;
-}
-
-static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag)
-{
- switch (tag) {
- case MSG_ORDERED_TAG:
- sdev->ordered_tags = 1;
- /* fall through */
- case MSG_SIMPLE_TAG:
- sdev->simple_tags = 1;
- break;
- case 0:
- /* fall through */
- default:
- sdev->ordered_tags = 0;
- sdev->simple_tags = 0;
- break;
- }
-}
-/**
- * scsi_activate_tcq - turn on tag command queueing
- * @SDpnt: device to turn on TCQ for
- * @depth: queue depth
- *
- * Notes:
- * Eventually, I hope depth would be the maximum depth
- * the device could cope with and the real queue depth
- * would be adjustable from 0 to depth.
- **/
-static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
-{
- if (!sdev->tagged_supported)
- return;
-
- if (!shost_use_blk_mq(sdev->host) &&
- !blk_queue_tagged(sdev->request_queue))
- blk_queue_init_tags(sdev->request_queue, depth,
- sdev->host->bqt);
-
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
-}
-
-/**
- * scsi_deactivate_tcq - turn off tag command queueing
- * @SDpnt: device to turn off TCQ for
- **/
-static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
-{
- if (!shost_use_blk_mq(sdev->host) &&
- blk_queue_tagged(sdev->request_queue))
- blk_queue_free_tags(sdev->request_queue);
- scsi_adjust_queue_depth(sdev, 0, depth);
-}
-
-/**
- * scsi_populate_tag_msg - place a tag message in a buffer
- * @SCpnt: pointer to the Scsi_Cmnd for the tag
- * @msg: pointer to the area to place the tag
- *
- * Notes:
- * designed to create the correct type of tag message for the
- * particular request. Returns the size of the tag message.
- * May return 0 if TCQ is disabled for this device.
- **/
-static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
-{
- struct request *req = cmd->request;
-
- if (blk_rq_tagged(req)) {
- *msg++ = MSG_SIMPLE_TAG;
- *msg++ = req->tag;
- return 2;
- }
-
- return 0;
-}
-
static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
- unsigned int hw_ctx, int tag)
+ int unique_tag)
{
- struct request *req;
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ struct request *req = NULL;
- req = blk_mq_tag_to_rq(shost->tag_set.tags[hw_ctx], tag);
+ if (hwq < shost->tag_set.nr_hw_queues)
+ req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
+ blk_mq_unique_tag_to_tag(unique_tag));
return req ? (struct scsi_cmnd *)req->special : NULL;
}
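/*
 * Note (assumption, not part of this patch): callers on the scsi-mq path
 * are expected to pass the value produced by blk_mq_unique_tag(), which
 * packs the hardware-queue index into the upper 16 bits and the
 * per-queue tag into the lower 16 bits, as decoded above.
 */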
/**
* scsi_find_tag - find a tagged command by device
* @SDpnt: pointer to the ScSI device
- * @tag: the tag number
+ * @tag: tag generated by blk_mq_unique_tag()
*
* Notes:
* Only works with tags allocated by the generic blk layer.
@@ -133,9 +36,9 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
if (tag != SCSI_NO_TAG) {
if (shost_use_blk_mq(sdev->host))
- return scsi_mq_find_tag(sdev->host, 0, tag);
+ return scsi_mq_find_tag(sdev->host, tag);
- req = blk_queue_find_tag(sdev->request_queue, tag);
+ req = blk_queue_find_tag(sdev->request_queue, tag);
return req ? (struct scsi_cmnd *)req->special : NULL;
}
@@ -174,7 +77,7 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
/**
* scsi_host_find_tag - find the tagged command by host
* @shost: pointer to scsi_host
- * @tag: tag of the scsi_cmnd
+ * @tag: tag generated by blk_mq_unique_tag()
*
* Notes:
* Only works with tags allocated by the generic blk layer.
@@ -186,7 +89,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
if (tag != SCSI_NO_TAG) {
if (shost_use_blk_mq(shost))
- return scsi_mq_find_tag(shost, 0, tag);
+ return scsi_mq_find_tag(shost, tag);
req = blk_map_queue_find_tag(shost->bqt, tag);
return req ? (struct scsi_cmnd *)req->special : NULL;
}
diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h
index 7497a383b1a4..a4fa52b4d5c5 100644
--- a/include/scsi/scsi_transport_spi.h
+++ b/include/scsi/scsi_transport_spi.h
@@ -157,5 +157,6 @@ int spi_populate_width_msg(unsigned char *msg, int width);
int spi_populate_sync_msg(unsigned char *msg, int period, int offset);
int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width,
int options);
+int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd);
#endif /* SCSI_TRANSPORT_SPI_H */
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 750e5db7c6bf..3afec7032448 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -164,12 +164,15 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
/* Returns -EBUSY if occupied. 3rd argument pointer to int (see next) */
#define SG_SCSI_RESET 0x2284
-/* Associated values that can be given to SG_SCSI_RESET follow */
+/* Associated values that can be given to SG_SCSI_RESET follow.
+ * SG_SCSI_RESET_NO_ESCALATE may be OR-ed to the _DEVICE, _TARGET, _BUS
+ * or _HOST reset value so only that action is attempted. */
#define SG_SCSI_RESET_NOTHING 0
#define SG_SCSI_RESET_DEVICE 1
#define SG_SCSI_RESET_BUS 2
#define SG_SCSI_RESET_HOST 3
#define SG_SCSI_RESET_TARGET 4
+#define SG_SCSI_RESET_NO_ESCALATE 0x100
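/* Illustrative userspace call (not part of this patch; "sg_fd" is a
 * hypothetical open file descriptor on an sg device): request a device
 * reset only, without escalating on failure.
 *
 *	int val = SG_SCSI_RESET_DEVICE | SG_SCSI_RESET_NO_ESCALATE;
 *	if (ioctl(sg_fd, SG_SCSI_RESET, &val) < 0)
 *		perror("SG_SCSI_RESET");
 */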
/* synchronous SCSI command ioctl, (only in version 3 interface) */
#define SG_IO 0x2285 /* similar effect as write() followed by read() */
diff --git a/include/soc/at91/at91rm9200_sdramc.h b/include/soc/at91/at91rm9200_sdramc.h
new file mode 100644
index 000000000000..aa047f458f1b
--- /dev/null
+++ b/include/soc/at91/at91rm9200_sdramc.h
@@ -0,0 +1,63 @@
+/*
+ * arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Memory Controllers (SDRAMC only) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_SDRAMC_H
+#define AT91RM9200_SDRAMC_H
+
+/* SDRAM Controller registers */
+#define AT91RM9200_SDRAMC_MR 0x90 /* Mode Register */
+#define AT91RM9200_SDRAMC_MODE (0xf << 0) /* Command Mode */
+#define AT91RM9200_SDRAMC_MODE_NORMAL (0 << 0)
+#define AT91RM9200_SDRAMC_MODE_NOP (1 << 0)
+#define AT91RM9200_SDRAMC_MODE_PRECHARGE (2 << 0)
+#define AT91RM9200_SDRAMC_MODE_LMR (3 << 0)
+#define AT91RM9200_SDRAMC_MODE_REFRESH (4 << 0)
+#define AT91RM9200_SDRAMC_DBW (1 << 4) /* Data Bus Width */
+#define AT91RM9200_SDRAMC_DBW_32 (0 << 4)
+#define AT91RM9200_SDRAMC_DBW_16 (1 << 4)
+
+#define AT91RM9200_SDRAMC_TR 0x94 /* Refresh Timer Register */
+#define AT91RM9200_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Count */
+
+#define AT91RM9200_SDRAMC_CR 0x98 /* Configuration Register */
+#define AT91RM9200_SDRAMC_NC (3 << 0) /* Number of Column Bits */
+#define AT91RM9200_SDRAMC_NC_8 (0 << 0)
+#define AT91RM9200_SDRAMC_NC_9 (1 << 0)
+#define AT91RM9200_SDRAMC_NC_10 (2 << 0)
+#define AT91RM9200_SDRAMC_NC_11 (3 << 0)
+#define AT91RM9200_SDRAMC_NR (3 << 2) /* Number of Row Bits */
+#define AT91RM9200_SDRAMC_NR_11 (0 << 2)
+#define AT91RM9200_SDRAMC_NR_12 (1 << 2)
+#define AT91RM9200_SDRAMC_NR_13 (2 << 2)
+#define AT91RM9200_SDRAMC_NB (1 << 4) /* Number of Banks */
+#define AT91RM9200_SDRAMC_NB_2 (0 << 4)
+#define AT91RM9200_SDRAMC_NB_4 (1 << 4)
+#define AT91RM9200_SDRAMC_CAS (3 << 5) /* CAS Latency */
+#define AT91RM9200_SDRAMC_CAS_2 (2 << 5)
+#define AT91RM9200_SDRAMC_TWR (0xf << 7) /* Write Recovery Delay */
+#define AT91RM9200_SDRAMC_TRC (0xf << 11) /* Row Cycle Delay */
+#define AT91RM9200_SDRAMC_TRP (0xf << 15) /* Row Precharge Delay */
+#define AT91RM9200_SDRAMC_TRCD (0xf << 19) /* Row to Column Delay */
+#define AT91RM9200_SDRAMC_TRAS (0xf << 23) /* Active to Precharge Delay */
+#define AT91RM9200_SDRAMC_TXSR (0xf << 27) /* Exit Self Refresh to Active Delay */
+
+#define AT91RM9200_SDRAMC_SRR 0x9c /* Self Refresh Register */
+#define AT91RM9200_SDRAMC_LPR 0xa0 /* Low Power Register */
+#define AT91RM9200_SDRAMC_IER 0xa4 /* Interrupt Enable Register */
+#define AT91RM9200_SDRAMC_IDR 0xa8 /* Interrupt Disable Register */
+#define AT91RM9200_SDRAMC_IMR 0xac /* Interrupt Mask Register */
+#define AT91RM9200_SDRAMC_ISR 0xb0 /* Interrupt Status Register */
+
+#endif
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h
new file mode 100644
index 000000000000..0210797abf2e
--- /dev/null
+++ b/include/soc/at91/at91sam9_ddrsdr.h
@@ -0,0 +1,124 @@
+/*
+ * Header file for the Atmel DDR/SDR SDRAM Controller
+ *
+ * Copyright (C) 2010 Atmel Corporation
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef AT91SAM9_DDRSDR_H
+#define AT91SAM9_DDRSDR_H
+
+#define AT91_DDRSDRC_MR 0x00 /* Mode Register */
+#define AT91_DDRSDRC_MODE (0x7 << 0) /* Command Mode */
+#define AT91_DDRSDRC_MODE_NORMAL 0
+#define AT91_DDRSDRC_MODE_NOP 1
+#define AT91_DDRSDRC_MODE_PRECHARGE 2
+#define AT91_DDRSDRC_MODE_LMR 3
+#define AT91_DDRSDRC_MODE_REFRESH 4
+#define AT91_DDRSDRC_MODE_EXT_LMR 5
+#define AT91_DDRSDRC_MODE_DEEP 6
+
+#define AT91_DDRSDRC_RTR 0x04 /* Refresh Timer Register */
+#define AT91_DDRSDRC_COUNT (0xfff << 0) /* Refresh Timer Counter */
+
+#define AT91_DDRSDRC_CR 0x08 /* Configuration Register */
+#define AT91_DDRSDRC_NC (3 << 0) /* Number of Column Bits */
+#define AT91_DDRSDRC_NC_SDR8 (0 << 0)
+#define AT91_DDRSDRC_NC_SDR9 (1 << 0)
+#define AT91_DDRSDRC_NC_SDR10 (2 << 0)
+#define AT91_DDRSDRC_NC_SDR11 (3 << 0)
+#define AT91_DDRSDRC_NC_DDR9 (0 << 0)
+#define AT91_DDRSDRC_NC_DDR10 (1 << 0)
+#define AT91_DDRSDRC_NC_DDR11 (2 << 0)
+#define AT91_DDRSDRC_NC_DDR12 (3 << 0)
+#define AT91_DDRSDRC_NR (3 << 2) /* Number of Row Bits */
+#define AT91_DDRSDRC_NR_11 (0 << 2)
+#define AT91_DDRSDRC_NR_12 (1 << 2)
+#define AT91_DDRSDRC_NR_13 (2 << 2)
+#define AT91_DDRSDRC_NR_14 (3 << 2)
+#define AT91_DDRSDRC_CAS (7 << 4) /* CAS Latency */
+#define AT91_DDRSDRC_CAS_2 (2 << 4)
+#define AT91_DDRSDRC_CAS_3 (3 << 4)
+#define AT91_DDRSDRC_CAS_25 (6 << 4)
+#define AT91_DDRSDRC_RST_DLL (1 << 7) /* Reset DLL */
+#define AT91_DDRSDRC_DICDS (1 << 8) /* Output impedance control */
+#define AT91_DDRSDRC_DIS_DLL (1 << 9) /* Disable DLL [SAM9 Only] */
+#define AT91_DDRSDRC_OCD (1 << 12) /* Off-Chip Driver [SAM9 Only] */
+#define AT91_DDRSDRC_DQMS (1 << 16) /* Mask Data is Shared [SAM9 Only] */
+#define AT91_DDRSDRC_ACTBST (1 << 18) /* Active Bank X to Burst Stop Read Access Bank Y [SAM9 Only] */
+
+#define AT91_DDRSDRC_T0PR 0x0C /* Timing 0 Register */
+#define AT91_DDRSDRC_TRAS (0xf << 0) /* Active to Precharge delay */
+#define AT91_DDRSDRC_TRCD (0xf << 4) /* Row to Column delay */
+#define AT91_DDRSDRC_TWR (0xf << 8) /* Write recovery delay */
+#define AT91_DDRSDRC_TRC (0xf << 12) /* Row cycle delay */
+#define AT91_DDRSDRC_TRP (0xf << 16) /* Row precharge delay */
+#define AT91_DDRSDRC_TRRD (0xf << 20) /* Active BankA to BankB */
+#define AT91_DDRSDRC_TWTR (0x7 << 24) /* Internal Write to Read delay */
+#define AT91_DDRSDRC_RED_WRRD (0x1 << 27) /* Reduce Write to Read Delay [SAM9 Only] */
+#define AT91_DDRSDRC_TMRD (0xf << 28) /* Load mode to active/refresh delay */
+
+#define AT91_DDRSDRC_T1PR 0x10 /* Timing 1 Register */
+#define AT91_DDRSDRC_TRFC (0x1f << 0) /* Row Cycle Delay */
+#define AT91_DDRSDRC_TXSNR (0xff << 8) /* Exit self-refresh to non-read */
+#define AT91_DDRSDRC_TXSRD (0xff << 16) /* Exit self-refresh to read */
+#define AT91_DDRSDRC_TXP (0xf << 24) /* Exit power-down delay */
+
+#define AT91_DDRSDRC_T2PR 0x14 /* Timing 2 Register [SAM9 Only] */
+#define AT91_DDRSDRC_TXARD (0xf << 0) /* Exit active power down delay to read command in mode "Fast Exit" */
+#define AT91_DDRSDRC_TXARDS (0xf << 4) /* Exit active power down delay to read command in mode "Slow Exit" */
+#define AT91_DDRSDRC_TRPA (0xf << 8) /* Row Precharge All delay */
+#define AT91_DDRSDRC_TRTP (0x7 << 12) /* Read to Precharge delay */
+
+#define AT91_DDRSDRC_LPR 0x1C /* Low Power Register */
+#define AT91_DDRSDRC_LPCB (3 << 0) /* Low-power Configurations */
+#define AT91_DDRSDRC_LPCB_DISABLE 0
+#define AT91_DDRSDRC_LPCB_SELF_REFRESH 1
+#define AT91_DDRSDRC_LPCB_POWER_DOWN 2
+#define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3
+#define AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */
+#define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */
+#define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */
+#define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */
+#define AT91_DDRSDRC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */
+#define AT91_DDRSDRC_TIMEOUT_0_CLK_CYCLES (0 << 12)
+#define AT91_DDRSDRC_TIMEOUT_64_CLK_CYCLES (1 << 12)
+#define AT91_DDRSDRC_TIMEOUT_128_CLK_CYCLES (2 << 12)
+#define AT91_DDRSDRC_APDE (1 << 16) /* Active power down exit time */
+#define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */
+
+#define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */
+#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */
+#define AT91_DDRSDRC_MD_SDR 0
+#define AT91_DDRSDRC_MD_LOW_POWER_SDR 1
+#define AT91_DDRSDRC_MD_LOW_POWER_DDR 3
+#define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */
+#define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */
+#define AT91_DDRSDRC_DBW_32BITS (0 << 4)
+#define AT91_DDRSDRC_DBW_16BITS (1 << 4)
+
+#define AT91_DDRSDRC_DLL 0x24 /* DLL Information Register */
+#define AT91_DDRSDRC_MDINC (1 << 0) /* Master Delay increment */
+#define AT91_DDRSDRC_MDDEC (1 << 1) /* Master Delay decrement */
+#define AT91_DDRSDRC_MDOVF (1 << 2) /* Master Delay Overflow */
+#define AT91_DDRSDRC_MDVAL (0xff << 8) /* Master Delay value */
+
+#define AT91_DDRSDRC_HS 0x2C /* High Speed Register [SAM9 Only] */
+#define AT91_DDRSDRC_DIS_ATCP_RD (1 << 2) /* Anticipatory read access is disabled */
+
+#define AT91_DDRSDRC_DELAY(n) (0x30 + (0x4 * (n))) /* Delay I/O Register n */
+
+#define AT91_DDRSDRC_WPMR 0xE4 /* Write Protect Mode Register [SAM9 Only] */
+#define AT91_DDRSDRC_WP (1 << 0) /* Write protect enable */
+#define AT91_DDRSDRC_WPKEY (0xffffff << 8) /* Write protect key */
+#define AT91_DDRSDRC_KEY (0x444452 << 8) /* Write protect key = "DDR" */
+
+#define AT91_DDRSDRC_WPSR 0xE8 /* Write Protect Status Register [SAM9 Only] */
+#define AT91_DDRSDRC_WPVS (1 << 0) /* Write protect violation status */
+#define AT91_DDRSDRC_WPVSRC (0xffff << 8) /* Write protect violation source */
+
+#endif
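The field masks and values above are normally combined with readl()/writel() on the ioremapped controller base. Below is a minimal sketch, not part of this header, of putting the DDR controller into self-refresh before suspend; the helper name and the ddrc base pointer are assumptions for illustration.

#include <linux/io.h>

/* Hypothetical helper: ddrc is an ioremapped AT91 DDRSDRC register base. */
static void at91_ddrsdr_enter_self_refresh(void __iomem *ddrc)
{
	u32 lpr = readl(ddrc + AT91_DDRSDRC_LPR);

	/* Select self-refresh as the low-power mode, entered after 64 cycles. */
	lpr &= ~(AT91_DDRSDRC_LPCB | AT91_DDRSDRC_TIMEOUT);
	lpr |= AT91_DDRSDRC_LPCB_SELF_REFRESH |
	       AT91_DDRSDRC_TIMEOUT_64_CLK_CYCLES;
	writel(lpr, ddrc + AT91_DDRSDRC_LPR);
}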
diff --git a/include/soc/at91/at91sam9_sdramc.h b/include/soc/at91/at91sam9_sdramc.h
new file mode 100644
index 000000000000..3d085a9a7450
--- /dev/null
+++ b/include/soc/at91/at91sam9_sdramc.h
@@ -0,0 +1,85 @@
+/*
+ * arch/arm/mach-at91/include/mach/at91sam9_sdramc.h
+ *
+ * Copyright (C) 2007 Andrew Victor
+ * Copyright (C) 2007 Atmel Corporation.
+ *
+ * SDRAM Controllers (SDRAMC) - System peripherals registers.
+ * Based on AT91SAM9261 datasheet revision D.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91SAM9_SDRAMC_H
+#define AT91SAM9_SDRAMC_H
+
+/* SDRAM Controller (SDRAMC) registers */
+#define AT91_SDRAMC_MR 0x00 /* SDRAM Controller Mode Register */
+#define AT91_SDRAMC_MODE (0xf << 0) /* Command Mode */
+#define AT91_SDRAMC_MODE_NORMAL 0
+#define AT91_SDRAMC_MODE_NOP 1
+#define AT91_SDRAMC_MODE_PRECHARGE 2
+#define AT91_SDRAMC_MODE_LMR 3
+#define AT91_SDRAMC_MODE_REFRESH 4
+#define AT91_SDRAMC_MODE_EXT_LMR 5
+#define AT91_SDRAMC_MODE_DEEP 6
+
+#define AT91_SDRAMC_TR 0x04 /* SDRAM Controller Refresh Timer Register */
+#define AT91_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Counter */
+
+#define AT91_SDRAMC_CR 0x08 /* SDRAM Controller Configuration Register */
+#define AT91_SDRAMC_NC (3 << 0) /* Number of Column Bits */
+#define AT91_SDRAMC_NC_8 (0 << 0)
+#define AT91_SDRAMC_NC_9 (1 << 0)
+#define AT91_SDRAMC_NC_10 (2 << 0)
+#define AT91_SDRAMC_NC_11 (3 << 0)
+#define AT91_SDRAMC_NR (3 << 2) /* Number of Row Bits */
+#define AT91_SDRAMC_NR_11 (0 << 2)
+#define AT91_SDRAMC_NR_12 (1 << 2)
+#define AT91_SDRAMC_NR_13 (2 << 2)
+#define AT91_SDRAMC_NB (1 << 4) /* Number of Banks */
+#define AT91_SDRAMC_NB_2 (0 << 4)
+#define AT91_SDRAMC_NB_4 (1 << 4)
+#define AT91_SDRAMC_CAS (3 << 5) /* CAS Latency */
+#define AT91_SDRAMC_CAS_1 (1 << 5)
+#define AT91_SDRAMC_CAS_2 (2 << 5)
+#define AT91_SDRAMC_CAS_3 (3 << 5)
+#define AT91_SDRAMC_DBW (1 << 7) /* Data Bus Width */
+#define AT91_SDRAMC_DBW_32 (0 << 7)
+#define AT91_SDRAMC_DBW_16 (1 << 7)
+#define AT91_SDRAMC_TWR (0xf << 8) /* Write Recovery Delay */
+#define AT91_SDRAMC_TRC (0xf << 12) /* Row Cycle Delay */
+#define AT91_SDRAMC_TRP (0xf << 16) /* Row Precharge Delay */
+#define AT91_SDRAMC_TRCD (0xf << 20) /* Row to Column Delay */
+#define AT91_SDRAMC_TRAS (0xf << 24) /* Active to Precharge Delay */
+#define AT91_SDRAMC_TXSR (0xf << 28) /* Exit Self Refresh to Active Delay */
+
+#define AT91_SDRAMC_LPR 0x10 /* SDRAM Controller Low Power Register */
+#define AT91_SDRAMC_LPCB (3 << 0) /* Low-power Configurations */
+#define AT91_SDRAMC_LPCB_DISABLE 0
+#define AT91_SDRAMC_LPCB_SELF_REFRESH 1
+#define AT91_SDRAMC_LPCB_POWER_DOWN 2
+#define AT91_SDRAMC_LPCB_DEEP_POWER_DOWN 3
+#define AT91_SDRAMC_PASR (7 << 4) /* Partial Array Self Refresh */
+#define AT91_SDRAMC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */
+#define AT91_SDRAMC_DS (3 << 10) /* Drive Strength */
+#define AT91_SDRAMC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */
+#define AT91_SDRAMC_TIMEOUT_0_CLK_CYCLES (0 << 12)
+#define AT91_SDRAMC_TIMEOUT_64_CLK_CYCLES (1 << 12)
+#define AT91_SDRAMC_TIMEOUT_128_CLK_CYCLES (2 << 12)
+
+#define AT91_SDRAMC_IER 0x14 /* SDRAM Controller Interrupt Enable Register */
+#define AT91_SDRAMC_IDR 0x18 /* SDRAM Controller Interrupt Disable Register */
+#define AT91_SDRAMC_IMR 0x1C /* SDRAM Controller Interrupt Mask Register */
+#define AT91_SDRAMC_ISR 0x20 /* SDRAM Controller Interrupt Status Register */
+#define AT91_SDRAMC_RES (1 << 0) /* Refresh Error Status */
+
+#define AT91_SDRAMC_MDR 0x24 /* SDRAM Memory Device Register */
+#define AT91_SDRAMC_MD (3 << 0) /* Memory Device Type */
+#define AT91_SDRAMC_MD_SDRAM 0
+#define AT91_SDRAMC_MD_LOW_POWER_SDRAM 1
+
+#endif
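As with the DDR controller above, these defines are meant to be composed into register writes. The following sketch, under the assumption of an already ioremapped SDRAMC base, shows one plausible way to issue an auto-refresh command and program the refresh timer; the helper name is invented for illustration.

#include <linux/io.h>

/* Hypothetical helper: sdramc is an ioremapped AT91 SDRAMC register base. */
static void at91_sdramc_set_refresh(void __iomem *sdramc, u32 count)
{
	/* Issue an auto-refresh command, then program the refresh timer. */
	writel(AT91_SDRAMC_MODE_REFRESH, sdramc + AT91_SDRAMC_MR);
	writel(count & AT91_SDRAMC_COUNT, sdramc + AT91_SDRAMC_TR);
	writel(AT91_SDRAMC_MODE_NORMAL, sdramc + AT91_SDRAMC_MR);
}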
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
new file mode 100644
index 000000000000..63deb8d9f82a
--- /dev/null
+++ b/include/soc/tegra/mc.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_TEGRA_MC_H__
+#define __SOC_TEGRA_MC_H__
+
+#include <linux/types.h>
+
+struct clk;
+struct device;
+struct page;
+
+struct tegra_smmu_enable {
+ unsigned int reg;
+ unsigned int bit;
+};
+
+/* latency allowance */
+struct tegra_mc_la {
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int def;
+};
+
+struct tegra_mc_client {
+ unsigned int id;
+ const char *name;
+ unsigned int swgroup;
+
+ unsigned int fifo_size;
+
+ struct tegra_smmu_enable smmu;
+ struct tegra_mc_la la;
+};
+
+struct tegra_smmu_swgroup {
+ unsigned int swgroup;
+ unsigned int reg;
+};
+
+struct tegra_smmu_ops {
+ void (*flush_dcache)(struct page *page, unsigned long offset,
+ size_t size);
+};
+
+struct tegra_smmu_soc {
+ const struct tegra_mc_client *clients;
+ unsigned int num_clients;
+
+ const struct tegra_smmu_swgroup *swgroups;
+ unsigned int num_swgroups;
+
+ bool supports_round_robin_arbitration;
+ bool supports_request_limit;
+
+ unsigned int num_asids;
+
+ const struct tegra_smmu_ops *ops;
+};
+
+struct tegra_mc;
+struct tegra_smmu;
+
+#ifdef CONFIG_TEGRA_IOMMU_SMMU
+struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ const struct tegra_smmu_soc *soc,
+ struct tegra_mc *mc);
+#else
+static inline struct tegra_smmu *
+tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc,
+ struct tegra_mc *mc)
+{
+ return NULL;
+}
+#endif
+
+struct tegra_mc_soc {
+ const struct tegra_mc_client *clients;
+ unsigned int num_clients;
+
+ const unsigned int *emem_regs;
+ unsigned int num_emem_regs;
+
+ unsigned int num_address_bits;
+ unsigned int atom_size;
+
+ const struct tegra_smmu_soc *smmu;
+};
+
+struct tegra_mc {
+ struct device *dev;
+ struct tegra_smmu *smmu;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+
+ const struct tegra_mc_soc *soc;
+ unsigned long tick;
+};
+
+#endif /* __SOC_TEGRA_MC_H__ */
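A memory-controller driver fills these structures with per-SoC tables of clients, SMMU enable bits, and latency allowance registers. The sketch below is purely illustrative: the "example" names, the client ID, register offsets, and latency values are all invented and do not correspond to any real Tegra SoC.

#include <linux/kernel.h>

static const struct tegra_mc_client example_clients[] = {
	{
		.id = 0x01,
		.name = "example-client",
		.swgroup = 1,
		.fifo_size = 128,
		.smmu = { .reg = 0x228, .bit = 1 },
		.la = { .reg = 0x2e8, .shift = 0, .mask = 0xff, .def = 0x4e },
	},
};

static const struct tegra_mc_soc example_mc_soc = {
	.clients = example_clients,
	.num_clients = ARRAY_SIZE(example_clients),
	.num_address_bits = 34,
	.atom_size = 16,
};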
diff --git a/include/sound/atmel-abdac.h b/include/sound/atmel-abdac.h
index edff6a8ba1b5..a8f735d677fa 100644
--- a/include/sound/atmel-abdac.h
+++ b/include/sound/atmel-abdac.h
@@ -10,7 +10,7 @@
#ifndef __INCLUDE_SOUND_ATMEL_ABDAC_H
#define __INCLUDE_SOUND_ATMEL_ABDAC_H
-#include <linux/dw_dmac.h>
+#include <linux/platform_data/dma-dw.h>
/**
* struct atmel_abdac_pdata - board specific ABDAC configuration
diff --git a/include/sound/atmel-ac97c.h b/include/sound/atmel-ac97c.h
index 00e6c289a936..f2a1cdc37661 100644
--- a/include/sound/atmel-ac97c.h
+++ b/include/sound/atmel-ac97c.h
@@ -10,7 +10,7 @@
#ifndef __INCLUDE_SOUND_ATMEL_AC97C_H
#define __INCLUDE_SOUND_ATMEL_AC97C_H
-#include <linux/dw_dmac.h>
+#include <linux/platform_data/dma-dw.h>
#define AC97C_CAPTURE 0x01
#define AC97C_PLAYBACK 0x02
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index ae6c3b8ed2f5..396e8f73670a 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -42,12 +42,11 @@ struct snd_compr_ops;
* @buffer_size: size of the above buffer
* @fragment_size: size of buffer fragment in bytes
* @fragments: number of such fragments
- * @hw_pointer: offset of last location in buffer where DSP copied data
- * @app_pointer: offset of last location in buffer where app wrote data
* @total_bytes_available: cumulative number of bytes made available in
* the ring buffer
* @total_bytes_transferred: cumulative bytes transferred by offload DSP
* @sleep: poll sleep
+ * @private_data: driver private data pointer
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
@@ -94,6 +93,8 @@ struct snd_compr_stream {
* This can be called in during stream creation only to set codec params
* and the stream properties
* @get_params: retrieve the codec parameters, mandatory
+ * @set_metadata: Set the metadata values for a stream
+ * @get_metadata: retrieves the requested metadata values from the stream
* @trigger: Trigger operations like start, pause, resume, drain, stop.
* This callback is mandatory
* @pointer: Retrieve current h/w pointer information. Mandatory
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 58916573db58..218235030ebc 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -28,8 +28,23 @@
struct input_dev;
/**
- * Jack types which can be reported. These values are used as a
- * bitmask.
+ * enum snd_jack_types - Jack types which can be reported
+ * @SND_JACK_HEADPHONE: Headphone
+ * @SND_JACK_MICROPHONE: Microphone
+ * @SND_JACK_HEADSET: Headset
+ * @SND_JACK_LINEOUT: Line out
+ * @SND_JACK_MECHANICAL: Mechanical switch
+ * @SND_JACK_VIDEOOUT: Video out
+ * @SND_JACK_AVOUT: AV (Audio Video) out
+ * @SND_JACK_LINEIN: Line in
+ * @SND_JACK_BTN_0: Button 0
+ * @SND_JACK_BTN_1: Button 1
+ * @SND_JACK_BTN_2: Button 2
+ * @SND_JACK_BTN_3: Button 3
+ * @SND_JACK_BTN_4: Button 4
+ * @SND_JACK_BTN_5: Button 5
+ *
+ * These values are used as a bitmask.
*
* Note that this must be kept in sync with the lookup table in
* sound/core/jack.c.
@@ -90,6 +105,13 @@ static inline void snd_jack_set_parent(struct snd_jack *jack,
{
}
+static inline int snd_jack_set_key(struct snd_jack *jack,
+ enum snd_jack_types type,
+ int keytype)
+{
+ return 0;
+}
+
static inline void snd_jack_report(struct snd_jack *jack, int status)
{
}
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
new file mode 100644
index 000000000000..afdb416898e0
--- /dev/null
+++ b/include/sound/omap-hdmi-audio.h
@@ -0,0 +1,43 @@
+/*
+ * omap-hdmi-audio.h -- OMAP4+ DSS HDMI audio support library
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <video/omapdss.h>
+
+#ifndef __OMAP_HDMI_AUDIO_H__
+#define __OMAP_HDMI_AUDIO_H__
+
+struct omap_hdmi_audio_ops {
+ int (*audio_startup)(struct device *dev,
+ void (*abort_cb)(struct device *dev));
+ int (*audio_shutdown)(struct device *dev);
+ int (*audio_start)(struct device *dev);
+ void (*audio_stop)(struct device *dev);
+ int (*audio_config)(struct device *dev,
+ struct omap_dss_audio *dss_audio);
+};
+
+/* HDMI audio initialization data */
+struct omap_hdmi_audio_pdata {
+ struct device *dev;
+ enum omapdss_version dss_version;
+ phys_addr_t audio_dma_addr;
+
+ const struct omap_hdmi_audio_ops *ops;
+};
+
+#endif /* __OMAP_HDMI_AUDIO_H__ */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6f3e10ca0e32..1e7f74acc2ec 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -183,6 +183,9 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B)
#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8)
#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE)
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
@@ -365,6 +368,7 @@ struct snd_pcm_runtime {
struct snd_pcm_group { /* keep linked substreams */
spinlock_t lock;
+ struct mutex mutex;
struct list_head substreams;
int count;
};
@@ -414,7 +418,10 @@ struct snd_pcm_substream {
struct snd_info_entry *proc_status_entry;
struct snd_info_entry *proc_prealloc_entry;
struct snd_info_entry *proc_prealloc_max_entry;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ struct snd_info_entry *proc_xrun_injection_entry;
#endif
+#endif /* CONFIG_SND_VERBOSE_PROCFS */
/* misc flags */
unsigned int hw_opened: 1;
};
@@ -460,6 +467,7 @@ struct snd_pcm {
void (*private_free) (struct snd_pcm *pcm);
struct device *dev; /* actual hw device this belongs to */
bool internal; /* pcm is for internal use only */
+ bool nonatomic; /* whole PCM operations are in non-atomic context */
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
struct snd_pcm_oss oss;
#endif
@@ -492,8 +500,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
* Native I/O
*/
-extern rwlock_t snd_pcm_link_rwlock;
-
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
int snd_pcm_info_user(struct snd_pcm_substream *substream,
struct snd_pcm_info __user *info);
@@ -502,6 +508,7 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
int snd_pcm_start(struct snd_pcm_substream *substream);
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status);
int snd_pcm_drain_done(struct snd_pcm_substream *substream);
+int snd_pcm_stop_xrun(struct snd_pcm_substream *substream);
#ifdef CONFIG_PM
int snd_pcm_suspend(struct snd_pcm_substream *substream);
int snd_pcm_suspend_all(struct snd_pcm *pcm);
@@ -532,50 +539,59 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
* PCM library
*/
+/**
+ * snd_pcm_stream_linked - Check whether the substream is linked with others
+ * @substream: substream to check
+ *
+ * Returns true if the given substream is linked with others.
+ */
static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
{
return substream->group != &substream->self_group;
}
-static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
-{
- read_lock(&snd_pcm_link_rwlock);
- spin_lock(&substream->self_group.lock);
-}
+void snd_pcm_stream_lock(struct snd_pcm_substream *substream);
+void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
+void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
+void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
-static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
-{
- spin_unlock(&substream->self_group.lock);
- read_unlock(&snd_pcm_link_rwlock);
-}
-
-static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
-{
- read_lock_irq(&snd_pcm_link_rwlock);
- spin_lock(&substream->self_group.lock);
-}
-
-static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
-{
- spin_unlock(&substream->self_group.lock);
- read_unlock_irq(&snd_pcm_link_rwlock);
-}
-
-#define snd_pcm_stream_lock_irqsave(substream, flags) \
-do { \
- read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
- spin_lock(&substream->self_group.lock); \
-} while (0)
-
-#define snd_pcm_stream_unlock_irqrestore(substream, flags) \
-do { \
- spin_unlock(&substream->self_group.lock); \
- read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
-} while (0)
+/**
+ * snd_pcm_stream_lock_irqsave - Lock the PCM stream
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock() but also disables the
+ * local IRQ (only when nonatomic is false). In the nonatomic case, this is
+ * identical to snd_pcm_stream_lock().
+ */
+#define snd_pcm_stream_lock_irqsave(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave(substream); \
+ } while (0)
+void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ unsigned long flags);
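For reference, a brief sketch of how a driver would typically pair these calls around runtime-state updates; the function name and the elided body are assumptions for illustration only.

static void example_update_state(struct snd_pcm_substream *substream)
{
	unsigned long flags;

	/* flags must be unsigned long, as enforced by typecheck() above. */
	snd_pcm_stream_lock_irqsave(substream, flags);
	/* ... update substream->runtime state here ... */
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}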
+/**
+ * snd_pcm_group_for_each_entry - iterate over the linked substreams
+ * @s: the iterator
+ * @substream: the substream
+ *
+ * Iterate over all the substreams linked to the given @substream.
+ * When @substream isn't linked with any others, this returns @substream
+ * itself once.
+ */
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
+/**
+ * snd_pcm_running - Check whether the substream is in a running state
+ * @substream: substream to check
+ *
+ * Returns true if the given substream is in the state RUNNING, or in the
+ * state DRAINING for playback.
+ */
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
{
return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
@@ -583,45 +599,81 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream)
substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
}
+/**
+ * bytes_to_samples - Unit conversion of the size from bytes to samples
+ * @runtime: PCM runtime instance
+ * @size: size in bytes
+ */
static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * 8 / runtime->sample_bits;
}
+/**
+ * bytes_to_frames - Unit conversion of the size from bytes to frames
+ * @runtime: PCM runtime instance
+ * @size: size in bytes
+ */
static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * 8 / runtime->frame_bits;
}
+/**
+ * samples_to_bytes - Unit conversion of the size from samples to bytes
+ * @runtime: PCM runtime instance
+ * @size: size in samples
+ */
static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * runtime->sample_bits / 8;
}
+/**
+ * frames_to_bytes - Unit conversion of the size from frames to bytes
+ * @runtime: PCM runtime instance
+ * @size: size in frames
+ */
static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
{
return size * runtime->frame_bits / 8;
}
+/**
+ * frame_aligned - Check whether the byte size is aligned to frames
+ * @runtime: PCM runtime instance
+ * @bytes: size in bytes
+ */
static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
{
return bytes % runtime->byte_align == 0;
}
+/**
+ * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
+ * @substream: PCM substream
+ */
static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return frames_to_bytes(runtime, runtime->buffer_size);
}
+/**
+ * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
+ * @substream: PCM substream
+ */
static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return frames_to_bytes(runtime, runtime->period_size);
}
-/*
- * result is: 0 ... (boundary - 1)
+/**
+ * snd_pcm_playback_avail - Get the available (writable) space for playback
+ * @runtime: PCM runtime instance
+ *
+ * Result is between 0 ... (boundary - 1)
*/
static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
{
@@ -633,8 +685,11 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
return avail;
}
-/*
- * result is: 0 ... (boundary - 1)
+/**
+ * snd_pcm_capture_avail - Get the available (readable) space for capture
+ * @runtime: PCM runtime instance
+ *
+ * Result is between 0 ... (boundary - 1)
*/
static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
{
@@ -644,11 +699,19 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
return avail;
}
+/**
+ * snd_pcm_playback_hw_avail - Get the queued space for playback
+ * @runtime: PCM runtime instance
+ */
static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
{
return runtime->buffer_size - snd_pcm_playback_avail(runtime);
}
+/**
+ * snd_pcm_capture_hw_avail - Get the free space for capture
+ * @runtime: PCM runtime instance
+ */
static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
{
return runtime->buffer_size - snd_pcm_capture_avail(runtime);
@@ -728,6 +791,20 @@ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream)
return snd_pcm_capture_avail(runtime) == 0;
}
+/**
+ * snd_pcm_trigger_done - Mark the master substream
+ * @substream: the pcm substream instance
+ * @master: the linked master substream
+ *
+ * When multiple substreams of the same card are linked and the hardware
+ * supports the single-shot operation, the driver calls this in the loop
+ * in snd_pcm_group_for_each_entry() for marking the substream as "done".
+ * Then most of the trigger operations are performed only on the given master
+ * substream.
+ *
+ * The trigger_master mark is cleared at timestamp updates at the end
+ * of trigger operations.
+ */
static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream,
struct snd_pcm_substream *master)
{
@@ -770,18 +847,59 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL];
}
-#define params_channels(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min)
-#define params_rate(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_RATE)->min)
-#define params_period_size(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min)
-#define params_periods(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIODS)->min)
-#define params_buffer_size(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min)
-#define params_buffer_bytes(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min)
+/**
+ * params_channels - Get the number of channels from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min;
+}
+
+/**
+ * params_rate - Get the sample rate from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min;
+}
+
+/**
+ * params_period_size - Get the period size (in frames) from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min;
+}
+
+/**
+ * params_periods - Get the number of periods from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min;
+}
+
+/**
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min;
+}
+
+/**
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min;
+}
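These accessors are typically consulted in a driver's hw_params callback. The sketch below shows that pattern; the callback name and the debug print are illustrative, not part of this header.

/* Hypothetical driver callback using the accessors above. */
static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	unsigned int rate = params_rate(params);
	unsigned int channels = params_channels(params);
	size_t bytes = params_buffer_bytes(params);

	pr_debug("pcm: %u Hz, %u channels, %zu buffer bytes\n",
		 rate, channels, bytes);
	return 0;
}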
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v);
void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c);
@@ -903,6 +1021,14 @@ unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
unsigned int rates_b);
+/**
+ * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
+ * @substream: PCM substream to set
+ * @bufp: the buffer information, NULL to clear
+ *
+ * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL.
+ * Otherwise it clears the current buffer information.
+ */
static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream,
struct snd_dma_buffer *bufp)
{
@@ -928,6 +1054,11 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream);
void snd_pcm_timer_init(struct snd_pcm_substream *substream);
void snd_pcm_timer_done(struct snd_pcm_substream *substream);
+/**
+ * snd_pcm_gettime - Fill the timespec depending on the timestamp mode
+ * @runtime: PCM runtime instance
+ * @tv: timespec to fill
+ */
static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
struct timespec *tv)
{
@@ -964,7 +1095,6 @@ int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
unsigned long offset);
-#if 0 /* for kernel-doc */
/**
* snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer
* @substream: the substream to allocate the buffer to
@@ -977,8 +1107,13 @@ struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
* Return: 1 if the buffer was changed, 0 if not changed, or a negative error
* code.
*/
-static int snd_pcm_lib_alloc_vmalloc_buffer
- (struct snd_pcm_substream *substream, size_t size);
+static inline int snd_pcm_lib_alloc_vmalloc_buffer
+ (struct snd_pcm_substream *substream, size_t size)
+{
+ return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+
/**
* snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer
* @substream: the substream to allocate the buffer to
@@ -990,15 +1125,12 @@ static int snd_pcm_lib_alloc_vmalloc_buffer
* Return: 1 if the buffer was changed, 0 if not changed, or a negative error
* code.
*/
-static int snd_pcm_lib_alloc_vmalloc_32_buffer
- (struct snd_pcm_substream *substream, size_t size);
-#endif
-#define snd_pcm_lib_alloc_vmalloc_buffer(subs, size) \
- _snd_pcm_lib_alloc_vmalloc_buffer \
- (subs, size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)
-#define snd_pcm_lib_alloc_vmalloc_32_buffer(subs, size) \
- _snd_pcm_lib_alloc_vmalloc_buffer \
- (subs, size, GFP_KERNEL | GFP_DMA32 | __GFP_ZERO)
+static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
+ (struct snd_pcm_substream *substream, size_t size)
+{
+ return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
+ GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+}
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
@@ -1018,18 +1150,35 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
#define snd_pcm_sgbuf_ops_page NULL
#endif /* SND_DMA_SGBUF */
+/**
+ * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ */
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs);
}
+/**
+ * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ */
static inline void *
snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
}
+/**
+ * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
+ * contiguous page from the given size
+ * @substream: PCM substream
+ * @ofs: byte offset
+ * @size: byte size to examine
+ */
static inline unsigned int
snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
unsigned int ofs, unsigned int size)
@@ -1037,13 +1186,24 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size);
}
-/* handle mmap counter - PCM mmap callback should handle this counter properly */
+/**
+ * snd_pcm_mmap_data_open - increase the mmap counter
+ * @area: VMA
+ *
+ * PCM mmap callback should handle this counter properly
+ */
static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
{
struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
atomic_inc(&substream->mmap_count);
}
+/**
+ * snd_pcm_mmap_data_close - decrease the mmap counter
+ * @area: VMA
+ *
+ * PCM mmap callback should handle this counter properly
+ */
static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area)
{
struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
@@ -1063,6 +1223,11 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_vmalloc NULL
+/**
+ * snd_pcm_limit_isa_dma_size - Get the max size that fits within an ISA DMA transfer
+ * @dma: DMA number
+ * @max: pointer to store the max size
+ */
static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
{
*max = dma < 4 ? 64 * 1024 : 128 * 1024;
@@ -1115,7 +1280,11 @@ struct snd_pcm_chmap {
void *private_data; /* optional: private data pointer */
};
-/* get the PCM substream assigned to the given chmap info */
+/**
+ * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
+ * @info: chmap information
+ * @idx: the substream number index
+ */
static inline struct snd_pcm_substream *
snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
{
@@ -1142,7 +1311,10 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
unsigned long private_value,
struct snd_pcm_chmap **info_ret);
-/* Strong-typed conversion of pcm_format to bitwise */
+/**
+ * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
+ * @pcm_format: PCM format
+ */
static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
{
return 1ULL << (__force int) pcm_format;
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index d76412b84b48..83284cae464c 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -36,14 +36,14 @@
#define RSND_SSI_CLK_PIN_SHARE (1 << 31)
#define RSND_SSI_NO_BUSIF (1 << 30) /* SSI+DMA without BUSIF */
-#define RSND_SSI(_dma_id, _pio_irq, _flags) \
-{ .dma_id = _dma_id, .pio_irq = _pio_irq, .flags = _flags }
+#define RSND_SSI(_dma_id, _irq, _flags) \
+{ .dma_id = _dma_id, .irq = _irq, .flags = _flags }
#define RSND_SSI_UNUSED \
-{ .dma_id = -1, .pio_irq = -1, .flags = 0 }
+{ .dma_id = -1, .irq = -1, .flags = 0 }
struct rsnd_ssi_platform_info {
int dma_id;
- int pio_irq;
+ int irq;
u32 flags;
};
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index 1de744c242f6..120d9610054e 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -20,6 +20,13 @@ struct rt5645_platform_data {
/* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
unsigned int dmic2_data_pin;
/* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
+
+ unsigned int hp_det_gpio;
+ bool gpio_hp_det_active_high;
+
+ /* true if codec's jd function is used */
+ bool en_jd_func;
+ unsigned int jd_mode;
};
#endif
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index 3da14313bcfc..d9eb7d861cd0 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -12,10 +12,31 @@
#ifndef __LINUX_SND_RT5677_H
#define __LINUX_SND_RT5677_H
+enum rt5677_dmic2_clk {
+ RT5677_DMIC_CLK1 = 0,
+ RT5677_DMIC_CLK2 = 1,
+};
+
+
struct rt5677_platform_data {
- /* IN1 IN2 can optionally be differential */
+ /* IN1/IN2/LOUT1/LOUT2/LOUT3 can optionally be differential */
bool in1_diff;
bool in2_diff;
+ bool lout1_diff;
+ bool lout2_diff;
+ bool lout3_diff;
+ /* DMIC2 clock source selection */
+ enum rt5677_dmic2_clk dmic2_clk_pin;
+
+ /* configures GPIO, 0 - floating, 1 - pulldown, 2 - pullup */
+ u8 gpio_config[6];
+
+ /* jd1 can select 0 ~ 3 as OFF, GPIO1, GPIO2 and GPIO3 respectively */
+ unsigned int jd1_gpio;
+ /* jd2 and jd3 can select 0 ~ 3 as
+ OFF, GPIO4, GPIO5 and GPIO6 respectively */
+ unsigned int jd2_gpio;
+ unsigned int jd3_gpio;
};
#endif
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 2398521f0998..eea5400fe373 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -108,9 +108,13 @@ int snd_seq_event_port_detach(int client, int port);
#ifdef CONFIG_MODULES
void snd_seq_autoload_lock(void);
void snd_seq_autoload_unlock(void);
+void snd_seq_autoload_init(void);
+#define snd_seq_autoload_exit() snd_seq_autoload_lock()
#else
#define snd_seq_autoload_lock()
#define snd_seq_autoload_unlock()
+#define snd_seq_autoload_init()
+#define snd_seq_autoload_exit()
#endif
#endif /* __SOUND_SEQ_KERNEL_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e8b3080d196a..2df96b1384c7 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -206,7 +206,6 @@ struct snd_soc_dai_driver {
/* DAI description */
const char *name;
unsigned int id;
- int ac97_control;
unsigned int base;
/* DAI driver callbacks */
@@ -216,6 +215,8 @@ struct snd_soc_dai_driver {
int (*resume)(struct snd_soc_dai *dai);
/* compress dai */
bool compress_dai;
+ /* DAI is also used for the control bus */
+ bool bus_control;
/* ops */
const struct snd_soc_dai_ops *ops;
@@ -241,7 +242,6 @@ struct snd_soc_dai {
const char *name;
int id;
struct device *dev;
- void *ac97_pdata; /* platform_data for the ac97 codec */
/* driver ops */
struct snd_soc_dai_driver *driver;
@@ -268,7 +268,6 @@ struct snd_soc_dai {
unsigned int sample_bits;
/* parent platform/codec */
- struct snd_soc_platform *platform;
struct snd_soc_codec *codec;
struct snd_soc_component *component;
@@ -276,8 +275,6 @@ struct snd_soc_dai {
unsigned int tx_mask;
unsigned int rx_mask;
- struct snd_soc_card *card;
-
struct list_head list;
};
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index aac04ff84eea..89823cfe6f04 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -432,9 +432,10 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
+unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
/* Mostly internal - should not normally be used */
-void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
+void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
/* dapm path query */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
@@ -507,9 +508,9 @@ struct snd_soc_dapm_path {
/* status */
u32 connect:1; /* source and sink widgets are connected */
- u32 walked:1; /* path has been walked */
u32 walking:1; /* path is in the process of being walked */
u32 weak:1; /* path ignored for power management */
+ u32 is_supply:1; /* At least one of the connected widgets is a supply */
int (*connected)(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
@@ -543,11 +544,13 @@ struct snd_soc_dapm_widget {
unsigned char active:1; /* active stream on DAC, ADC's */
unsigned char connected:1; /* connected codec pin */
unsigned char new:1; /* cnew complete */
- unsigned char ext:1; /* has external widgets */
unsigned char force:1; /* force state */
unsigned char ignore_suspend:1; /* kept enabled over suspend */
unsigned char new_power:1; /* power from this run */
unsigned char power_checked:1; /* power checked this run */
+ unsigned char is_supply:1; /* Widget is a supply type widget */
+ unsigned char is_sink:1; /* Widget is a sink type widget */
+ unsigned char is_source:1; /* Widget is a source type widget */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -566,6 +569,7 @@ struct snd_soc_dapm_widget {
struct list_head sinks;
/* used during DAPM updates */
+ struct list_head work_list;
struct list_head power_list;
struct list_head dirty;
int inputs;
@@ -587,13 +591,13 @@ struct snd_soc_dapm_context {
enum snd_soc_bias_level suspend_bias_level;
struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
-
+ /* Go to BIAS_OFF in suspend if the DAPM context is idle */
+ unsigned int suspend_bias_off:1;
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
struct device *dev; /* from parent - for debug */
struct snd_soc_component *component; /* parent component */
- struct snd_soc_codec *codec; /* parent codec */
struct snd_soc_card *card; /* parent card */
/* used during DAPM updates */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 2883a7a6f9f3..98f2ade0266e 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime {
/* state and update */
enum snd_soc_dpcm_update runtime_update;
enum snd_soc_dpcm_state state;
+
+ int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
/* can this BE stop and free */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index c83a334dd00f..b4fca9aed2a2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -36,6 +36,11 @@
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
.rshift = shift_right, .max = xmax, .platform_max = xmax, \
.invert = xinvert, .autodisable = xautodisable})
+#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
+ ((unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, .shift = shift_left, \
+ .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \
+ .sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
@@ -171,11 +176,9 @@
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
SNDRV_CTL_ELEM_ACCESS_READWRITE, \
.tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw_s8, .get = snd_soc_get_volsw_s8, \
- .put = snd_soc_put_volsw_s8, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .min = xmin, .max = xmax, \
- .platform_max = xmax} }
+ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
+ .put = snd_soc_put_volsw, \
+ .private_value = SOC_DOUBLE_S_VALUE(xreg, 0, 8, xmin, xmax, 7, 0, 0) }
#define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xitems, xtexts) \
{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \
.items = xitems, .texts = xtexts, \
@@ -366,8 +369,6 @@ struct snd_soc_jack_gpio;
typedef int (*hw_write_t)(void *,const char* ,int);
-extern struct snd_ac97_bus_ops *soc_ac97_ops;
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
@@ -409,13 +410,9 @@ int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *cmpnt_drv,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
-int snd_soc_cache_sync(struct snd_soc_codec *codec);
int snd_soc_cache_init(struct snd_soc_codec *codec);
int snd_soc_cache_exit(struct snd_soc_codec *codec);
-int snd_soc_cache_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value);
-int snd_soc_cache_read(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int *value);
+
int snd_soc_platform_read(struct snd_soc_platform *platform,
unsigned int reg);
int snd_soc_platform_write(struct snd_soc_platform *platform,
@@ -500,14 +497,28 @@ int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
unsigned int mask, unsigned int value);
-int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
- struct snd_ac97_bus_ops *ops, int num);
-void snd_soc_free_ac97_codec(struct snd_soc_codec *codec);
+#ifdef CONFIG_SND_SOC_AC97_BUS
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
struct platform_device *pdev);
+extern struct snd_ac97_bus_ops *soc_ac97_ops;
+#else
+static inline int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
+{
+ return 0;
+}
+#endif
+
/*
*Controls
*/
@@ -545,12 +556,6 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
-int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
-int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
@@ -690,6 +695,17 @@ struct snd_soc_compr_ops {
struct snd_soc_component_driver {
const char *name;
+ /* Default control and setup, added after probe() is run */
+ const struct snd_kcontrol_new *controls;
+ unsigned int num_controls;
+ const struct snd_soc_dapm_widget *dapm_widgets;
+ unsigned int num_dapm_widgets;
+ const struct snd_soc_dapm_route *dapm_routes;
+ unsigned int num_dapm_routes;
+
+ int (*probe)(struct snd_soc_component *);
+ void (*remove)(struct snd_soc_component *);
+
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
struct of_phandle_args *args,
@@ -697,6 +713,10 @@ struct snd_soc_component_driver {
void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type,
int subseq);
int (*stream_event)(struct snd_soc_component *, int event);
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
};
struct snd_soc_component {
@@ -710,6 +730,7 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
+ unsigned int probed:1;
struct list_head list;
@@ -728,9 +749,35 @@ struct snd_soc_component {
struct mutex io_mutex;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+
+ /*
+ * DO NOT use any of the fields below in drivers, they are temporary and
+ * are going to be removed again soon. If you use them in driver code the
+ * driver will be marked as BROKEN when these fields are removed.
+ */
+
/* Don't use these, use snd_soc_component_get_dapm() */
struct snd_soc_dapm_context dapm;
struct snd_soc_dapm_context *dapm_ptr;
+
+ const struct snd_kcontrol_new *controls;
+ unsigned int num_controls;
+ const struct snd_soc_dapm_widget *dapm_widgets;
+ unsigned int num_dapm_widgets;
+ const struct snd_soc_dapm_route *dapm_routes;
+ unsigned int num_dapm_routes;
+ struct snd_soc_codec *codec;
+
+ int (*probe)(struct snd_soc_component *);
+ void (*remove)(struct snd_soc_component *);
+
+#ifdef CONFIG_DEBUG_FS
+ void (*init_debugfs)(struct snd_soc_component *component);
+ const char *debugfs_prefix;
+#endif
};
/* SoC Audio Codec device */
@@ -738,26 +785,18 @@ struct snd_soc_codec {
struct device *dev;
const struct snd_soc_codec_driver *driver;
- struct mutex mutex;
struct list_head list;
struct list_head card_list;
/* runtime */
- struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
unsigned int cache_bypass:1; /* Suppress access to the cache */
unsigned int suspended:1; /* Codec is in suspend PM state */
- unsigned int probed:1; /* Codec has been probed */
- unsigned int ac97_registered:1; /* Codec has been AC97 registered */
- unsigned int ac97_created:1; /* Codec has been created by SoC */
unsigned int cache_init:1; /* codec cache has been initialized */
- u32 cache_only; /* Suppress writes to hardware */
- u32 cache_sync; /* Cache needs to be synced to hardware */
/* codec IO */
void *control_data; /* codec control (i2c/3wire) data */
hw_write_t hw_write;
void *reg_cache;
- struct mutex cache_rw_mutex;
/* component */
struct snd_soc_component component;
@@ -766,7 +805,6 @@ struct snd_soc_codec {
struct snd_soc_dapm_context dapm;
#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_codec_root;
struct dentry *debugfs_reg;
#endif
};
@@ -808,15 +846,12 @@ struct snd_soc_codec_driver {
int (*set_bias_level)(struct snd_soc_codec *,
enum snd_soc_bias_level level);
bool idle_bias_off;
+ bool suspend_bias_off;
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
-
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
};
/* SoC platform interface */
@@ -824,22 +859,12 @@ struct snd_soc_platform_driver {
int (*probe)(struct snd_soc_platform *);
int (*remove)(struct snd_soc_platform *);
- int (*suspend)(struct snd_soc_dai *dai);
- int (*resume)(struct snd_soc_dai *dai);
struct snd_soc_component_driver component_driver;
/* pcm creation and destruction */
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
- /* Default control and setup, added after probe() is run */
- const struct snd_kcontrol_new *controls;
- int num_controls;
- const struct snd_soc_dapm_widget *dapm_widgets;
- int num_dapm_widgets;
- const struct snd_soc_dapm_route *dapm_routes;
- int num_dapm_routes;
-
/*
* For platform caused delay reporting.
* Optional.
@@ -853,19 +878,12 @@ struct snd_soc_platform_driver {
/* platform stream compress ops */
const struct snd_compr_ops *compr_ops;
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
-
- /* platform IO - used for platform DAPM */
- unsigned int (*read)(struct snd_soc_platform *, unsigned int);
- int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
int (*bespoke_trigger)(struct snd_pcm_substream *, int);
};
struct snd_soc_dai_link_component {
const char *name;
- const struct device_node *of_node;
+ struct device_node *of_node;
const char *dai_name;
};
@@ -873,16 +891,9 @@ struct snd_soc_platform {
struct device *dev;
const struct snd_soc_platform_driver *driver;
- unsigned int suspended:1; /* platform is suspended */
- unsigned int probed:1;
-
struct list_head list;
struct snd_soc_component component;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_platform_root;
-#endif
};
struct snd_soc_dai_link {
@@ -897,7 +908,7 @@ struct snd_soc_dai_link {
* only for codec to codec links, or systems using device tree.
*/
const char *cpu_name;
- const struct device_node *cpu_of_node;
+ struct device_node *cpu_of_node;
/*
* You MAY specify the DAI name of the CPU DAI. If this information is
* omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node
@@ -909,7 +920,7 @@ struct snd_soc_dai_link {
* DT/OF node, but not both.
*/
const char *codec_name;
- const struct device_node *codec_of_node;
+ struct device_node *codec_of_node;
/* You MUST specify the DAI name within the codec */
const char *codec_dai_name;
@@ -922,7 +933,7 @@ struct snd_soc_dai_link {
* do not need a platform.
*/
const char *platform_name;
- const struct device_node *platform_of_node;
+ struct device_node *platform_of_node;
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
@@ -974,7 +985,7 @@ struct snd_soc_codec_conf {
* DT/OF node, but not both.
*/
const char *dev_name;
- const struct device_node *of_node;
+ struct device_node *of_node;
/*
* optional map of kcontrol, widget and path name prefixes that are
@@ -991,10 +1002,10 @@ struct snd_soc_aux_dev {
* DT/OF node, but not both.
*/
const char *codec_name;
- const struct device_node *codec_of_node;
+ struct device_node *codec_of_node;
/* codec/machine specific init - e.g. add machine controls */
- int (*init)(struct snd_soc_dapm_context *dapm);
+ int (*init)(struct snd_soc_component *component);
};
/* SoC card */
@@ -1112,6 +1123,7 @@ struct snd_soc_pcm_runtime {
struct snd_soc_platform *platform;
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai;
+ struct snd_soc_component *component; /* Only valid for AUX dev rtds */
struct snd_soc_dai **codec_dais;
unsigned int num_codecs;
@@ -1247,6 +1259,17 @@ unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val);
+/**
+ * snd_soc_cache_sync() - Sync the register cache with the hardware
+ * @codec: CODEC to sync
+ *
+ * Note: This function will call regcache_sync()
+ */
+static inline int snd_soc_cache_sync(struct snd_soc_codec *codec)
+{
+ return regcache_sync(codec->component.regmap);
+}
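A typical caller of this helper is a codec resume path; a minimal, hypothetical example follows (the function name is invented for illustration).

/* Hypothetical codec resume path using the helper above. */
static int example_codec_resume(struct snd_soc_codec *codec)
{
	/* Replay cached register writes after the part was powered down. */
	return snd_soc_cache_sync(codec);
}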
+
/* component IO */
int snd_soc_component_read(struct snd_soc_component *component,
unsigned int reg, unsigned int *val);
@@ -1260,8 +1283,44 @@ void snd_soc_component_async_complete(struct snd_soc_component *component);
int snd_soc_component_test_bits(struct snd_soc_component *component,
unsigned int reg, unsigned int mask, unsigned int value);
-int snd_soc_component_init_io(struct snd_soc_component *component,
+#ifdef CONFIG_REGMAP
+
+void snd_soc_component_init_regmap(struct snd_soc_component *component,
struct regmap *regmap);
+void snd_soc_component_exit_regmap(struct snd_soc_component *component);
+
+/**
+ * snd_soc_codec_init_regmap() - Initialize regmap instance for the CODEC
+ * @codec: The CODEC for which to initialize the regmap instance
+ * @regmap: The regmap instance that should be used by the CODEC
+ *
+ * This function allows deferred assignment of the regmap instance that is
+ * associated with the CODEC. Only use this if the regmap instance is not yet
+ * ready when the CODEC is registered. The function must also be called before
+ * the first IO attempt of the CODEC.
+ */
+static inline void snd_soc_codec_init_regmap(struct snd_soc_codec *codec,
+ struct regmap *regmap)
+{
+ snd_soc_component_init_regmap(&codec->component, regmap);
+}
+
+/**
+ * snd_soc_codec_exit_regmap() - De-initialize regmap instance for the CODEC
+ * @codec: The CODEC for which to de-initialize the regmap instance
+ *
+ * Calls regmap_exit() on the regmap instance associated with the CODEC and
+ * removes the regmap instance from the CODEC.
+ *
+ * This function should only be used if snd_soc_codec_init_regmap() was used to
+ * initialize the regmap instance.
+ */
+static inline void snd_soc_codec_exit_regmap(struct snd_soc_codec *codec)
+{
+ snd_soc_component_exit_regmap(&codec->component);
+}
+
+#endif
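A brief sketch of deferred regmap assignment from a codec probe callback, under the assumption of a driver-specific setup routine; example_build_regmap() and the probe name are placeholders, not real APIs.

/* Illustrative codec probe with deferred regmap setup. */
static int example_codec_probe(struct snd_soc_codec *codec)
{
	struct regmap *regmap;

	/* example_build_regmap() stands in for driver-specific regmap creation. */
	regmap = example_build_regmap(codec->dev);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Must run before the first register access on this CODEC. */
	snd_soc_codec_init_regmap(codec, regmap);
	return 0;
}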
/* device driver data */
@@ -1276,26 +1335,37 @@ static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
return card->drvdata;
}
+static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c,
+ void *data)
+{
+ dev_set_drvdata(c->dev, data);
+}
+
+static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
+{
+ return dev_get_drvdata(c->dev);
+}
+
static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
void *data)
{
- dev_set_drvdata(codec->dev, data);
+ snd_soc_component_set_drvdata(&codec->component, data);
}
static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec)
{
- return dev_get_drvdata(codec->dev);
+ return snd_soc_component_get_drvdata(&codec->component);
}
static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform,
void *data)
{
- dev_set_drvdata(platform->dev, data);
+ snd_soc_component_set_drvdata(&platform->component, data);
}
static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform)
{
- return dev_get_drvdata(platform->dev);
+ return snd_soc_component_get_drvdata(&platform->component);
}
static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd,
@@ -1426,6 +1496,9 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
struct device_node **framemaster);
int snd_soc_of_get_dai_name(struct device_node *of_node,
const char **dai_name);
+int snd_soc_of_get_dai_link_codecs(struct device *dev,
+ struct device_node *of_node,
+ struct snd_soc_dai_link *dai_link);
#include <sound/soc-dai.h>
diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h
index e475659bd3be..509efb050176 100644
--- a/include/sound/uda134x.h
+++ b/include/sound/uda134x.h
@@ -18,18 +18,6 @@ struct uda134x_platform_data {
struct l3_pins l3;
void (*power) (int);
int model;
- /*
- ALSA SOC usually puts the device in standby mode when it's not used
- for sometime. If you unset is_powered_on_standby the driver will
- turn off the ADC/DAC when this callback is invoked and turn it back
- on when needed. Unfortunately this will result in a very light bump
- (it can be audible only with good earphones). If this bothers you
- set is_powered_on_standby, you will have slightly higher power
- consumption. Please note that sending the L3 command for ADC is
- enough to make the bump, so it doesn't make difference if you
- completely take off power from the codec.
- */
- int is_powered_on_standby;
#define UDA134X_UDA1340 1
#define UDA134X_UDA1341 2
#define UDA134X_UDA1344 3
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index f634f8f85db5..cae9c9d4ef22 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -80,8 +80,6 @@ struct vx_pipe {
unsigned int references; /* an output pipe may be used for monitoring and/or playback */
struct vx_pipe *monitoring_pipe; /* pointer to the monitoring pipe (capture pipe only)*/
-
- struct tasklet_struct start_tq;
};
struct vx_core;
@@ -165,9 +163,7 @@ struct vx_core {
struct snd_vx_hardware *hw;
struct snd_vx_ops *ops;
- spinlock_t lock;
- spinlock_t irq_lock;
- struct tasklet_struct tq;
+ struct mutex lock;
unsigned int chip_status;
unsigned int pcm_running;
@@ -223,6 +219,7 @@ void snd_vx_free_firmware(struct vx_core *chip);
* interrupt handler; exported for pcmcia
*/
irqreturn_t snd_vx_irq_handler(int irq, void *dev);
+irqreturn_t snd_vx_threaded_irq_handler(int irq, void *dev);
/*
* lowlevel functions
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 9adc1bca1178..430cfaf92285 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -5,6 +5,15 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
+struct target_backend_cits {
+ struct config_item_type tb_dev_cit;
+ struct config_item_type tb_dev_attrib_cit;
+ struct config_item_type tb_dev_pr_cit;
+ struct config_item_type tb_dev_wwn_cit;
+ struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+ struct config_item_type tb_dev_stat_cit;
+};
+
struct se_subsystem_api {
struct list_head sub_api_list;
@@ -44,6 +53,8 @@ struct se_subsystem_api {
int (*init_prot)(struct se_device *);
int (*format_prot)(struct se_device *);
void (*free_prot)(struct se_device *);
+
+ struct target_backend_cits tb_cits;
};
struct sbc_ops {
@@ -96,4 +107,36 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
void array_free(void *array, int n);
+/* From target_core_configfs.c to setup default backend config_item_types */
+void target_core_setup_sub_cits(struct se_subsystem_api *);
+
+/* attribute helpers from target_core_device.c for backend drivers */
+int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int se_dev_set_unmap_granularity(struct se_device *, u32);
+int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
+int se_dev_set_emulate_model_alias(struct se_device *, int);
+int se_dev_set_emulate_dpo(struct se_device *, int);
+int se_dev_set_emulate_fua_write(struct se_device *, int);
+int se_dev_set_emulate_fua_read(struct se_device *, int);
+int se_dev_set_emulate_write_cache(struct se_device *, int);
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int se_dev_set_emulate_tas(struct se_device *, int);
+int se_dev_set_emulate_tpu(struct se_device *, int);
+int se_dev_set_emulate_tpws(struct se_device *, int);
+int se_dev_set_emulate_caw(struct se_device *, int);
+int se_dev_set_emulate_3pc(struct se_device *, int);
+int se_dev_set_pi_prot_type(struct se_device *, int);
+int se_dev_set_pi_prot_format(struct se_device *, int);
+int se_dev_set_enforce_pr_isids(struct se_device *, int);
+int se_dev_set_force_pr_aptpl(struct se_device *, int);
+int se_dev_set_is_nonrot(struct se_device *, int);
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int se_dev_set_queue_depth(struct se_device *, u32);
+int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_fabric_max_sectors(struct se_device *, u32);
+int se_dev_set_optimal_sectors(struct se_device *, u32);
+int se_dev_set_block_size(struct se_device *, u32);
+
#endif /* TARGET_CORE_BACKEND_H */
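The new tb_cits member and the exported se_dev_set_*() helpers let a backend reuse the core's default configfs attribute plumbing instead of open-coding it. A hedged sketch of how a backend might wire this up at registration time (the template name and elided callbacks are illustrative only):

#include <linux/init.h>
#include <target/target_core_backend.h>

static struct se_subsystem_api sketch_template = {
	.name = "sketch",
	/* ... transport callbacks elided ... */
};

static int __init sketch_backend_init(void)
{
	/* Populate tb_cits with the default config_item_types before
	 * registering, so the configfs groups come from the core. */
	target_core_setup_sub_cits(&sketch_template);
	return transport_subsystem_register(&sketch_template);
}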
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
new file mode 100644
index 000000000000..3247d7530107
--- /dev/null
+++ b/include/target/target_core_backend_configfs.h
@@ -0,0 +1,120 @@
+#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
+#define TARGET_CORE_BACKEND_CONFIGFS_H
+
+#include <target/configfs_macros.h>
+
+#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
+static ssize_t _backend##_dev_show_attr_##_name( \
+ struct se_dev_attrib *da, \
+ char *page) \
+{ \
+ return snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)da->da_dev->dev_attrib._name); \
+}
+
+#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
+static ssize_t _backend##_dev_store_attr_##_name( \
+ struct se_dev_attrib *da, \
+ const char *page, \
+ size_t count) \
+{ \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = se_dev_set_##_name(da->da_dev, (u32)val); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_TB_DEV_ATTRIB(_backend, _name) \
+DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
+DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
+
+#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
+DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
+
+CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
+#define TB_DEV_ATTR(_backend, _name, _mode) \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ _backend##_dev_show_attr_##_name, \
+ _backend##_dev_store_attr_##_name);
+
+#define TB_DEV_ATTR_RO(_backend, _name) \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ _backend##_dev_show_attr_##_name);
+
+/*
+ * Default list of target backend device attributes as defined by
+ * struct se_dev_attrib
+ */
+
+#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
+ TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
+ TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
+ TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
+ TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
+ TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
+ TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
+ TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
+ TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
+ TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
+ TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
+ TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
+ TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
+ TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
+ DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
+ TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
+ TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
+ TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
+ TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
+ TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
+ TB_DEV_ATTR_RO(_backend, hw_block_size); \
+ DEF_TB_DEV_ATTRIB(_backend, block_size); \
+ TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
+ TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
+ DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
+ TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
+ TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
+ TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
+ DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
+ TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
+ TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
+ TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
+ TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
+ TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
+ DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
+ TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
+
+#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
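These macros are meant to be expanded once per backend to stamp out the standard se_dev_attrib show/store handlers and configfs attribute definitions. A hedged sketch of the expected usage in a backend's configfs glue (the fileio prefix is only an example, and the attribute list is abbreviated):

#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

/* Expands to fileio_dev_show_attr_* / fileio_dev_store_attr_* functions and
 * fileio_dev_attrib_* attribute definitions for the default attribute set. */
DEF_TB_DEFAULT_ATTRIBS(fileio);

static struct configfs_attribute *fileio_backend_dev_attrs[] = {
	&fileio_dev_attrib_emulate_model_alias.attr,
	&fileio_dev_attrib_emulate_write_cache.attr,
	&fileio_dev_attrib_block_size.attr,
	/* ... remaining default attributes ... */
	NULL,
};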
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9ec9864ecf38..397fb635766a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -108,6 +108,8 @@
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
+/* Force SPC-3 PR Activate Persistence across Target Power Loss */
+#define DA_FORCE_PR_APTPL 0
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
/* By default don't report non-rotating (solid state) medium */
@@ -474,6 +476,12 @@ struct se_dif_v1_tuple {
__be32 ref_tag;
};
+/* for sam_task_attr */
+#define TCM_SIMPLE_TAG 0x20
+#define TCM_HEAD_TAG 0x21
+#define TCM_ORDERED_TAG 0x22
+#define TCM_ACA_TAG 0x24
+
struct se_cmd {
/* SAM response code being sent to initiator */
u8 scsi_status;
@@ -680,6 +688,7 @@ struct se_dev_attrib {
enum target_prot_type pi_prot_type;
enum target_prot_type hw_pi_prot_type;
int enforce_pr_isids;
+ int force_pr_aptpl;
int is_nonrot;
int emulate_rest_reord;
u32 hw_block_size;
@@ -903,4 +912,18 @@ struct se_wwn {
struct config_group fabric_stat_group;
};
+static inline void atomic_inc_mb(atomic_t *v)
+{
+ smp_mb__before_atomic();
+ atomic_inc(v);
+ smp_mb__after_atomic();
+}
+
+static inline void atomic_dec_mb(atomic_t *v)
+{
+ smp_mb__before_atomic();
+ atomic_dec(v);
+ smp_mb__after_atomic();
+}
+
#endif /* TARGET_CORE_BASE_H */
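The atomic_inc_mb()/atomic_dec_mb() helpers simply bracket the atomic op with full ordering on both sides, so call sites no longer open-code the barrier pair. A tiny hedged sketch of the intended call-site shape (the wrapper name is illustrative):

#include <linux/atomic.h>
#include <target/target_core_base.h>

/* Sketch: bump a counter with full ordering, so a later lockless read
 * cannot be reordered before the increment. */
static void sketch_get_ref(atomic_t *refs)
{
	atomic_inc_mb(refs);	/* barrier + atomic_inc + barrier, in effect */
}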
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 0194a641e4e2..88cf39d96d0f 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -175,7 +175,7 @@ TRACE_EVENT(snd_soc_dapm_output_path,
__entry->path_sink = (long)path->sink;
),
- TP_printk("%c%s -> %s -> %s\n",
+ TP_printk("%c%s -> %s -> %s",
(int) __entry->path_sink &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
@@ -204,7 +204,7 @@ TRACE_EVENT(snd_soc_dapm_input_path,
__entry->path_source = (long)path->source;
),
- TP_printk("%c%s <- %s <- %s\n",
+ TP_printk("%c%s <- %s <- %s",
(int) __entry->path_source &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
@@ -226,7 +226,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
__entry->stream = stream;
),
- TP_printk("%s: found %d paths\n",
+ TP_printk("%s: found %d paths",
__entry->stream ? "capture" : "playback", __entry->paths)
);
@@ -288,31 +288,6 @@ TRACE_EVENT(snd_soc_jack_notify,
TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
);
-TRACE_EVENT(snd_soc_cache_sync,
-
- TP_PROTO(struct snd_soc_codec *codec, const char *type,
- const char *status),
-
- TP_ARGS(codec, type, status),
-
- TP_STRUCT__entry(
- __string( name, codec->component.name)
- __string( status, status )
- __string( type, type )
- __field( int, id )
- ),
-
- TP_fast_assign(
- __assign_str(name, codec->component.name);
- __assign_str(status, status);
- __assign_str(type, type);
- __entry->id = codec->component.id;
- ),
-
- TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
- (int)__entry->id, __get_str(type), __get_str(status))
-);
-
#endif /* _TRACE_ASOC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4ee4e30d26d9..1faecea101f3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,6 +23,7 @@ struct map_lookup;
struct extent_buffer;
struct btrfs_work;
struct __btrfs_workqueue;
+struct btrfs_qgroup_operation;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -157,12 +158,13 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { EXTENT_FLAG_PINNED, "PINNED" }, \
- { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
- { EXTENT_FLAG_VACANCY, "VACANCY" }, \
- { EXTENT_FLAG_PREALLOC, "PREALLOC" }, \
- { EXTENT_FLAG_LOGGING, "LOGGING" }, \
- { EXTENT_FLAG_FILLING, "FILLING" })
+ { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
+ { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
+ { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
+ { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
+ { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
+ { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
+ { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -996,6 +998,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( void *, func )
__field( void *, ordered_func )
__field( void *, ordered_free )
+ __field( void *, normal_work )
),
TP_fast_assign(
@@ -1004,11 +1007,13 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
__entry->ordered_free = work->ordered_free;
+ __entry->normal_work = &work->normal_work;
),
- TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
- __entry->work, __entry->wq, __entry->func,
- __entry->ordered_func, __entry->ordered_free)
+ TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
+ " ordered_free=%p",
+ __entry->work, __entry->normal_work, __entry->wq,
+ __entry->func, __entry->ordered_func, __entry->ordered_free)
);
/* For situations where the work is freed */
@@ -1043,13 +1048,6 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
TP_ARGS(work)
);
-DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
-
- TP_PROTO(struct btrfs_work *work),
-
- TP_ARGS(work)
-);
-
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
TP_PROTO(struct btrfs_work *work),
@@ -1119,6 +1117,61 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
TP_ARGS(wq)
);
+#define show_oper_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \
+ { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \
+ { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \
+ { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" })
+
+DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper),
+
+ TP_STRUCT__entry(
+ __field( u64, ref_root )
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( u64, seq )
+ __field( int, type )
+ __field( u64, elem_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->ref_root = oper->ref_root;
+ __entry->bytenr = oper->bytenr,
+ __entry->num_bytes = oper->num_bytes;
+ __entry->seq = oper->seq;
+ __entry->type = oper->type;
+ __entry->elem_seq = oper->elem.seq;
+ ),
+
+ TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
+ "seq = %llu, elem.seq = %llu, type = %s",
+ (unsigned long long)__entry->ref_root,
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes,
+ (unsigned long long)__entry->seq,
+ (unsigned long long)__entry->elem_seq,
+ show_oper_type(__entry->type))
+);
+
+DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper)
+);
+
+DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d4f70a7fe876..6cfb841fea7c 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -43,15 +43,13 @@ struct extent_status;
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
{ EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
- { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \
- { EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" })
+ { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" })
#define show_mflags(flags) __print_flags(flags, "", \
{ EXT4_MAP_NEW, "N" }, \
{ EXT4_MAP_MAPPED, "M" }, \
{ EXT4_MAP_UNWRITTEN, "U" }, \
- { EXT4_MAP_BOUNDARY, "B" }, \
- { EXT4_MAP_FROM_CLUSTER, "C" })
+ { EXT4_MAP_BOUNDARY, "B" })
#define show_free_flags(flags) __print_flags(flags, "|", \
{ EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \
@@ -2369,7 +2367,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
show_extent_status(__entry->found ? __entry->status : 0))
);
-TRACE_EVENT(ext4_es_shrink_enter,
+DECLARE_EVENT_CLASS(ext4__es_shrink_enter,
TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
TP_ARGS(sb, nr_to_scan, cache_cnt),
@@ -2391,26 +2389,38 @@ TRACE_EVENT(ext4_es_shrink_enter,
__entry->nr_to_scan, __entry->cache_cnt)
);
-TRACE_EVENT(ext4_es_shrink_exit,
- TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt),
+DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_count,
+ TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
+
+ TP_ARGS(sb, nr_to_scan, cache_cnt)
+);
+
+DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_scan_enter,
+ TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
- TP_ARGS(sb, shrunk_nr, cache_cnt),
+ TP_ARGS(sb, nr_to_scan, cache_cnt)
+);
+
+TRACE_EVENT(ext4_es_shrink_scan_exit,
+ TP_PROTO(struct super_block *sb, int nr_shrunk, int cache_cnt),
+
+ TP_ARGS(sb, nr_shrunk, cache_cnt),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( int, shrunk_nr )
+ __field( int, nr_shrunk )
__field( int, cache_cnt )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->shrunk_nr = shrunk_nr;
+ __entry->nr_shrunk = nr_shrunk;
__entry->cache_cnt = cache_cnt;
),
- TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d",
+ TP_printk("dev %d,%d nr_shrunk %d cache_cnt %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->shrunk_nr, __entry->cache_cnt)
+ __entry->nr_shrunk, __entry->cache_cnt)
);
TRACE_EVENT(ext4_collapse_range,
@@ -2438,6 +2448,34 @@ TRACE_EVENT(ext4_collapse_range,
__entry->offset, __entry->len)
);
+TRACE_EVENT(ext4_es_shrink,
+ TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
+ int nr_skipped, int retried),
+
+ TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, nr_shrunk )
+ __field( unsigned long long, scan_time )
+ __field( int, nr_skipped )
+ __field( int, retried )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->nr_shrunk = nr_shrunk;
+ __entry->scan_time = div_u64(scan_time, 1000);
+ __entry->nr_skipped = nr_skipped;
+ __entry->retried = retried;
+ ),
+
+ TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu "
+ "nr_skipped %d retried %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
+ __entry->scan_time, __entry->nr_skipped, __entry->retried)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index d06d44363fea..bbc4de9baef7 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -69,6 +69,12 @@
{ GC_GREEDY, "Greedy" }, \
{ GC_CB, "Cost-Benefit" })
+#define show_cpreason(type) \
+ __print_symbolic(type, \
+ { CP_UMOUNT, "Umount" }, \
+ { CP_SYNC, "Sync" }, \
+ { CP_DISCARD, "Discard" })
+
struct victim_sel_policy;
DECLARE_EVENT_CLASS(f2fs__inode,
@@ -944,25 +950,25 @@ TRACE_EVENT(f2fs_submit_page_mbio,
TRACE_EVENT(f2fs_write_checkpoint,
- TP_PROTO(struct super_block *sb, bool is_umount, char *msg),
+ TP_PROTO(struct super_block *sb, int reason, char *msg),
- TP_ARGS(sb, is_umount, msg),
+ TP_ARGS(sb, reason, msg),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(bool, is_umount)
+ __field(int, reason)
__field(char *, msg)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->is_umount = is_umount;
+ __entry->reason = reason;
__entry->msg = msg;
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
show_dev(__entry),
- __entry->is_umount ? "clean umount" : "consistency",
+ show_cpreason(__entry->reason),
__entry->msg)
);
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 59d11c22f076..a0d008070962 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -53,15 +53,15 @@ DECLARE_EVENT_CLASS(filelock_lease,
),
TP_fast_assign(
- __entry->fl = fl;
+ __entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl->fl_next;
- __entry->fl_owner = fl->fl_owner;
- __entry->fl_flags = fl->fl_flags;
- __entry->fl_type = fl->fl_type;
- __entry->fl_break_time = fl->fl_break_time;
- __entry->fl_downgrade_time = fl->fl_downgrade_time;
+ __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_owner = fl ? fl->fl_owner : NULL;
+ __entry->fl_flags = fl ? fl->fl_flags : 0;
+ __entry->fl_type = fl ? fl->fl_type : 0;
+ __entry->fl_break_time = fl ? fl->fl_break_time : 0;
+ __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index 94db6a2c3540..63116362543c 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -29,6 +29,8 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
+struct host1x_bo;
+
DECLARE_EVENT_CLASS(host1x,
TP_PROTO(const char *name),
TP_ARGS(name),
@@ -79,14 +81,14 @@ TRACE_EVENT(host1x_cdma_push,
);
TRACE_EVENT(host1x_cdma_push_gather,
- TP_PROTO(const char *name, u32 mem_id,
+ TP_PROTO(const char *name, struct host1x_bo *bo,
u32 words, u32 offset, void *cmdbuf),
- TP_ARGS(name, mem_id, words, offset, cmdbuf),
+ TP_ARGS(name, bo, words, offset, cmdbuf),
TP_STRUCT__entry(
__field(const char *, name)
- __field(u32, mem_id)
+ __field(struct host1x_bo *, bo)
__field(u32, words)
__field(u32, offset)
__field(bool, cmdbuf)
@@ -100,13 +102,13 @@ TRACE_EVENT(host1x_cdma_push_gather,
}
__entry->cmdbuf = cmdbuf;
__entry->name = name;
- __entry->mem_id = mem_id;
+ __entry->bo = bo;
__entry->words = words;
__entry->offset = offset;
),
- TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
- __entry->name, __entry->mem_id,
+ TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]",
+ __entry->name, __entry->bo,
__entry->words, __entry->offset,
__print_hex(__get_dynamic_array(cmdbuf),
__entry->cmdbuf ? __entry->words * 4 : 0))
@@ -221,12 +223,13 @@ TRACE_EVENT(host1x_syncpt_load_min,
);
TRACE_EVENT(host1x_syncpt_wait_check,
- TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
+ TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh,
+ u32 min),
- TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
+ TP_ARGS(bo, offset, syncpt_id, thresh, min),
TP_STRUCT__entry(
- __field(void *, mem_id)
+ __field(struct host1x_bo *, bo)
__field(u32, offset)
__field(u32, syncpt_id)
__field(u32, thresh)
@@ -234,15 +237,15 @@ TRACE_EVENT(host1x_syncpt_wait_check,
),
TP_fast_assign(
- __entry->mem_id = mem_id;
+ __entry->bo = bo;
__entry->offset = offset;
__entry->syncpt_id = syncpt_id;
__entry->thresh = thresh;
__entry->min = min;
),
- TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
- __entry->mem_id, __entry->offset,
+ TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d",
+ __entry->bo, __entry->offset,
__entry->syncpt_id, __entry->thresh,
__entry->min)
);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 908925ace776..6edf1f2028cd 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -95,6 +95,26 @@ TRACE_EVENT(kvm_ioapic_set_irq,
__entry->coalesced ? " (coalesced)" : "")
);
+TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
+ TP_PROTO(__u64 e),
+ TP_ARGS(e),
+
+ TP_STRUCT__entry(
+ __field( __u64, e )
+ ),
+
+ TP_fast_assign(
+ __entry->e = e;
+ ),
+
+ TP_printk("dst %x vec=%u (%s|%s|%s%s)",
+ (u8)(__entry->e >> 56), (u8)__entry->e,
+ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
+ (__entry->e & (1<<11)) ? "logical" : "physical",
+ (__entry->e & (1<<15)) ? "level" : "edge",
+ (__entry->e & (1<<16)) ? "|masked" : "")
+);
+
TRACE_EVENT(kvm_msi_set_irq,
TP_PROTO(__u64 address, __u64 data),
TP_ARGS(address, data),
@@ -205,24 +225,26 @@ TRACE_EVENT(kvm_fpu,
);
TRACE_EVENT(kvm_age_page,
- TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
- TP_ARGS(hva, slot, ref),
+ TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
+ TP_ARGS(gfn, level, slot, ref),
TP_STRUCT__entry(
__field( u64, hva )
__field( u64, gfn )
+ __field( u8, level )
__field( u8, referenced )
),
TP_fast_assign(
- __entry->hva = hva;
- __entry->gfn =
- slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
+ __entry->gfn = gfn;
+ __entry->level = level;
+ __entry->hva = ((gfn - slot->base_gfn) <<
+ PAGE_SHIFT) + slot->userspace_addr;
__entry->referenced = ref;
),
- TP_printk("hva %llx gfn %llx %s",
- __entry->hva, __entry->gfn,
+ TP_printk("hva %llx gfn %llx level %u %s",
+ __entry->hva, __entry->gfn, __entry->level,
__entry->referenced ? "YOUNG" : "OLD")
);
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 7c5cbfe3fc49..81c4c183d348 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
TP_fast_assign(
__entry->ip = ip;
- __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
+ __entry->refcnt = atomic_read(&mod->refcnt);
__assign_str(name, mod->name);
),
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index aca382266411..c78e88ce5ea3 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -36,7 +36,7 @@ TRACE_EVENT(rcu_utilization,
#ifdef CONFIG_RCU_TRACE
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
/*
* Tracepoint for grace-period events. Takes a string identifying the
@@ -180,9 +180,12 @@ TRACE_EVENT(rcu_grace_period_init,
* argument is a string as follows:
*
* "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
* "WakeOvf": Wake rcuo kthread, CB list is huge.
+ * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "DeferredWake": Carried out the "IsDeferred" wakeup.
* "Poll": Start of new polling cycle for rcu_nocb_poll.
* "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
* "WokeEmpty": rcuo kthread woke to find empty list.
@@ -342,7 +345,7 @@ TRACE_EVENT(rcu_fqs,
__entry->cpu, __entry->qsevent)
);
-#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
@@ -657,18 +660,18 @@ TRACE_EVENT(rcu_torture_read,
/*
* Tracepoint for _rcu_barrier() execution. The string "s" describes
* the _rcu_barrier phase:
- * "Begin": rcu_barrier_callback() started.
- * "Check": rcu_barrier_callback() checking for piggybacking.
- * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
- * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
- * "Offline": rcu_barrier_callback() found offline CPU
- * "OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU.
- * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
- * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
+ * "Begin": _rcu_barrier() started.
+ * "Check": _rcu_barrier() checking for piggybacking.
+ * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
+ * "Inc1": _rcu_barrier() piggyback check counter incremented.
+ * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
+ * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
+ * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
+ * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
+ * "Inc2": _rcu_barrier() piggyback check counter incremented.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0a68d5ae584e..30fedaf3e56a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -97,16 +97,19 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
long state = p->state;
#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_SCHED_DEBUG
+ BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
/*
* For all intents and purposes a preempted task is a running task.
*/
- if (task_preempt_count(p) & PREEMPT_ACTIVE)
+ if (preempt_count() & PREEMPT_ACTIVE)
state = TASK_RUNNING | TASK_STATE_MAX;
-#endif
+#endif /* CONFIG_PREEMPT */
return state;
}
-#endif
+#endif /* CREATE_TRACE_POINTS */
/*
* Tracepoint for task switches, performed by the scheduler:
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index db6c93510f74..079bd10a01b4 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -94,7 +94,7 @@
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
- scsi_opcode_name(SERVICE_ACTION_IN), \
+ scsi_opcode_name(SERVICE_ACTION_IN_16), \
scsi_opcode_name(SAI_READ_CAPACITY_16), \
scsi_opcode_name(SAI_GET_LBA_STATUS), \
scsi_opcode_name(MI_REPORT_TARGET_PGS), \
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 1fef3e6e9436..b9c1dc6c825a 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -6,6 +6,9 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/xprtsock.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <net/tcp_states.h>
#include <linux/net.h>
#include <linux/tracepoint.h>
@@ -306,6 +309,271 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
+DECLARE_EVENT_CLASS(rpc_xprt_event,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+
+ TP_ARGS(xprt, xid, status),
+
+ TP_STRUCT__entry(
+ __field(__be32, xid)
+ __field(int, status)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->status = status;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s xid=0x%x status=%d", __get_str(addr),
+ __get_str(port), be32_to_cpu(__entry->xid),
+ __entry->status)
+);
+
+DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
+DEFINE_EVENT(rpc_xprt_event, xprt_transmit,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
+DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
+TRACE_EVENT(xs_tcp_data_ready,
+ TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total),
+
+ TP_ARGS(xprt, err, total),
+
+ TP_STRUCT__entry(
+ __field(int, err)
+ __field(unsigned int, total)
+ __string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] :
+ "(null)")
+ __string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] :
+ "(null)")
+ ),
+
+ TP_fast_assign(
+ __entry->err = err;
+ __entry->total = total;
+ __assign_str(addr, xprt ?
+ xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)");
+ __assign_str(port, xprt ?
+ xprt->address_strings[RPC_DISPLAY_PORT] : "(null)");
+ ),
+
+ TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr),
+ __get_str(port), __entry->err, __entry->total)
+);
+
+#define rpc_show_sock_xprt_flags(flags) \
+ __print_flags(flags, "|", \
+ { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \
+ { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \
+ { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \
+ { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \
+ { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \
+ { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \
+ { TCP_RPC_REPLY, "TCP_RPC_REPLY" })
+
+TRACE_EVENT(xs_tcp_data_recv,
+ TP_PROTO(struct sock_xprt *xs),
+
+ TP_ARGS(xs),
+
+ TP_STRUCT__entry(
+ __string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT])
+ __field(__be32, xid)
+ __field(unsigned long, flags)
+ __field(unsigned long, copied)
+ __field(unsigned int, reclen)
+ __field(unsigned long, offset)
+ ),
+
+ TP_fast_assign(
+ __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
+ __entry->xid = xs->tcp_xid;
+ __entry->flags = xs->tcp_flags;
+ __entry->copied = xs->tcp_copied;
+ __entry->reclen = xs->tcp_reclen;
+ __entry->offset = xs->tcp_offset;
+ ),
+
+ TP_printk("peer=[%s]:%s xid=0x%x flags=%s copied=%lu reclen=%u offset=%lu",
+ __get_str(addr), __get_str(port), be32_to_cpu(__entry->xid),
+ rpc_show_sock_xprt_flags(__entry->flags),
+ __entry->copied, __entry->reclen, __entry->offset)
+);
+
+#define show_rqstp_flags(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << RQ_SECURE), "RQ_SECURE"}, \
+ { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \
+ { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \
+ { (1UL << RQ_DROPME), "RQ_DROPME"}, \
+ { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \
+ { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \
+ { (1UL << RQ_BUSY), "RQ_BUSY"})
+
+TRACE_EVENT(svc_recv,
+ TP_PROTO(struct svc_rqst *rqst, int status),
+
+ TP_ARGS(rqst, status),
+
+ TP_STRUCT__entry(
+ __field(struct sockaddr *, addr)
+ __field(__be32, xid)
+ __field(int, status)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = (struct sockaddr *)&rqst->rq_addr;
+ __entry->xid = status > 0 ? rqst->rq_xid : 0;
+ __entry->status = status;
+ __entry->flags = rqst->rq_flags;
+ ),
+
+ TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
+ be32_to_cpu(__entry->xid), __entry->status,
+ show_rqstp_flags(__entry->flags))
+);
+
+DECLARE_EVENT_CLASS(svc_rqst_status,
+
+ TP_PROTO(struct svc_rqst *rqst, int status),
+
+ TP_ARGS(rqst, status),
+
+ TP_STRUCT__entry(
+ __field(struct sockaddr *, addr)
+ __field(__be32, xid)
+ __field(int, dropme)
+ __field(int, status)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = (struct sockaddr *)&rqst->rq_addr;
+ __entry->xid = rqst->rq_xid;
+ __entry->status = status;
+ __entry->flags = rqst->rq_flags;
+ ),
+
+ TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
+ __entry->addr, be32_to_cpu(__entry->xid),
+ __entry->status, show_rqstp_flags(__entry->flags))
+);
+
+DEFINE_EVENT(svc_rqst_status, svc_process,
+ TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_ARGS(rqst, status));
+
+DEFINE_EVENT(svc_rqst_status, svc_send,
+ TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_ARGS(rqst, status));
+
+#define show_svc_xprt_flags(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << XPT_BUSY), "XPT_BUSY"}, \
+ { (1UL << XPT_CONN), "XPT_CONN"}, \
+ { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
+ { (1UL << XPT_DATA), "XPT_DATA"}, \
+ { (1UL << XPT_TEMP), "XPT_TEMP"}, \
+ { (1UL << XPT_DEAD), "XPT_DEAD"}, \
+ { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
+ { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
+ { (1UL << XPT_OLD), "XPT_OLD"}, \
+ { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
+ { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
+ { (1UL << XPT_LOCAL), "XPT_LOCAL"})
+
+TRACE_EVENT(svc_xprt_do_enqueue,
+ TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
+
+ TP_ARGS(xprt, rqst),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(struct svc_rqst *, rqst)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __entry->rqst = rqst;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->xprt->xpt_remote,
+ __entry->rqst ? __entry->rqst->rq_task->pid : 0,
+ show_svc_xprt_flags(__entry->xprt->xpt_flags))
+);
+
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(struct svc_xprt *xprt),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field_struct(struct sockaddr_storage, ss)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt,
+ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ __entry->flags = xprt ? xprt->xpt_flags : 0;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->ss,
+ show_svc_xprt_flags(__entry->flags))
+);
+
+TRACE_EVENT(svc_wake_up,
+ TP_PROTO(int pid),
+
+ TP_ARGS(pid),
+
+ TP_STRUCT__entry(
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ ),
+
+ TP_printk("pid=%d", __entry->pid)
+);
+
+TRACE_EVENT(svc_handle_xprt,
+ TP_PROTO(struct svc_xprt *xprt, int len),
+
+ TP_ARGS(xprt, len),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __entry->len = len;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
+ show_svc_xprt_flags(__entry->xprt->xpt_flags))
+);
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index da9cc0f05c93..04c3c6efdcc2 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -96,7 +96,7 @@
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
- scsi_opcode_name(SERVICE_ACTION_IN), \
+ scsi_opcode_name(SERVICE_ACTION_IN_16), \
scsi_opcode_name(SAI_READ_CAPACITY_16), \
scsi_opcode_name(SAI_GET_LBA_STATUS), \
scsi_opcode_name(MI_REPORT_TARGET_PGS), \
@@ -109,10 +109,10 @@
#define show_task_attribute_name(val) \
__print_symbolic(val, \
- { MSG_SIMPLE_TAG, "SIMPLE" }, \
- { MSG_HEAD_TAG, "HEAD" }, \
- { MSG_ORDERED_TAG, "ORDERED" }, \
- { MSG_ACA_TAG, "ACA" } )
+ { TCM_SIMPLE_TAG, "SIMPLE" }, \
+ { TCM_HEAD_TAG, "HEAD" }, \
+ { TCM_ORDERED_TAG, "ORDERED" }, \
+ { TCM_ACA_TAG, "ACA" } )
#define show_scsi_status_name(val) \
__print_symbolic(val, \
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
new file mode 100644
index 000000000000..0f4f95d63c03
--- /dev/null
+++ b/include/trace/events/thermal.h
@@ -0,0 +1,83 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal
+
+#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_H
+
+#include <linux/thermal.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_temperature,
+
+ TP_PROTO(struct thermal_zone_device *tz),
+
+ TP_ARGS(tz),
+
+ TP_STRUCT__entry(
+ __string(thermal_zone, tz->type)
+ __field(int, id)
+ __field(int, temp_prev)
+ __field(int, temp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(thermal_zone, tz->type);
+ __entry->id = tz->id;
+ __entry->temp_prev = tz->last_temperature;
+ __entry->temp = tz->temperature;
+ ),
+
+ TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
+ __get_str(thermal_zone), __entry->id, __entry->temp_prev,
+ __entry->temp)
+);
+
+TRACE_EVENT(cdev_update,
+
+ TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
+
+ TP_ARGS(cdev, target),
+
+ TP_STRUCT__entry(
+ __string(type, cdev->type)
+ __field(unsigned long, target)
+ ),
+
+ TP_fast_assign(
+ __assign_str(type, cdev->type);
+ __entry->target = target;
+ ),
+
+ TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
+);
+
+TRACE_EVENT(thermal_zone_trip,
+
+ TP_PROTO(struct thermal_zone_device *tz, int trip,
+ enum thermal_trip_type trip_type),
+
+ TP_ARGS(tz, trip, trip_type),
+
+ TP_STRUCT__entry(
+ __string(thermal_zone, tz->type)
+ __field(int, id)
+ __field(int, trip)
+ __field(enum thermal_trip_type, trip_type)
+ ),
+
+ TP_fast_assign(
+ __assign_str(thermal_zone, tz->type);
+ __entry->id = tz->id;
+ __entry->trip = trip;
+ __entry->trip_type = trip_type;
+ ),
+
+ TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
+ __get_str(thermal_zone), __entry->id, __entry->trip,
+ __entry->trip_type)
+);
+
+#endif /* _TRACE_THERMAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
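These tracepoints are intended to fire from the thermal core and drivers at the matching events; TRACE_EVENT() generates the trace_<name>() calls used below. A minimal, hedged sketch of where they would be emitted (the surrounding update function is illustrative, and CREATE_TRACE_POINTS must be defined in exactly one .c file):

#include <linux/thermal.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal.h>

/* Sketch: emit the new tracepoints from a zone-update path. */
static void sketch_zone_update(struct thermal_zone_device *tz,
			       int trip, enum thermal_trip_type type)
{
	trace_thermal_temperature(tz);		 /* after reading tz->temperature */
	trace_thermal_zone_trip(tz, trip, type); /* when a trip point is crossed */
}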
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 26b4f2e13275..139b5067345b 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
field = (typeof(field))iter->ent; \
\
ret = ftrace_raw_output_prep(iter, trace_event); \
- if (ret) \
+ if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
- ret = trace_seq_printf(s, print); \
- if (!ret) \
- return TRACE_TYPE_PARTIAL_LINE; \
+ trace_seq_printf(s, print); \
\
- return TRACE_TYPE_HANDLED; \
+ return trace_handle_return(s); \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
.trace = ftrace_raw_output_##call, \
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 81d2106287fe..245aa6e05e6a 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -12,3 +12,4 @@ header-y += video/
header-y += drm/
header-y += xen/
header-y += scsi/
+header-y += misc/
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index ba5be7fdbdfe..1e3552037a5a 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -91,6 +91,10 @@ typedef struct siginfo {
int _trapno; /* TRAP # which caused the signal */
#endif
short _addr_lsb; /* LSB of the reported address */
+ struct {
+ void __user *_lower;
+ void __user *_upper;
+ } _addr_bnd;
} _sigfault;
/* SIGPOLL */
@@ -131,6 +135,8 @@ typedef struct siginfo {
#define si_trapno _sifields._sigfault._trapno
#endif
#define si_addr_lsb _sifields._sigfault._addr_lsb
+#define si_lower _sifields._sigfault._addr_bnd._lower
+#define si_upper _sifields._sigfault._addr_bnd._upper
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#ifdef __ARCH_SIGSYS
@@ -199,7 +205,8 @@ typedef struct siginfo {
*/
#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
-#define NSIGSEGV 2
+#define SEGV_BNDERR (__SI_FAULT|3) /* failed address bound checks */
+#define NSIGSEGV 3
/*
* SIGBUS si_codes
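With SEGV_BNDERR and the new si_lower/si_upper fields, user space can see which bounds an address-bound check (e.g. MPX-style) tripped over. A hedged user-space sketch of an SA_SIGINFO handler consuming the new fields (requires headers that already carry this change):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	/* fprintf is not async-signal-safe; used here for illustration only. */
	if (si->si_code == SEGV_BNDERR)
		fprintf(stderr, "bound violation at %p, bounds [%p, %p]\n",
			si->si_addr, si->si_lower, si->si_upper);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGSEGV, &sa, NULL);
	/* ... run code that may fail an address-bound check ... */
	return 0;
}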
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index ea0796bdcf88..5c15c2a5c123 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -82,4 +82,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* __ASM_GENERIC_SOCKET_H */
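SO_INCOMING_CPU is read with getsockopt() to learn which CPU last processed packets for the socket, while SO_ATTACH_BPF takes an eBPF program fd (as returned by bpf(BPF_PROG_LOAD, ...)) rather than a classic sock_fprog. A hedged user-space sketch, assuming prog_fd was loaded elsewhere and that libc headers already define both options:

#include <stdio.h>
#include <sys/socket.h>

/* Sketch: exercise the two new socket options on an existing socket fd. */
static int use_new_sockopts(int sock, int prog_fd)
{
	int cpu;
	socklen_t len = sizeof(cpu);

	if (getsockopt(sock, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
		printf("last packet for this socket was handled on CPU %d\n", cpu);

	/* Attach an already-loaded eBPF filter by fd (cf. SO_ATTACH_FILTER). */
	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
			  &prog_fd, sizeof(prog_fd));
}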
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 11d11bc5c78f..e016bd9b1a04 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -705,9 +705,13 @@ __SYSCALL(__NR_seccomp, sys_seccomp)
__SYSCALL(__NR_getrandom, sys_getrandom)
#define __NR_memfd_create 279
__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 280
+__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 281
+__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
#undef __NR_syscalls
-#define __NR_syscalls 280
+#define __NR_syscalls 282
/*
* All syscalls below here should go away really,
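execveat() (wired up here as __NR_execveat) is the primitive behind an fexecve() that works without /proc: with AT_EMPTY_PATH and an empty pathname it executes the program referred to by the descriptor itself. A hedged sketch using syscall(2) directly, since libc wrappers and the __NR_execveat define may not be available in older userspace headers:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };
	int fd = open("/bin/true", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;
	/* Execute the file referred to by fd (fexecve-like behaviour). */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	return 1;	/* only reached if execveat failed */
}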
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a0db2d4aa5f0..86574b0005ff 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -286,6 +286,8 @@ struct drm_mode_get_property {
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
+	/* This is only used to count enum values, not blobs. The _blobs
+	 * suffix is kept purely for historical reasons, i.e. backwards compat. */
__u32 count_enum_blobs;
};
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index d5844122ff32..5575ed1598bd 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -33,38 +33,6 @@ struct drm_exynos_gem_create {
};
/**
- * A structure for getting buffer offset.
- *
- * @handle: a pointer to gem object created.
- * @pad: just padding to be 64-bit aligned.
- * @offset: relatived offset value of the memory region allocated.
- * - this value should be set by user.
- */
-struct drm_exynos_gem_map_off {
- unsigned int handle;
- unsigned int pad;
- uint64_t offset;
-};
-
-/**
- * A structure for mapping buffer.
- *
- * @handle: a handle to gem object created.
- * @pad: just padding to be 64-bit aligned.
- * @size: memory size to be mapped.
- * @mapped: having user virtual address mmaped.
- * - this variable would be filled by exynos gem module
- * of kernel side with user virtual address which is allocated
- * by do_mmap().
- */
-struct drm_exynos_gem_mmap {
- unsigned int handle;
- unsigned int pad;
- uint64_t size;
- uint64_t mapped;
-};
-
-/**
 * A structure for gem information.
*
* @handle: a handle to gem object created.
@@ -316,8 +284,6 @@ struct drm_exynos_ipp_cmd_ctrl {
};
#define DRM_EXYNOS_GEM_CREATE 0x00
-#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
-#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
@@ -336,12 +302,6 @@ struct drm_exynos_ipp_cmd_ctrl {
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
-#define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off)
-
-#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
-
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ff57f07c3249..250262265ee3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
typedef struct drm_i915_getparam {
int param;
@@ -876,6 +877,12 @@ struct drm_i915_gem_get_tiling {
* mmap mapping.
*/
__u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
};
struct drm_i915_gem_get_aperture {
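phys_swizzle_mode lets user space detect whether CPU access through a mmap of a physically backed object sees the same bit-6 swizzling as the GPU view; callers typically compare it with swizzle_mode after the get-tiling ioctl. A hedged user-space sketch (the helper is illustrative and assumes libdrm headers carrying this field):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: non-zero if CPU mmap access uses the same swizzling as the GPU,
 * so software (de)tiling needs no extra fixups. */
static int bo_cpu_swizzle_matches(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_get_tiling get = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) != 0)
		return 0;
	return get.swizzle_mode == get.phys_swizzle_mode;
}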
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fea6099608ef..50d0fb41a3bf 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -511,6 +511,7 @@ typedef struct {
#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_RADEON_GEM_VA 0x2b
#define DRM_RADEON_GEM_OP 0x2c
+#define DRM_RADEON_GEM_USERPTR 0x2d
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -554,6 +555,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
+#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
typedef struct drm_radeon_init {
enum {
@@ -799,6 +801,10 @@ struct drm_radeon_gem_info {
#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
#define RADEON_GEM_GTT_UC (1 << 1)
#define RADEON_GEM_GTT_WC (1 << 2)
+/* BO is expected to be accessed by the CPU */
+#define RADEON_GEM_CPU_ACCESS (1 << 3)
+/* CPU access is not expected to work for this BO */
+#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
struct drm_radeon_gem_create {
uint64_t size;
@@ -808,6 +814,23 @@ struct drm_radeon_gem_create {
uint32_t flags;
};
+/*
+ * This is not a reliable API and you should expect it to fail for any
+ * number of reasons, and have a fallback path that does not use userptr to
+ * perform any operation.
+ */
+#define RADEON_GEM_USERPTR_READONLY (1 << 0)
+#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
+#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
+#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
+
+struct drm_radeon_gem_userptr {
+ uint64_t addr;
+ uint64_t size;
+ uint32_t flags;
+ uint32_t handle;
+};
+
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
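The comment above the userptr flags is the key contract: DRM_IOCTL_RADEON_GEM_USERPTR may simply fail, so callers probe it and fall back to ordinary GEM BOs plus copies. A hedged user-space sketch of that probe (error handling reduced to the essentials, headers assumed to carry this ioctl):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Sketch: try to wrap an existing user allocation; the caller must keep a
 * non-userptr fallback path, as the header comment demands. */
static int radeon_try_userptr(int drm_fd, void *addr, uint64_t size,
			      uint32_t *handle)
{
	struct drm_radeon_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uintptr_t)addr;
	args.size = size;
	args.flags = RADEON_GEM_USERPTR_READONLY |
		     RADEON_GEM_USERPTR_ANONONLY |
		     RADEON_GEM_USERPTR_VALIDATE;

	if (ioctl(drm_fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args) != 0)
		return -1;	/* fall back to a normal GEM BO + copy */

	*handle = args.handle;
	return 0;
}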
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 4fc66f6b12ce..c472bedbe38e 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -29,7 +29,7 @@
#define __VMWGFX_DRM_H__
#ifndef __KERNEL__
-#include <drm.h>
+#include <drm/drm.h>
#endif
#define DRM_VMW_MAX_SURFACE_FACES 6
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index be88166349a1..00b100023c47 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -1,4 +1,5 @@
# UAPI Header export list
+header-y += android/
header-y += byteorder/
header-y += can/
header-y += caif/
@@ -37,27 +38,27 @@ header-y += aio_abi.h
header-y += apm_bios.h
header-y += arcfb.h
header-y += atalk.h
-header-y += atm.h
-header-y += atm_eni.h
-header-y += atm_he.h
-header-y += atm_idt77105.h
-header-y += atm_nicstar.h
-header-y += atm_tcp.h
-header-y += atm_zatm.h
header-y += atmapi.h
header-y += atmarp.h
header-y += atmbr2684.h
header-y += atmclip.h
header-y += atmdev.h
+header-y += atm_eni.h
+header-y += atm.h
+header-y += atm_he.h
+header-y += atm_idt77105.h
header-y += atmioc.h
header-y += atmlec.h
header-y += atmmpc.h
+header-y += atm_nicstar.h
header-y += atmppp.h
header-y += atmsap.h
header-y += atmsvc.h
+header-y += atm_tcp.h
+header-y += atm_zatm.h
header-y += audit.h
-header-y += auto_fs.h
header-y += auto_fs4.h
+header-y += auto_fs.h
header-y += auxvec.h
header-y += ax25.h
header-y += b1lli.h
@@ -67,6 +68,8 @@ header-y += bfs_fs.h
header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
+header-y += bpf_common.h
+header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
header-y += btrfs.h
@@ -91,21 +94,21 @@ header-y += cyclades.h
header-y += cycx_cfm.h
header-y += dcbnl.h
header-y += dccp.h
-header-y += dlm.h
+header-y += dlmconstants.h
header-y += dlm_device.h
+header-y += dlm.h
header-y += dlm_netlink.h
header-y += dlm_plock.h
-header-y += dlmconstants.h
header-y += dm-ioctl.h
header-y += dm-log-userspace.h
header-y += dn.h
header-y += dqblk_xfs.h
header-y += edd.h
header-y += efs_fs_sb.h
+header-y += elfcore.h
header-y += elf-em.h
header-y += elf-fdpic.h
header-y += elf.h
-header-y += elfcore.h
header-y += errno.h
header-y += errqueue.h
header-y += ethtool.h
@@ -123,22 +126,24 @@ header-y += filter.h
header-y += firewire-cdev.h
header-y += firewire-constants.h
header-y += flat.h
+header-y += fou.h
header-y += fs.h
header-y += fsl_hypervisor.h
header-y += fuse.h
header-y += futex.h
header-y += gameport.h
-header-y += gen_stats.h
header-y += genetlink.h
+header-y += gen_stats.h
header-y += gfs2_ondisk.h
header-y += gigaset_dev.h
-header-y += hdlc.h
header-y += hdlcdrv.h
+header-y += hdlc.h
header-y += hdreg.h
-header-y += hid.h
header-y += hiddev.h
+header-y += hid.h
header-y += hidraw.h
header-y += hpet.h
+header-y += hsr_netlink.h
header-y += hyperv.h
header-y += hysdn_if.h
header-y += i2c-dev.h
@@ -147,7 +152,6 @@ header-y += i2o-dev.h
header-y += i8k.h
header-y += icmp.h
header-y += icmpv6.h
-header-y += if.h
header-y += if_addr.h
header-y += if_addrlabel.h
header-y += if_alg.h
@@ -161,6 +165,7 @@ header-y += if_ether.h
header-y += if_fc.h
header-y += if_fddi.h
header-y += if_frad.h
+header-y += if.h
header-y += if_hippi.h
header-y += if_infiniband.h
header-y += if_link.h
@@ -178,40 +183,41 @@ header-y += if_tunnel.h
header-y += if_vlan.h
header-y += if_x25.h
header-y += igmp.h
-header-y += in.h
header-y += in6.h
-header-y += in_route.h
header-y += inet_diag.h
+header-y += in.h
header-y += inotify.h
header-y += input.h
+header-y += in_route.h
header-y += ioctl.h
-header-y += ip.h
header-y += ip6_tunnel.h
-header-y += ip_vs.h
header-y += ipc.h
+header-y += ip.h
header-y += ipmi.h
header-y += ipmi_msgdefs.h
header-y += ipsec.h
header-y += ipv6.h
header-y += ipv6_route.h
+header-y += ip_vs.h
header-y += ipx.h
header-y += irda.h
header-y += irqnr.h
-header-y += isdn.h
header-y += isdn_divertif.h
-header-y += isdn_ppp.h
+header-y += isdn.h
header-y += isdnif.h
+header-y += isdn_ppp.h
header-y += iso_fs.h
-header-y += ivtv.h
header-y += ivtvfb.h
+header-y += ivtv.h
header-y += ixjuser.h
header-y += jffs2.h
header-y += joystick.h
-header-y += kd.h
+header-y += kcmp.h
header-y += kdev_t.h
-header-y += kernel-page-flags.h
-header-y += kernel.h
+header-y += kd.h
header-y += kernelcapi.h
+header-y += kernel.h
+header-y += kernel-page-flags.h
header-y += kexec.h
header-y += keyboard.h
header-y += keyctl.h
@@ -227,6 +233,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h \
header-y += kvm_para.h
endif
+header-y += hw_breakpoint.h
header-y += l2tp.h
header-y += libc-compat.h
header-y += limits.h
@@ -239,6 +246,7 @@ header-y += map_to_7segment.h
header-y += matroxfb.h
header-y += mdio.h
header-y += media.h
+header-y += media-bus-format.h
header-y += mei.h
header-y += memfd.h
header-y += mempolicy.h
@@ -249,44 +257,45 @@ header-y += mii.h
header-y += minix_fs.h
header-y += mman.h
header-y += mmtimer.h
+header-y += mpls.h
header-y += mqueue.h
-header-y += mroute.h
header-y += mroute6.h
+header-y += mroute.h
header-y += msdos_fs.h
header-y += msg.h
header-y += mtio.h
-header-y += n_r3964.h
header-y += nbd.h
-header-y += ncp.h
header-y += ncp_fs.h
+header-y += ncp.h
header-y += ncp_mount.h
header-y += ncp_no.h
header-y += neighbour.h
-header-y += net.h
-header-y += net_dropmon.h
-header-y += net_tstamp.h
header-y += netconf.h
header-y += netdevice.h
-header-y += netlink_diag.h
-header-y += netfilter.h
+header-y += net_dropmon.h
header-y += netfilter_arp.h
header-y += netfilter_bridge.h
header-y += netfilter_decnet.h
+header-y += netfilter.h
header-y += netfilter_ipv4.h
header-y += netfilter_ipv6.h
+header-y += net.h
+header-y += netlink_diag.h
header-y += netlink.h
header-y += netrom.h
+header-y += net_tstamp.h
header-y += nfc.h
-header-y += nfs.h
header-y += nfs2.h
header-y += nfs3.h
header-y += nfs4.h
header-y += nfs4_mount.h
+header-y += nfsacl.h
header-y += nfs_fs.h
+header-y += nfs.h
header-y += nfs_idmap.h
header-y += nfs_mount.h
-header-y += nfsacl.h
header-y += nl80211.h
+header-y += n_r3964.h
header-y += nubus.h
header-y += nvme.h
header-y += nvram.h
@@ -306,16 +315,16 @@ header-y += pfkeyv2.h
header-y += pg.h
header-y += phantom.h
header-y += phonet.h
+header-y += pktcdvd.h
header-y += pkt_cls.h
header-y += pkt_sched.h
-header-y += pktcdvd.h
header-y += pmu.h
header-y += poll.h
header-y += posix_types.h
header-y += ppdev.h
header-y += ppp-comp.h
-header-y += ppp-ioctl.h
header-y += ppp_defs.h
+header-y += ppp-ioctl.h
header-y += pps.h
header-y += prctl.h
header-y += psci.h
@@ -347,13 +356,14 @@ header-y += seccomp.h
header-y += securebits.h
header-y += selinux_netlink.h
header-y += sem.h
-header-y += serial.h
header-y += serial_core.h
+header-y += serial.h
header-y += serial_reg.h
header-y += serio.h
header-y += shm.h
-header-y += signal.h
header-y += signalfd.h
+header-y += signal.h
+header-y += smiapp.h
header-y += snmp.h
header-y += sock_diag.h
header-y += socket.h
@@ -361,8 +371,8 @@ header-y += sockios.h
header-y += som.h
header-y += sonet.h
header-y += sonypi.h
-header-y += sound.h
header-y += soundcard.h
+header-y += sound.h
header-y += stat.h
header-y += stddef.h
header-y += string.h
@@ -371,20 +381,23 @@ header-y += swab.h
header-y += synclink.h
header-y += sysctl.h
header-y += sysinfo.h
+header-y += target_core_user.h
header-y += taskstats.h
header-y += tcp.h
header-y += tcp_metrics.h
header-y += telephony.h
header-y += termios.h
+header-y += thermal.h
header-y += time.h
header-y += times.h
header-y += timex.h
header-y += tiocl.h
-header-y += tipc.h
header-y += tipc_config.h
+header-y += tipc_netlink.h
+header-y += tipc.h
header-y += toshiba.h
-header-y += tty.h
header-y += tty_flags.h
+header-y += tty.h
header-y += types.h
header-y += udf_fs_i.h
header-y += udp.h
@@ -420,6 +433,9 @@ header-y += virtio_net.h
header-y += virtio_pci.h
header-y += virtio_ring.h
header-y += virtio_rng.h
+header-y += virtio_scsi.h
+header-y += virtio_types.h
+header-y += vm_sockets.h
header-y += vt.h
header-y += wait.h
header-y += wanrouter.h
@@ -429,6 +445,5 @@ header-y += wireless.h
header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
-header-y += hw_breakpoint.h
header-y += zorro.h
header-y += zorro_ids.h
diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild
new file mode 100644
index 000000000000..ca011eec252a
--- /dev/null
+++ b/include/uapi/linux/android/Kbuild
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += binder.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
new file mode 100644
index 000000000000..41420e341e75
--- /dev/null
+++ b/include/uapi/linux/android/binder.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ __u32 type;
+ __u32 flags;
+
+ /* 8 bytes of data. */
+ union {
+ binder_uintptr_t binder; /* local object */
+ __u32 handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ binder_uintptr_t cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver
+ * must translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ binder_size_t write_size; /* bytes to write */
+ binder_size_t write_consumed; /* bytes consumed by driver */
+ binder_uintptr_t write_buffer;
+ binder_size_t read_size; /* bytes to read */
+ binder_size_t read_consumed; /* bytes consumed by driver */
+ binder_uintptr_t read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ __s32 protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ /* target descriptor of command transaction */
+ __u32 handle;
+ /* target descriptor of return transaction */
+ binder_uintptr_t ptr;
+ } target;
+ binder_uintptr_t cookie; /* target object cookie */
+ __u32 code; /* transaction command */
+
+ /* General information about the transaction. */
+ __u32 flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ binder_size_t data_size; /* number of bytes of data */
+ binder_size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ binder_uintptr_t buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ binder_uintptr_t offsets;
+ } ptr;
+ __u8 buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+ __u32 handle;
+ binder_uintptr_t cookie;
+} __packed;
+
+struct binder_pri_desc {
+ __s32 priority;
+ __u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+ __s32 priority;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+ BR_ERROR = _IOR('r', 0, __s32),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+ */
+};
+
+enum binder_driver_command_protocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
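As a hedged illustration (not part of the patch), the versioning handshake defined by struct binder_version and the BINDER_VERSION ioctl above can be exercised from userspace roughly as follows; the /dev/binder node name and the uapi include path are assumptions about the consuming system:

/*
 * Minimal sketch: query the binder protocol version.  /dev/binder is
 * an assumed device node; error handling is kept to the bare minimum.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
	struct binder_version vers;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0)
		return 1;
	printf("binder protocol version %d (built against %d)\n",
	       vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
	return 0;
}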
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 3b9ff33e1768..d3475e1f15ec 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -322,9 +322,15 @@ enum {
#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
-#define AUDIT_VERSION_BACKLOG_LIMIT 1
-#define AUDIT_VERSION_BACKLOG_WAIT_TIME 2
-#define AUDIT_VERSION_LATEST AUDIT_VERSION_BACKLOG_WAIT_TIME
+#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
+#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
+#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
+ AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME)
+
+/* deprecated: AUDIT_VERSION_* */
+#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
+#define AUDIT_VERSION_BACKLOG_LIMIT AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT
+#define AUDIT_VERSION_BACKLOG_WAIT_TIME AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME
/* Failure-to-log actions */
#define AUDIT_FAIL_SILENT 0
@@ -352,6 +358,7 @@ enum {
#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_M32R (EM_M32R)
#define AUDIT_ARCH_M68K (EM_68K)
+#define AUDIT_ARCH_MICROBLAZE (EM_MICROBLAZE)
#define AUDIT_ARCH_MIPS (EM_MIPS)
#define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT)
@@ -364,7 +371,9 @@ enum {
#define AUDIT_ARCH_PARISC (EM_PARISC)
#define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_PPC (EM_PPC)
+/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_S390 (EM_S390)
#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_SH (EM_SH)
@@ -403,7 +412,10 @@ struct audit_status {
__u32 backlog_limit; /* waiting messages limit */
__u32 lost; /* messages lost */
__u32 backlog; /* messages waiting in queue */
- __u32 version; /* audit api version number */
+ union {
+ __u32 version; /* deprecated: audit api version num */
+ __u32 feature_bitmap; /* bitmap of kernel audit features */
+ };
__u32 backlog_wait_time;/* message queue wait timeout */
};
@@ -445,17 +457,4 @@ struct audit_rule_data {
char buf[0]; /* string fields buffer */
};
-/* audit_rule is supported to maintain backward compatibility with
- * userspace. It supports integer fields only and corresponds to
- * AUDIT_ADD, AUDIT_DEL and AUDIT_LIST requests.
- */
-struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */
- __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */
- __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */
- __u32 field_count;
- __u32 mask[AUDIT_BITMASK_SIZE];
- __u32 fields[AUDIT_MAX_FIELDS];
- __u32 values[AUDIT_MAX_FIELDS];
-};
-
#endif /* _UAPI_LINUX_AUDIT_H_ */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
new file mode 100644
index 000000000000..45da7ec7d274
--- /dev/null
+++ b/include/uapi/linux/bpf.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_H__
+#define _UAPI__LINUX_BPF_H__
+
+#include <linux/types.h>
+#include <linux/bpf_common.h>
+
+/* Extended instruction set based on top of classic BPF */
+
+/* instruction classes */
+#define BPF_ALU64 0x07 /* alu mode in double word width */
+
+/* ld/ldx fields */
+#define BPF_DW 0x18 /* double word */
+#define BPF_XADD 0xc0 /* exclusive add */
+
+/* alu/jmp fields */
+#define BPF_MOV 0xb0 /* mov reg to reg */
+#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
+
+/* change endianness of a register */
+#define BPF_END 0xd0 /* flags for endianness conversion: */
+#define BPF_TO_LE 0x00 /* convert to little-endian */
+#define BPF_TO_BE 0x08 /* convert to big-endian */
+#define BPF_FROM_LE BPF_TO_LE
+#define BPF_FROM_BE BPF_TO_BE
+
+#define BPF_JNE 0x50 /* jump != */
+#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
+#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
+#define BPF_CALL 0x80 /* function call */
+#define BPF_EXIT 0x90 /* function return */
+
+/* Register numbers */
+enum {
+ BPF_REG_0 = 0,
+ BPF_REG_1,
+ BPF_REG_2,
+ BPF_REG_3,
+ BPF_REG_4,
+ BPF_REG_5,
+ BPF_REG_6,
+ BPF_REG_7,
+ BPF_REG_8,
+ BPF_REG_9,
+ BPF_REG_10,
+ __MAX_BPF_REG,
+};
+
+/* BPF has 10 general purpose 64-bit registers and stack frame. */
+#define MAX_BPF_REG __MAX_BPF_REG
+
+struct bpf_insn {
+ __u8 code; /* opcode */
+ __u8 dst_reg:4; /* dest register */
+ __u8 src_reg:4; /* source register */
+ __s16 off; /* signed offset */
+ __s32 imm; /* signed immediate constant */
+};
+
+/* BPF syscall commands */
+enum bpf_cmd {
+ /* create a map with given type and attributes
+ * fd = bpf(BPF_MAP_CREATE, union bpf_attr *, u32 size)
+ * returns fd or negative error
+ * map is deleted when fd is closed
+ */
+ BPF_MAP_CREATE,
+
+ /* lookup key in a given map
+ * err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key, attr->value
+ * returns zero and stores found elem into value
+ * or negative error
+ */
+ BPF_MAP_LOOKUP_ELEM,
+
+ /* create or update key/value pair in a given map
+ * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key, attr->value, attr->flags
+ * returns zero or negative error
+ */
+ BPF_MAP_UPDATE_ELEM,
+
+ /* find and delete elem by key in a given map
+ * err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key
+ * returns zero or negative error
+ */
+ BPF_MAP_DELETE_ELEM,
+
+ /* lookup key in a given map and return next key
+ * err = bpf(BPF_MAP_GET_NEXT_KEY, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key, attr->next_key
+ * returns zero and stores next key or negative error
+ */
+ BPF_MAP_GET_NEXT_KEY,
+
+ /* verify and load eBPF program
+ * prog_fd = bpf(BPF_PROG_LOAD, union bpf_attr *attr, u32 size)
+ * Using attr->prog_type, attr->insns, attr->license
+ * returns fd or negative error
+ */
+ BPF_PROG_LOAD,
+};
+
+enum bpf_map_type {
+ BPF_MAP_TYPE_UNSPEC,
+ BPF_MAP_TYPE_HASH,
+ BPF_MAP_TYPE_ARRAY,
+};
+
+enum bpf_prog_type {
+ BPF_PROG_TYPE_UNSPEC,
+ BPF_PROG_TYPE_SOCKET_FILTER,
+};
+
+/* flags for BPF_MAP_UPDATE_ELEM command */
+#define BPF_ANY 0 /* create new element or update existing */
+#define BPF_NOEXIST 1 /* create new element if it didn't exist */
+#define BPF_EXIST 2 /* update existing element */
+
+union bpf_attr {
+ struct { /* anonymous struct used by BPF_MAP_CREATE command */
+ __u32 map_type; /* one of enum bpf_map_type */
+ __u32 key_size; /* size of key in bytes */
+ __u32 value_size; /* size of value in bytes */
+ __u32 max_entries; /* max number of entries in a map */
+ };
+
+ struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
+ __u32 map_fd;
+ __aligned_u64 key;
+ union {
+ __aligned_u64 value;
+ __aligned_u64 next_key;
+ };
+ __u64 flags;
+ };
+
+ struct { /* anonymous struct used by BPF_PROG_LOAD command */
+ __u32 prog_type; /* one of enum bpf_prog_type */
+ __u32 insn_cnt;
+ __aligned_u64 insns;
+ __aligned_u64 license;
+ __u32 log_level; /* verbosity level of verifier */
+ __u32 log_size; /* size of user buffer */
+ __aligned_u64 log_buf; /* user supplied buffer */
+ };
+} __attribute__((aligned(8)));
+
+/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+ * function eBPF program intends to call
+ */
+enum bpf_func_id {
+ BPF_FUNC_unspec,
+ BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
+ BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
+ BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
+ __BPF_FUNC_MAX_ID,
+};
+
+#endif /* _UAPI__LINUX_BPF_H__ */
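The enum bpf_cmd comments above spell out the calling convention (fd = bpf(BPF_MAP_CREATE, ...), err = bpf(BPF_MAP_UPDATE_ELEM, ...), and so on). A hedged sketch of that flow, assuming the architecture's unistd headers export the syscall number as __NR_bpf, is shown below; it is an illustration, not part of the patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Thin wrapper over the raw bpf(2) syscall. */
static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;
	__u32 key = 0;
	__u64 value = 42, out = 0;
	int map_fd;

	/* BPF_MAP_CREATE: the map is deleted when the returned fd is closed. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 1;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return 1;

	/* BPF_MAP_UPDATE_ELEM: user pointers are passed as 64-bit values. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&value;
	attr.flags = BPF_ANY;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) < 0)
		return 1;

	/* BPF_MAP_LOOKUP_ELEM: reads the element back into 'out'. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&out;
	if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) == 0)
		printf("key %u -> %llu\n", key, (unsigned long long)out);
	close(map_fd);
	return 0;
}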
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
new file mode 100644
index 000000000000..a5c220e0828f
--- /dev/null
+++ b/include/uapi/linux/bpf_common.h
@@ -0,0 +1,55 @@
+#ifndef _UAPI__LINUX_BPF_COMMON_H__
+#define _UAPI__LINUX_BPF_COMMON_H__
+
+/* Instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00
+#define BPF_H 0x08
+#define BPF_B 0x10
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define BPF_OR 0x40
+#define BPF_AND 0x50
+#define BPF_LSH 0x60
+#define BPF_RSH 0x70
+#define BPF_NEG 0x80
+#define BPF_MOD 0x90
+#define BPF_XOR 0xa0
+
+#define BPF_JA 0x00
+#define BPF_JEQ 0x10
+#define BPF_JGT 0x20
+#define BPF_JGE 0x30
+#define BPF_JSET 0x40
+#define BPF_SRC(code) ((code) & 0x08)
+#define BPF_K 0x00
+#define BPF_X 0x08
+
+#ifndef BPF_MAXINSNS
+#define BPF_MAXINSNS 4096
+#endif
+
+#endif /* _UAPI__LINUX_BPF_COMMON_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 2f47824e7a36..611e1c5893b4 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -157,6 +157,7 @@ struct btrfs_ioctl_dev_replace_status_params {
#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR 0
#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED 1
#define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED 2
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS 3
struct btrfs_ioctl_dev_replace_args {
__u64 cmd; /* in */
__u64 result; /* out */
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index c247446ab25a..1c508be9687f 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -71,6 +71,7 @@
#define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */
/* (at least one error counter exceeds */
/* the protocol-defined level of 127) */
+#define CAN_ERR_CRTL_ACTIVE 0x40 /* recovered to error active state */
/* error in CAN protocol (type) / data[2] */
#define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */
diff --git a/include/uapi/linux/dlmconstants.h b/include/uapi/linux/dlmconstants.h
index 47bf08dc7566..2857bdc5b27b 100644
--- a/include/uapi/linux/dlmconstants.h
+++ b/include/uapi/linux/dlmconstants.h
@@ -114,7 +114,7 @@
*
* DLM_LKF_ORPHAN
*
- * not yet implemented
+ * Acquire an orphan lock.
*
* DLM_LKF_ALTPR
*
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index c8a4302093a3..a570d7b5796c 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 27
+#define DM_VERSION_MINOR 29
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2013-10-30)"
+#define DM_VERSION_EXTRA "-ioctl (2014-10-28)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -352,4 +352,9 @@ enum {
*/
#define DM_DEFERRED_REMOVE (1 << 17) /* In/Out */
+/*
+ * If set, the device is suspended internally.
+ */
+#define DM_INTERNAL_SUSPEND_FLAG (1 << 18) /* Out */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 01529bd96438..ae99f7743cf4 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -32,7 +32,9 @@
#define EM_V850 87 /* NEC v850 */
#define EM_M32R 88 /* Renesas M32R */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
+#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
#define EM_AARCH64 183 /* ARM 64 bit */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ef6103bf1f9b..71e1d0ed92f7 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -391,10 +391,13 @@ typedef struct elf64_shdr {
#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
+#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
+#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
+#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */
#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */
#define NT_METAG_TLS 0x502 /* Metag TLS pointer */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index e3c7a719c76b..5f66d9c2889d 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -209,6 +209,33 @@ struct ethtool_value {
__u32 data;
};
+enum tunable_id {
+ ETHTOOL_ID_UNSPEC,
+ ETHTOOL_RX_COPYBREAK,
+ ETHTOOL_TX_COPYBREAK,
+};
+
+enum tunable_type_id {
+ ETHTOOL_TUNABLE_UNSPEC,
+ ETHTOOL_TUNABLE_U8,
+ ETHTOOL_TUNABLE_U16,
+ ETHTOOL_TUNABLE_U32,
+ ETHTOOL_TUNABLE_U64,
+ ETHTOOL_TUNABLE_STRING,
+ ETHTOOL_TUNABLE_S8,
+ ETHTOOL_TUNABLE_S16,
+ ETHTOOL_TUNABLE_S32,
+ ETHTOOL_TUNABLE_S64,
+};
+
+struct ethtool_tunable {
+ __u32 cmd;
+ __u32 id;
+ __u32 type_id;
+ __u32 len;
+ void *data[0];
+};
+
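The tunable enums and struct ethtool_tunable above, together with the ETHTOOL_GTUNABLE/ETHTOOL_STUNABLE command numbers added later in this file, form a small get/set interface. A hedged sketch of a read, assuming the usual SIOCETHTOOL path and an interface named eth0 (illustration only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_tunable *tuna;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	/* One __u32 of payload follows the fixed header. */
	tuna = calloc(1, sizeof(*tuna) + sizeof(__u32));
	if (!tuna)
		return 1;
	tuna->cmd = ETHTOOL_GTUNABLE;
	tuna->id = ETHTOOL_RX_COPYBREAK;
	tuna->type_id = ETHTOOL_TUNABLE_U32;
	tuna->len = sizeof(__u32);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)tuna;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-copybreak: %u\n", *(__u32 *)tuna->data);
	free(tuna);
	return 0;
}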
/**
* struct ethtool_regs - hardware register dump
* @cmd: Command number = %ETHTOOL_GREGS
@@ -507,6 +534,7 @@ struct ethtool_pauseparam {
* @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE;
* now deprecated
* @ETH_SS_FEATURES: Device feature names
+ * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -514,6 +542,7 @@ enum ethtool_stringset {
ETH_SS_PRIV_FLAGS,
ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
+ ETH_SS_RSS_HASH_FUNCS,
};
/**
@@ -857,6 +886,8 @@ struct ethtool_rxfh_indir {
* @key_size: On entry, the array size of the user buffer for the hash key,
* which may be zero. On return from %ETHTOOL_GRSSH, the size of the
* hardware hash key.
+ * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
+ * Valid values are one of the %ETH_RSS_HASH_*.
* @rsvd: Reserved for future extensions.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size __u32 elements, followed by hash key of @key_size
@@ -866,14 +897,16 @@ struct ethtool_rxfh_indir {
* size should be returned. For %ETHTOOL_SRSSH, an @indir_size of
* %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested
* and a @indir_size of zero means the indir table should be reset to default
- * values.
+ * values. An hfunc of zero means that hash function setting is not requested.
*/
struct ethtool_rxfh {
__u32 cmd;
__u32 rss_context;
__u32 indir_size;
__u32 key_size;
- __u32 rsvd[2];
+ __u8 hfunc;
+ __u8 rsvd8[3];
+ __u32 rsvd32;
__u32 rss_config[0];
};
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
@@ -1152,6 +1185,8 @@ enum ethtool_sfeatures_retval_bits {
#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */
#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */
+#define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */
+#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -1184,6 +1219,10 @@ enum ethtool_sfeatures_retval_bits {
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
@@ -1212,6 +1251,10 @@ enum ethtool_sfeatures_retval_bits {
#define ADVERTISED_40000baseCR4_Full (1 << 24)
#define ADVERTISED_40000baseSR4_Full (1 << 25)
#define ADVERTISED_40000baseLR4_Full (1 << 26)
+#define ADVERTISED_56000baseKR4_Full (1 << 27)
+#define ADVERTISED_56000baseCR4_Full (1 << 28)
+#define ADVERTISED_56000baseSR4_Full (1 << 29)
+#define ADVERTISED_56000baseLR4_Full (1 << 30)
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
@@ -1219,12 +1262,16 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
#define SPEED_10000 10000
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#define SPEED_56000 56000
+
#define SPEED_UNKNOWN -1
/* Duplex, half or full. */
@@ -1314,6 +1361,10 @@ enum ethtool_sfeatures_retval_bits {
#define ETH_MODULE_SFF_8079_LEN 256
#define ETH_MODULE_SFF_8472 0x2
#define ETH_MODULE_SFF_8472_LEN 512
+#define ETH_MODULE_SFF_8636 0x3
+#define ETH_MODULE_SFF_8636_LEN 256
+#define ETH_MODULE_SFF_8436 0x4
+#define ETH_MODULE_SFF_8436_LEN 256
/* Reset flags */
/* The reset() operation must clear the flags for the components which
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 253b4d42cf2b..47785d5ecf17 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -7,7 +7,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
-
+#include <linux/bpf_common.h>
/*
* Current version of the filter code architecture.
@@ -32,56 +32,6 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
struct sock_filter __user *filter;
};
-/*
- * Instruction classes
- */
-
-#define BPF_CLASS(code) ((code) & 0x07)
-#define BPF_LD 0x00
-#define BPF_LDX 0x01
-#define BPF_ST 0x02
-#define BPF_STX 0x03
-#define BPF_ALU 0x04
-#define BPF_JMP 0x05
-#define BPF_RET 0x06
-#define BPF_MISC 0x07
-
-/* ld/ldx fields */
-#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
-#define BPF_MODE(code) ((code) & 0xe0)
-#define BPF_IMM 0x00
-#define BPF_ABS 0x20
-#define BPF_IND 0x40
-#define BPF_MEM 0x60
-#define BPF_LEN 0x80
-#define BPF_MSH 0xa0
-
-/* alu/jmp fields */
-#define BPF_OP(code) ((code) & 0xf0)
-#define BPF_ADD 0x00
-#define BPF_SUB 0x10
-#define BPF_MUL 0x20
-#define BPF_DIV 0x30
-#define BPF_OR 0x40
-#define BPF_AND 0x50
-#define BPF_LSH 0x60
-#define BPF_RSH 0x70
-#define BPF_NEG 0x80
-#define BPF_MOD 0x90
-#define BPF_XOR 0xa0
-
-#define BPF_JA 0x00
-#define BPF_JEQ 0x10
-#define BPF_JGT 0x20
-#define BPF_JGE 0x30
-#define BPF_JSET 0x40
-#define BPF_SRC(code) ((code) & 0x08)
-#define BPF_K 0x00
-#define BPF_X 0x08
-
/* ret - BPF_K and BPF_X also apply */
#define BPF_RVAL(code) ((code) & 0x18)
#define BPF_A 0x10
@@ -91,10 +41,6 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define BPF_TAX 0x00
#define BPF_TXA 0x80
-#ifndef BPF_MAXINSNS
-#define BPF_MAXINSNS 4096
-#endif
-
/*
* Macros for filter block array initializers.
*/
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
new file mode 100644
index 000000000000..8df06894da23
--- /dev/null
+++ b/include/uapi/linux/fou.h
@@ -0,0 +1,39 @@
+/* fou.h - FOU Interface */
+
+#ifndef _UAPI_LINUX_FOU_H
+#define _UAPI_LINUX_FOU_H
+
+/* NETLINK_GENERIC related info
+ */
+#define FOU_GENL_NAME "fou"
+#define FOU_GENL_VERSION 0x1
+
+enum {
+ FOU_ATTR_UNSPEC,
+ FOU_ATTR_PORT, /* u16 */
+ FOU_ATTR_AF, /* u8 */
+ FOU_ATTR_IPPROTO, /* u8 */
+ FOU_ATTR_TYPE, /* u8 */
+
+ __FOU_ATTR_MAX,
+};
+
+#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
+
+enum {
+ FOU_CMD_UNSPEC,
+ FOU_CMD_ADD,
+ FOU_CMD_DEL,
+
+ __FOU_CMD_MAX,
+};
+
+enum {
+ FOU_ENCAP_UNSPEC,
+ FOU_ENCAP_DIRECT,
+ FOU_ENCAP_GUE,
+};
+
+#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_FOU_H */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index ca1a11bb4443..3735fa0a6784 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -37,6 +37,7 @@
#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */
#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */
+#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */
struct fstrim_range {
__u64 start;
diff --git a/include/uapi/linux/genwqe/genwqe_card.h b/include/uapi/linux/genwqe/genwqe_card.h
index 4fc065f29255..baa93fb4cd4f 100644
--- a/include/uapi/linux/genwqe/genwqe_card.h
+++ b/include/uapi/linux/genwqe/genwqe_card.h
@@ -8,7 +8,7 @@
*
* Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
* Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
- * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Jung <mijung@gmx.net>
* Author: Michael Ruettger <michael@ibmra.de>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 78e4a86030dd..bb1cb73c927a 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -134,10 +134,11 @@ struct hv_start_fcopy {
struct hv_do_fcopy {
struct hv_fcopy_hdr hdr;
+ __u32 pad;
__u64 offset;
__u32 size;
__u8 data[DATA_FRAGMENT];
-};
+} __attribute__((packed));
/*
* An implementation of HyperV key value pair (KVP) functionality for Linux.
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index 0f9acce5b1ff..f2acd2fde1f3 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -32,6 +32,8 @@ struct af_alg_iv {
#define ALG_SET_KEY 1
#define ALG_SET_IV 2
#define ALG_SET_OP 3
+#define ALG_SET_AEAD_ASSOCLEN 4
+#define ALG_SET_AEAD_AUTHSIZE 5
/* Operations */
#define ALG_OP_DECRYPT 0
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 39f621a9fe82..b03ee8f62d3c 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/in6.h>
#define SYSFS_BRIDGE_ATTR "bridge"
#define SYSFS_BRIDGE_FDB "brforward"
@@ -104,6 +105,7 @@ struct __fdb_entry {
#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#define BRIDGE_MODE_UNDEF 0xFFFF /* mode undefined */
/* Bridge management nested attributes
* [IFLA_AF_SPEC] = {
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 0f8210b8e0bc..aa63ed023c2b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -128,6 +128,7 @@
#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
+#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */
/*
* This is an Ethernet frame header.
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index ff957604a721..f7d0d2d7173a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -145,6 +145,7 @@ enum {
IFLA_CARRIER,
IFLA_PHYS_PORT_ID,
IFLA_CARRIER_CHANGES,
+ IFLA_PHYS_SWITCH_ID,
__IFLA_MAX
};
@@ -215,6 +216,18 @@ enum in6_addr_gen_mode {
IN6_ADDR_GEN_MODE_NONE,
};
+/* Bridge section */
+
+enum {
+ IFLA_BR_UNSPEC,
+ IFLA_BR_FORWARD_DELAY,
+ IFLA_BR_HELLO_TIME,
+ IFLA_BR_MAX_AGE,
+ __IFLA_BR_MAX,
+};
+
+#define IFLA_BR_MAX (__IFLA_BR_MAX - 1)
+
enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
@@ -231,6 +244,8 @@ enum {
IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */
IFLA_BRPORT_LEARNING, /* mac learning */
IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
+ IFLA_BRPORT_PROXYARP, /* proxy ARP */
+ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -291,6 +306,10 @@ enum {
IFLA_MACVLAN_UNSPEC,
IFLA_MACVLAN_MODE,
IFLA_MACVLAN_FLAGS,
+ IFLA_MACVLAN_MACADDR_MODE,
+ IFLA_MACVLAN_MACADDR,
+ IFLA_MACVLAN_MACADDR_DATA,
+ IFLA_MACVLAN_MACADDR_COUNT,
__IFLA_MACVLAN_MAX,
};
@@ -301,10 +320,33 @@ enum macvlan_mode {
MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
+ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */
+};
+
+enum macvlan_macaddr_mode {
+ MACVLAN_MACADDR_ADD,
+ MACVLAN_MACADDR_DEL,
+ MACVLAN_MACADDR_FLUSH,
+ MACVLAN_MACADDR_SET,
};
#define MACVLAN_FLAG_NOPROMISC 1
+/* IPVLAN section */
+enum {
+ IFLA_IPVLAN_UNSPEC,
+ IFLA_IPVLAN_MODE,
+ __IFLA_IPVLAN_MAX
+};
+
+#define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1)
+
+enum ipvlan_mode {
+ IPVLAN_MODE_L2 = 0,
+ IPVLAN_MODE_L3,
+ IPVLAN_MODE_MAX
+};
+
/* VXLAN section */
enum {
IFLA_VXLAN_UNSPEC,
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index e9502dd1ee2c..50ae24335444 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -22,21 +22,11 @@
/* Read queue size */
#define TUN_READQ_SIZE 500
-
-/* TUN device flags */
-#define TUN_TUN_DEV 0x0001
-#define TUN_TAP_DEV 0x0002
+/* TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. */
+#define TUN_TUN_DEV IFF_TUN
+#define TUN_TAP_DEV IFF_TAP
#define TUN_TYPE_MASK 0x000f
-#define TUN_FASYNC 0x0010
-#define TUN_NOCHECKSUM 0x0020
-#define TUN_NO_PI 0x0040
-/* This flag has no real effect */
-#define TUN_ONE_QUEUE 0x0080
-#define TUN_PERSIST 0x0100
-#define TUN_VNET_HDR 0x0200
-#define TUN_TAP_MQ 0x0400
-
/* Ioctl defines */
#define TUNSETNOCSUM _IOW('T', 200, int)
#define TUNSETDEBUG _IOW('T', 201, int)
@@ -58,6 +48,8 @@
#define TUNSETQUEUE _IOW('T', 217, int)
#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
+#define TUNSETVNETLE _IOW('T', 220, int)
+#define TUNGETVNETLE _IOR('T', 221, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
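The hunk above retires the old TUN_* device flags in favour of the IFF_* values and adds TUNSETVNETLE/TUNGETVNETLE for controlling vnet header endianness. A hedged sketch of the new ioctl (not part of the patch), assuming the usual /dev/net/tun node and a tap0 interface name:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int le = 1;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return 1;
	/* Force little-endian vnet headers; older kernels reject this. */
	if (ioctl(fd, TUNSETVNETLE, &le) < 0)
		return 1;
	return 0;
}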
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 3bce9e9d9f7c..bd3cc11a431f 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -53,10 +53,24 @@ enum {
IFLA_IPTUN_6RD_RELAY_PREFIX,
IFLA_IPTUN_6RD_PREFIXLEN,
IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
+ IFLA_IPTUN_ENCAP_TYPE,
+ IFLA_IPTUN_ENCAP_FLAGS,
+ IFLA_IPTUN_ENCAP_SPORT,
+ IFLA_IPTUN_ENCAP_DPORT,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
+enum tunnel_encap_types {
+ TUNNEL_ENCAP_NONE,
+ TUNNEL_ENCAP_FOU,
+ TUNNEL_ENCAP_GUE,
+};
+
+#define TUNNEL_ENCAP_FLAG_CSUM (1<<0)
+#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1)
+#define TUNNEL_ENCAP_FLAG_REMCSUM (1<<2)
+
/* SIT-mode i_flags */
#define SIT_ISATAP 0x0001
@@ -94,6 +108,10 @@ enum {
IFLA_GRE_ENCAP_LIMIT,
IFLA_GRE_FLOWINFO,
IFLA_GRE_FLAGS,
+ IFLA_GRE_ENCAP_TYPE,
+ IFLA_GRE_ENCAP_FLAGS,
+ IFLA_GRE_ENCAP_SPORT,
+ IFLA_GRE_ENCAP_DPORT,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 74a2a1773494..79b12b004ade 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
/*
* IPV6 socket options
*/
-
+#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADDRFORM 1
#define IPV6_2292PKTINFO 2
#define IPV6_2292HOPOPTS 3
@@ -196,6 +196,7 @@ struct in6_flowlabel_req {
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
+#endif
/*
* Multicast:
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index bbde90fa5838..d65c0a09efd3 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -110,10 +110,10 @@ enum {
INET_DIAG_TCLASS,
INET_DIAG_SKMEMINFO,
INET_DIAG_SHUTDOWN,
+ INET_DIAG_DCTCPINFO,
};
-#define INET_DIAG_MAX INET_DIAG_SHUTDOWN
-
+#define INET_DIAG_MAX INET_DIAG_DCTCPINFO
/* INET_DIAG_MEM */
@@ -133,5 +133,14 @@ struct tcpvegas_info {
__u32 tcpv_minrtt;
};
+/* INET_DIAG_DCTCPINFO */
+
+struct tcp_dctcp_info {
+ __u16 dctcp_enabled;
+ __u16 dctcp_ce_state;
+ __u32 dctcp_alpha;
+ __u32 dctcp_ab_ecn;
+ __u32 dctcp_ab_tot;
+};
#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 1874ebe9ac1e..a1d7e931ab72 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -739,6 +739,13 @@ struct input_keymap_entry {
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
+#define KEY_KBDINPUTASSIST_PREV 0x260
+#define KEY_KBDINPUTASSIST_NEXT 0x261
+#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
+#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263
+#define KEY_KBDINPUTASSIST_ACCEPT 0x264
+#define KEY_KBDINPUTASSIST_CANCEL 0x265
+
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index fbcffe8041f7..cabe95d5b461 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -384,6 +384,9 @@ enum {
IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
+
+ IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */
+
__IPVS_DEST_ATTR_MAX,
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index efa2666f4b8a..e863d088b9a5 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -164,6 +164,7 @@ enum {
DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_ACCEPT_RA_FROM_LOCAL,
+ DEVCONF_USE_OPTIMISTIC,
DEVCONF_MAX
};
diff --git a/include/linux/kcmp.h b/include/uapi/linux/kcmp.h
index 2dcd1b3aafc8..84df14b37360 100644
--- a/include/linux/kcmp.h
+++ b/include/uapi/linux/kcmp.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_KCMP_H
-#define _LINUX_KCMP_H
+#ifndef _UAPI_LINUX_KCMP_H
+#define _UAPI_LINUX_KCMP_H
/* Comparison type */
enum kcmp_type {
@@ -14,4 +14,4 @@ enum kcmp_type {
KCMP_TYPES,
};
-#endif /* _LINUX_KCMP_H */
+#endif /* _UAPI_LINUX_KCMP_H */
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 5116a0e48172..2f96d233c980 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -31,6 +31,7 @@
#define KPF_KSM 21
#define KPF_THP 22
+#define KPF_BALLOON 23
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
new file mode 100644
index 000000000000..7acef41fc209
--- /dev/null
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_IOCTL_H_INCLUDED
+#define KFD_IOCTL_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define KFD_IOCTL_MAJOR_VERSION 1
+#define KFD_IOCTL_MINOR_VERSION 0
+
+struct kfd_ioctl_get_version_args {
+ uint32_t major_version; /* from KFD */
+ uint32_t minor_version; /* from KFD */
+};
+
+/* For kfd_ioctl_create_queue_args.queue_type. */
+#define KFD_IOC_QUEUE_TYPE_COMPUTE 0
+#define KFD_IOC_QUEUE_TYPE_SDMA 1
+#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2
+
+#define KFD_MAX_QUEUE_PERCENTAGE 100
+#define KFD_MAX_QUEUE_PRIORITY 15
+
+struct kfd_ioctl_create_queue_args {
+ uint64_t ring_base_address; /* to KFD */
+ uint64_t write_pointer_address; /* from KFD */
+ uint64_t read_pointer_address; /* from KFD */
+ uint64_t doorbell_offset; /* from KFD */
+
+ uint32_t ring_size; /* to KFD */
+ uint32_t gpu_id; /* to KFD */
+ uint32_t queue_type; /* to KFD */
+ uint32_t queue_percentage; /* to KFD */
+ uint32_t queue_priority; /* to KFD */
+ uint32_t queue_id; /* from KFD */
+
+ uint64_t eop_buffer_address; /* to KFD */
+ uint64_t eop_buffer_size; /* to KFD */
+ uint64_t ctx_save_restore_address; /* to KFD */
+ uint64_t ctx_save_restore_size; /* to KFD */
+};
+
+struct kfd_ioctl_destroy_queue_args {
+ uint32_t queue_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_update_queue_args {
+ uint64_t ring_base_address; /* to KFD */
+
+ uint32_t queue_id; /* to KFD */
+ uint32_t ring_size; /* to KFD */
+ uint32_t queue_percentage; /* to KFD */
+ uint32_t queue_priority; /* to KFD */
+};
+
+/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
+#define KFD_IOC_CACHE_POLICY_COHERENT 0
+#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
+
+struct kfd_ioctl_set_memory_policy_args {
+ uint64_t alternate_aperture_base; /* to KFD */
+ uint64_t alternate_aperture_size; /* to KFD */
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t default_policy; /* to KFD */
+ uint32_t alternate_policy; /* to KFD */
+ uint32_t pad;
+};
+
+/*
+ * All counters are monotonic. They are used for profiling of compute jobs.
+ * The profiling is done by userspace.
+ *
+ * In case of GPU reset, the counters should not be affected.
+ */
+
+struct kfd_ioctl_get_clock_counters_args {
+ uint64_t gpu_clock_counter; /* from KFD */
+ uint64_t cpu_clock_counter; /* from KFD */
+ uint64_t system_clock_counter; /* from KFD */
+ uint64_t system_clock_freq; /* from KFD */
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t pad;
+};
+
+#define NUM_OF_SUPPORTED_GPUS 7
+
+struct kfd_process_device_apertures {
+ uint64_t lds_base; /* from KFD */
+ uint64_t lds_limit; /* from KFD */
+ uint64_t scratch_base; /* from KFD */
+ uint64_t scratch_limit; /* from KFD */
+ uint64_t gpuvm_base; /* from KFD */
+ uint64_t gpuvm_limit; /* from KFD */
+ uint32_t gpu_id; /* from KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_get_process_apertures_args {
+ struct kfd_process_device_apertures
+ process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
+
+ /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
+ uint32_t num_of_nodes;
+ uint32_t pad;
+};
+
+#define KFD_IOC_MAGIC 'K'
+
+#define KFD_IOC_GET_VERSION \
+ _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+
+#define KFD_IOC_CREATE_QUEUE \
+ _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+
+#define KFD_IOC_DESTROY_QUEUE \
+ _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+
+#define KFD_IOC_SET_MEMORY_POLICY \
+ _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+
+#define KFD_IOC_GET_CLOCK_COUNTERS \
+ _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+
+#define KFD_IOC_GET_PROCESS_APERTURES \
+ _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+
+#define KFD_IOC_UPDATE_QUEUE \
+ _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+
+#endif
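As a hedged illustration of the ioctl table above (not part of the patch), assuming the driver exposes a /dev/kfd character device:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);	/* assumed node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, KFD_IOC_GET_VERSION, &args) < 0)
		return 1;
	printf("kfd ioctl interface %u.%u\n",
	       args.major_version, args.minor_version);
	return 0;
}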
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index cf3a2ff440e4..a37fd1224f36 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -647,16 +647,10 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_MP_STATE 14
#define KVM_CAP_COALESCED_MMIO 15
#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
-#define KVM_CAP_DEVICE_ASSIGNMENT 17
#define KVM_CAP_IOMMU 18
-#ifdef __KVM_HAVE_MSI
-#define KVM_CAP_DEVICE_MSI 20
-#endif
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
-#ifdef __KVM_HAVE_USER_NMI
#define KVM_CAP_USER_NMI 22
-#endif
#ifdef __KVM_HAVE_GUEST_DEBUG
#define KVM_CAP_SET_GUEST_DEBUG 23
#endif
@@ -665,10 +659,6 @@ struct kvm_ppc_smmu_info {
#endif
#define KVM_CAP_IRQ_ROUTING 25
#define KVM_CAP_IRQ_INJECT_STATUS 26
-#define KVM_CAP_DEVICE_DEASSIGNMENT 27
-#ifdef __KVM_HAVE_MSIX
-#define KVM_CAP_DEVICE_MSIX 28
-#endif
#define KVM_CAP_ASSIGN_DEV_IRQ 29
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
@@ -738,9 +728,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_GET_SMMU_INFO 78
#define KVM_CAP_S390_COW 79
#define KVM_CAP_PPC_ALLOC_HTAB 80
-#ifdef __KVM_HAVE_READONLY_MEM
#define KVM_CAP_READONLY_MEM 81
-#endif
#define KVM_CAP_IRQFD_RESAMPLE 82
#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
#define KVM_CAP_PPC_HTAB_FD 84
@@ -947,15 +935,25 @@ struct kvm_device_attr {
__u64 addr; /* userspace address of attr data */
};
-#define KVM_DEV_TYPE_FSL_MPIC_20 1
-#define KVM_DEV_TYPE_FSL_MPIC_42 2
-#define KVM_DEV_TYPE_XICS 3
-#define KVM_DEV_TYPE_VFIO 4
#define KVM_DEV_VFIO_GROUP 1
#define KVM_DEV_VFIO_GROUP_ADD 1
#define KVM_DEV_VFIO_GROUP_DEL 2
-#define KVM_DEV_TYPE_ARM_VGIC_V2 5
-#define KVM_DEV_TYPE_FLIC 6
+
+enum kvm_device_type {
+ KVM_DEV_TYPE_FSL_MPIC_20 = 1,
+#define KVM_DEV_TYPE_FSL_MPIC_20 KVM_DEV_TYPE_FSL_MPIC_20
+ KVM_DEV_TYPE_FSL_MPIC_42,
+#define KVM_DEV_TYPE_FSL_MPIC_42 KVM_DEV_TYPE_FSL_MPIC_42
+ KVM_DEV_TYPE_XICS,
+#define KVM_DEV_TYPE_XICS KVM_DEV_TYPE_XICS
+ KVM_DEV_TYPE_VFIO,
+#define KVM_DEV_TYPE_VFIO KVM_DEV_TYPE_VFIO
+ KVM_DEV_TYPE_ARM_VGIC_V2,
+#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
+ KVM_DEV_TYPE_FLIC,
+#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
+ KVM_DEV_TYPE_MAX,
+};
/*
* ioctls for VM fds
@@ -1093,7 +1091,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
-/* Available with KVM_CAP_NMI */
+/* Available with KVM_CAP_USER_NMI */
#define KVM_NMI _IO(KVMIO, 0x9a)
/* Available with KVM_CAP_SET_GUEST_DEBUG */
#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
@@ -1101,9 +1099,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
-/* IA64 stack access */
-#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
-#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
/* Available with KVM_CAP_VCPU_EVENTS */
#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index c140620dad92..e28807ad17fa 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -69,6 +69,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 0
#define __UAPI_DEF_IPV6_MREQ 0
#define __UAPI_DEF_IPPROTO_V6 0
+#define __UAPI_DEF_IPV6_OPTIONS 0
#else
@@ -82,6 +83,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
#endif /* _NETINET_IN_H */
@@ -103,6 +105,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 77c60311a6c6..7d664ea85ebd 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -72,5 +72,6 @@
#define MTD_INODE_FS_MAGIC 0x11307854
#define ANON_INODE_FS_MAGIC 0x09041934
#define BTRFS_TEST_MAGIC 0x73727279
+#define NSFS_MAGIC 0x6e736673
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
new file mode 100644
index 000000000000..23b40908be30
--- /dev/null
+++ b/include/uapi/linux/media-bus-format.h
@@ -0,0 +1,125 @@
+/*
+ * Media Bus API header
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MEDIA_BUS_FORMAT_H
+#define __LINUX_MEDIA_BUS_FORMAT_H
+
+/*
+ * These bus formats uniquely identify data formats on the data bus. Format 0
+ * is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where
+ * the data format is fixed. Additionally, "2X8" means that one pixel is
+ * transferred in two 8-bit samples, "BE" or "LE" specify in which order those
+ * samples are transferred over the bus: "LE" means that the least significant
+ * bits are transferred first, "BE" means that the most significant bits are
+ * transferred first, and "PADHI" and "PADLO" define which bits - low or high,
+ * in the incomplete high byte, are filled with padding bits.
+ *
+ * The bus formats are grouped by type, bus_width, bits per component, samples
+ * per pixel and order of subsamples. Numerical values are sorted using generic
+ * numerical sort order (8 thus comes before 10).
+ *
+ * As their value can't change when a new bus format is inserted in the
+ * enumeration, the bus formats are explicitly given a numerical value. The next
+ * free values for each category are listed below, update them when inserting
+ * new pixel codes.
+ */
+
+#define MEDIA_BUS_FMT_FIXED 0x0001
+
+/* RGB - next is 0x100e */
+#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
+#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
+#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
+#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
+#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
+#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
+#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
+#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
+#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
+#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
+#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
+#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
+
+/* YUV (including grey) - next is 0x2024 */
+#define MEDIA_BUS_FMT_Y8_1X8 0x2001
+#define MEDIA_BUS_FMT_UV8_1X8 0x2015
+#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
+#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003
+#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004
+#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005
+#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006
+#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007
+#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
+#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
+#define MEDIA_BUS_FMT_Y10_1X10 0x200a
+#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
+#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
+#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
+#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
+#define MEDIA_BUS_FMT_Y12_1X12 0x2013
+#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
+#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
+#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
+#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012
+#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014
+#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a
+#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
+#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
+#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
+#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
+#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
+#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
+#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
+#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
+#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
+#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
+#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
+
+/* Bayer - next is 0x3019 */
+#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
+#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013
+#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002
+#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014
+#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015
+#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016
+#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017
+#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018
+#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b
+#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c
+#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009
+#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006
+#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007
+#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e
+#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a
+#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f
+#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008
+#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010
+#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011
+#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012
+
+/* JPEG compressed formats - next is 0x4002 */
+#define MEDIA_BUS_FMT_JPEG_1X8 0x4001
+
+/* Vendor specific formats - next is 0x5002 */
+
+/* S5C73M3 sensor specific interleaved UYVY and JPEG */
+#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001
+
+/* HSV - next is 0x6002 */
+#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+
+#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h
index a70375526578..f51c8001dbe5 100644
--- a/include/uapi/linux/msg.h
+++ b/include/uapi/linux/msg.h
@@ -51,16 +51,28 @@ struct msginfo {
};
/*
- * Scaling factor to compute msgmni:
- * the memory dedicated to msg queues (msgmni * msgmnb) should occupy
- * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c):
- * up to 8MB : msgmni = 16 (MSGMNI)
- * 4 GB : msgmni = 8K
- * more than 16 GB : msgmni = 32K (IPCMNI)
+ * MSGMNI, MSGMAX and MSGMNB are default values which can be
+ * modified by sysctl.
+ *
+ * MSGMNI is the upper limit for the number of message queues per
+ * namespace.
+ * It has been chosen to be as large as possible without facilitating
+ * scenarios where userspace causes overflows when adjusting the limits via
+ * operations of the form "retrieve current limit; add X; update limit".
+ *
+ * MSGMNB is the default size of a new message queue. Non-root tasks can
+ * decrease the size with msgctl(IPC_SET), root tasks
+ * (actually: CAP_SYS_RESOURCE) can both increase and decrease the queue
+ * size. The optimal value is application dependent.
+ * 16384 is used because it was always used (since 0.99.10)
+ *
+ * MSGMAX is the maximum size of an individual message; it's a global
+ * (per-namespace) limit that applies for all message queues.
+ * It's set to 1/2 of MSGMNB, to ensure that at least two messages fit into
+ * the queue. This is also an arbitrary choice (since 2.6.0).
*/
-#define MSG_MEM_SCALE 32
-#define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */
+#define MSGMNI 32000 /* <= IPCMNI */ /* max # of msg queue identifiers */
#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */
#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */
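The rewritten comment above describes MSGMNB as a per-queue default that unprivileged tasks may lower via msgctl(IPC_SET), while raising it beyond the default needs CAP_SYS_RESOURCE. A hedged sketch of the unprivileged path (illustration only, not part of the patch):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	struct msqid_ds ds;
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	if (msgctl(id, IPC_STAT, &ds) < 0)
		return 1;
	ds.msg_qbytes /= 2;	/* decreasing is allowed for the owner */
	if (msgctl(id, IPC_SET, &ds) < 0)
		return 1;
	printf("queue %d now limited to %lu bytes\n", id,
	       (unsigned long)ds.msg_qbytes);
	msgctl(id, IPC_RMID, NULL);
	return 0;
}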
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 4a1d7e96dfe3..f3d77f9f1e0b 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -35,11 +35,11 @@ enum {
*/
#define NTF_USE 0x01
-#define NTF_PROXY 0x08 /* == ATF_PUBL */
-#define NTF_ROUTER 0x80
-
#define NTF_SELF 0x02
#define NTF_MASTER 0x04
+#define NTF_PROXY 0x08 /* == ATF_PUBL */
+#define NTF_EXT_LEARNED 0x10
+#define NTF_ROUTER 0x80
/*
* Neighbor Cache Entry States.
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index ff354021bb69..edbc888ceb51 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -23,8 +23,9 @@ enum {
SOF_TIMESTAMPING_OPT_ID = (1<<7),
SOF_TIMESTAMPING_TX_SCHED = (1<<8),
SOF_TIMESTAMPING_TX_ACK = (1<<9),
+ SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_TX_ACK,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
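/*
 * Illustrative sketch, not part of this patch: a socket opting in to the new
 * SOF_TIMESTAMPING_OPT_CMSG bit alongside software TX timestamping. The
 * precise semantics of the flag are defined by the networking core, not by
 * this header change; timestamps are read back as SCM_TIMESTAMPING cmsgs.
 */
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* asm-generic value; arch headers may differ */
#endif

static int enable_tx_timestamping(int fd)
{
	int val = SOF_TIMESTAMPING_TX_SOFTWARE |
		  SOF_TIMESTAMPING_SOFTWARE |
		  SOF_TIMESTAMPING_OPT_CMSG;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}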
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 78c2f2e79920..5ab4e60894cf 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -115,6 +115,9 @@ enum {
IPSET_ATTR_BYTES,
IPSET_ATTR_PACKETS,
IPSET_ATTR_COMMENT,
+ IPSET_ATTR_SKBMARK,
+ IPSET_ATTR_SKBPRIO,
+ IPSET_ATTR_SKBQUEUE,
__IPSET_ATTR_ADT_MAX,
};
#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
@@ -147,6 +150,7 @@ enum ipset_errno {
IPSET_ERR_COUNTER,
IPSET_ERR_COMMENT,
IPSET_ERR_INVALID_MARKMASK,
+ IPSET_ERR_SKBINFO,
/* Type specific error codes */
IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -170,6 +174,12 @@ enum ipset_cmd_flags {
IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS),
IPSET_FLAG_BIT_RETURN_NOMATCH = 7,
IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH),
+ IPSET_FLAG_BIT_MAP_SKBMARK = 8,
+ IPSET_FLAG_MAP_SKBMARK = (1 << IPSET_FLAG_BIT_MAP_SKBMARK),
+ IPSET_FLAG_BIT_MAP_SKBPRIO = 9,
+ IPSET_FLAG_MAP_SKBPRIO = (1 << IPSET_FLAG_BIT_MAP_SKBPRIO),
+ IPSET_FLAG_BIT_MAP_SKBQUEUE = 10,
+ IPSET_FLAG_MAP_SKBQUEUE = (1 << IPSET_FLAG_BIT_MAP_SKBQUEUE),
IPSET_FLAG_CMD_MAX = 15,
};
@@ -187,6 +197,8 @@ enum ipset_cadt_flags {
IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
IPSET_FLAG_BIT_WITH_FORCEADD = 5,
IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
+ IPSET_FLAG_BIT_WITH_SKBINFO = 6,
+ IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO),
IPSET_FLAG_CADT_MAX = 15,
};
@@ -244,11 +256,17 @@ enum {
IPSET_COUNTER_GT,
};
-struct ip_set_counter_match {
+/* Backward compatibility for set match v3 */
+struct ip_set_counter_match0 {
__u8 op;
__u64 value;
};
+struct ip_set_counter_match {
+ __aligned_u64 value;
+ __u8 op;
+};
+
/* Interface to iptables/ip6tables */
#define SO_IP_SET 83
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index 1ad3659102b6..0880781ad7b6 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -13,6 +13,11 @@
#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
(NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+#define NF_NAT_RANGE_MASK \
+ (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
+ NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+
struct nf_nat_ipv4_range {
unsigned int flags;
__be32 min_ip;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 801bdd1e56e3..832bc46db78b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -51,6 +51,8 @@ enum nft_verdicts {
* @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
* @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
* @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes)
+ * @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -68,6 +70,8 @@ enum nf_tables_msg_types {
NFT_MSG_NEWSETELEM,
NFT_MSG_GETSETELEM,
NFT_MSG_DELSETELEM,
+ NFT_MSG_NEWGEN,
+ NFT_MSG_GETGEN,
NFT_MSG_MAX,
};
@@ -571,6 +575,11 @@ enum nft_exthdr_attributes {
* @NFT_META_L4PROTO: layer 4 protocol number
* @NFT_META_BRI_IIFNAME: packet input bridge interface name
* @NFT_META_BRI_OIFNAME: packet output bridge interface name
+ * @NFT_META_PKTTYPE: packet type (skb->pkt_type), special handling for loopback
+ * @NFT_META_CPU: cpu id through smp_processor_id()
+ * @NFT_META_IIFGROUP: packet input interface group
+ * @NFT_META_OIFGROUP: packet output interface group
+ * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -592,6 +601,11 @@ enum nft_meta_keys {
NFT_META_L4PROTO,
NFT_META_BRI_IIFNAME,
NFT_META_BRI_OIFNAME,
+ NFT_META_PKTTYPE,
+ NFT_META_CPU,
+ NFT_META_IIFGROUP,
+ NFT_META_OIFGROUP,
+ NFT_META_CGROUP,
};
/**
@@ -737,13 +751,34 @@ enum nft_queue_attributes {
*
* @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
* @NFT_REJECT_TCP_RST: reject using TCP RST
+ * @NFT_REJECT_ICMPX_UNREACH: abstracted ICMP unreachable for bridge and inet
*/
enum nft_reject_types {
NFT_REJECT_ICMP_UNREACH,
NFT_REJECT_TCP_RST,
+ NFT_REJECT_ICMPX_UNREACH,
};
/**
+ * enum nft_reject_inet_code - Generic reject codes for IPv4/IPv6
+ *
+ * @NFT_REJECT_ICMPX_NO_ROUTE: no route to host / network unreachable
+ * @NFT_REJECT_ICMPX_PORT_UNREACH: port unreachable
+ * @NFT_REJECT_ICMPX_HOST_UNREACH: host unreachable
+ * @NFT_REJECT_ICMPX_ADMIN_PROHIBITED: administratively prohibited
+ *
+ * These codes are mapped to real ICMP and ICMPv6 codes.
+ */
+enum nft_reject_inet_code {
+ NFT_REJECT_ICMPX_NO_ROUTE = 0,
+ NFT_REJECT_ICMPX_PORT_UNREACH,
+ NFT_REJECT_ICMPX_HOST_UNREACH,
+ NFT_REJECT_ICMPX_ADMIN_PROHIBITED,
+ __NFT_REJECT_ICMPX_MAX
+};
+#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX - 1)
+
+/**
* enum nft_reject_attributes - nf_tables reject expression netlink attributes
*
* @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
@@ -777,6 +812,7 @@ enum nft_nat_types {
* @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
* @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
* @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ * @NFTA_NAT_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
*/
enum nft_nat_attributes {
NFTA_NAT_UNSPEC,
@@ -786,8 +822,49 @@ enum nft_nat_attributes {
NFTA_NAT_REG_ADDR_MAX,
NFTA_NAT_REG_PROTO_MIN,
NFTA_NAT_REG_PROTO_MAX,
+ NFTA_NAT_FLAGS,
__NFTA_NAT_MAX
};
#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
+/**
+ * enum nft_masq_attributes - nf_tables masquerade expression attributes
+ *
+ * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
+ */
+enum nft_masq_attributes {
+ NFTA_MASQ_UNSPEC,
+ NFTA_MASQ_FLAGS,
+ __NFTA_MASQ_MAX
+};
+#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1)
+
+/**
+ * enum nft_redir_attributes - nf_tables redirect expression netlink attributes
+ *
+ * @NFTA_REDIR_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_REDIR_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ * @NFTA_REDIR_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
+ */
+enum nft_redir_attributes {
+ NFTA_REDIR_UNSPEC,
+ NFTA_REDIR_REG_PROTO_MIN,
+ NFTA_REDIR_REG_PROTO_MAX,
+ NFTA_REDIR_FLAGS,
+ __NFTA_REDIR_MAX
+};
+#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1)
+
+/**
+ * enum nft_gen_attributes - nf_tables ruleset generation attributes
+ *
+ * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
+ */
+enum nft_gen_attributes {
+ NFTA_GEN_UNSPEC,
+ NFTA_GEN_ID,
+ __NFTA_GEN_MAX
+};
+#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index 51404ec19022..f3e34dbbf966 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -28,9 +28,17 @@ enum nfnl_acct_type {
NFACCT_USE,
NFACCT_FLAGS,
NFACCT_QUOTA,
+ NFACCT_FILTER,
__NFACCT_MAX
};
#define NFACCT_MAX (__NFACCT_MAX - 1)
+enum nfnl_attr_filter_type {
+ NFACCT_FILTER_UNSPEC,
+ NFACCT_FILTER_MASK,
+ NFACCT_FILTER_VALUE,
+ __NFACCT_FILTER_MAX
+};
+#define NFACCT_FILTER_MAX (__NFACCT_FILTER_MAX - 1)
#endif /* _UAPI_NFNL_ACCT_H_ */
diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h
index 964d3d42f874..d4e02348384c 100644
--- a/include/uapi/linux/netfilter/xt_set.h
+++ b/include/uapi/linux/netfilter/xt_set.h
@@ -66,6 +66,25 @@ struct xt_set_info_target_v2 {
struct xt_set_info_match_v3 {
struct xt_set_info match_set;
+ struct ip_set_counter_match0 packets;
+ struct ip_set_counter_match0 bytes;
+ __u32 flags;
+};
+
+/* Revision 3 target */
+
+struct xt_set_info_target_v3 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+ struct xt_set_info map_set;
+ __u32 flags;
+ __u32 timeout;
+};
+
+/* Revision 4 match */
+
+struct xt_set_info_match_v4 {
+ struct xt_set_info match_set;
struct ip_set_counter_match packets;
struct ip_set_counter_match bytes;
__u32 flags;
diff --git a/include/uapi/linux/netfilter_arp/arpt_mangle.h b/include/uapi/linux/netfilter_arp/arpt_mangle.h
index 250f502902bb..8c2b16a1f5a0 100644
--- a/include/uapi/linux/netfilter_arp/arpt_mangle.h
+++ b/include/uapi/linux/netfilter_arp/arpt_mangle.h
@@ -13,7 +13,7 @@ struct arpt_mangle
union {
struct in_addr tgt_ip;
} u_t;
- u_int8_t flags;
+ __u8 flags;
int target;
};
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 9b19b4461928..8119255feae4 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -116,6 +116,7 @@ enum nfc_commands {
NFC_EVENT_SE_TRANSACTION,
NFC_CMD_GET_SE,
NFC_CMD_SE_IO,
+ NFC_CMD_ACTIVATE_TARGET,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -196,15 +197,19 @@ enum nfc_sdp_attr {
};
#define NFC_SDP_ATTR_MAX (__NFC_SDP_ATTR_AFTER_LAST - 1)
-#define NFC_DEVICE_NAME_MAXSIZE 8
-#define NFC_NFCID1_MAXSIZE 10
-#define NFC_NFCID2_MAXSIZE 8
-#define NFC_NFCID3_MAXSIZE 10
-#define NFC_SENSB_RES_MAXSIZE 12
-#define NFC_SENSF_RES_MAXSIZE 18
-#define NFC_GB_MAXSIZE 48
-#define NFC_FIRMWARE_NAME_MAXSIZE 32
-#define NFC_ISO15693_UID_MAXSIZE 8
+#define NFC_DEVICE_NAME_MAXSIZE 8
+#define NFC_NFCID1_MAXSIZE 10
+#define NFC_NFCID2_MAXSIZE 8
+#define NFC_NFCID3_MAXSIZE 10
+#define NFC_SENSB_RES_MAXSIZE 12
+#define NFC_SENSF_RES_MAXSIZE 18
+#define NFC_ATR_REQ_MAXSIZE 64
+#define NFC_ATR_RES_MAXSIZE 64
+#define NFC_ATR_REQ_GB_MAXSIZE 48
+#define NFC_ATR_RES_GB_MAXSIZE 47
+#define NFC_GB_MAXSIZE 48
+#define NFC_FIRMWARE_NAME_MAXSIZE 32
+#define NFC_ISO15693_UID_MAXSIZE 8
/* NFC protocols */
#define NFC_PROTO_JEWEL 1
diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
index a6f453c740b8..1fdc95bb2375 100644
--- a/include/uapi/linux/nfsd/debug.h
+++ b/include/uapi/linux/nfsd/debug.h
@@ -15,7 +15,7 @@
* Enable debugging for nfsd.
* Requires RPC_DEBUG.
*/
-#ifdef RPC_DEBUG
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define NFSD_DEBUG 1
#endif
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index cf47c313794e..584b6ef3a5e8 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -28,7 +28,8 @@
#define NFSEXP_ALLSQUASH 0x0008
#define NFSEXP_ASYNC 0x0010
#define NFSEXP_GATHERED_WRITES 0x0020
-/* 40 80 100 currently unused */
+#define NFSEXP_NOREADDIRPLUS 0x0040
+/* 80 100 currently unused */
#define NFSEXP_NOHIDE 0x0200
#define NFSEXP_NOSUBTREECHECK 0x0400
#define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */
@@ -47,7 +48,7 @@
*/
#define NFSEXP_V4ROOT 0x10000
/* All flags that we claim to support. (Note we don't support NOACL.) */
-#define NFSEXP_ALLFLAGS 0x17E3F
+#define NFSEXP_ALLFLAGS 0x1FE7F
/* The flags that may vary depending on security flavor: */
#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f1db15b9c041..b37bd5a1cb82 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -227,7 +227,11 @@
* the interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
- * by %NL80211_ATTR_IFINDEX.
+ * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
+ * %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
+ * of disconnection indication should be sent to the station
+ * (Deauthentication or Disassociation frame and reason code for that
+ * frame).
*
* @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
@@ -639,7 +643,18 @@
* @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels
* independently of the userspace SME, send this event indicating
* %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the
- * attributes determining channel width.
+ * attributes determining channel width. This indication may also be
+ * sent when a remotely-initiated switch (e.g., when a STA receives a CSA
+ * from the remote AP) is completed.
+ *
+ * @NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: Notify that a channel switch
+ * has been started on an interface, regardless of the initiator
+ * (ie. whether it was requested from a remote device or
+ * initiated on our own). It indicates that
+ * %NL80211_ATTR_IFINDEX will be on %NL80211_ATTR_WIPHY_FREQ
+ * after %NL80211_ATTR_CH_SWITCH_COUNT TBTTs. Userspace may
+ * decide to react to this indication by requesting other
+ * interfaces to change channel as well.
*
* @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
* its %NL80211_ATTR_WDEV identifier. It must have been created with
@@ -722,6 +737,43 @@
* QoS mapping is relevant for IP packets, it is only valid during an
* association. This is cleared on disassociation and AP restart.
*
+ * @NL80211_CMD_ADD_TX_TS: Ask the kernel to add a traffic stream for the given
+ * %NL80211_ATTR_TSID and %NL80211_ATTR_MAC with %NL80211_ATTR_USER_PRIO
+ * and %NL80211_ATTR_ADMITTED_TIME parameters.
+ * Note that the action frame handshake with the AP shall be handled by
+ * userspace via the normal management RX/TX framework, this only sets
+ * up the TX TS in the driver/device.
+ * If the admitted time attribute is not added, then the request just checks
+ * if a subsequent setup could be successful; the intent is to use this to
+ * avoid setting up a session with the AP when local restrictions would
+ * make that impossible. However, the subsequent "real" setup may still
+ * fail even if the check was successful.
+ * @NL80211_CMD_DEL_TX_TS: Remove an existing TS with the %NL80211_ATTR_TSID
+ * and %NL80211_ATTR_MAC parameters. It isn't necessary to call this
+ * before removing a station entry entirely, or before disassociating
+ * or similar; cleanup will happen in the driver/device in this case.
+ *
+ * @NL80211_CMD_GET_MPP: Get mesh path attributes for mesh proxy path to
+ * destination %NL80211_ATTR_MAC on the interface identified by
+ * %NL80211_ATTR_IFINDEX.
+ *
+ * @NL80211_CMD_JOIN_OCB: Join the OCB network. The center frequency and
+ * bandwidth of a channel must be given.
+ * @NL80211_CMD_LEAVE_OCB: Leave the OCB network -- no special arguments, the
+ * network is determined by the network interface.
+ *
+ * @NL80211_CMD_TDLS_CHANNEL_SWITCH: Start channel-switching with a TDLS peer,
+ * identified by the %NL80211_ATTR_MAC parameter. A target channel is
+ * provided via %NL80211_ATTR_WIPHY_FREQ and other attributes determining
+ * channel width/type. The target operating class is given via
+ * %NL80211_ATTR_OPER_CLASS.
+ * The driver is responsible for continually initiating channel-switching
+ * operations and returning to the base channel for communication with the
+ * AP.
+ * @NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: Stop channel-switching with a TDLS
+ * peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel
+ * when this command completes.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -893,6 +945,19 @@ enum nl80211_commands {
NL80211_CMD_SET_QOS_MAP,
+ NL80211_CMD_ADD_TX_TS,
+ NL80211_CMD_DEL_TX_TS,
+
+ NL80211_CMD_GET_MPP,
+
+ NL80211_CMD_JOIN_OCB,
+ NL80211_CMD_LEAVE_OCB,
+
+ NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
+
+ NL80211_CMD_TDLS_CHANNEL_SWITCH,
+ NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1587,13 +1652,43 @@ enum nl80211_commands {
* @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
* As specified in the &enum nl80211_tdls_peer_capability.
*
- * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
+ * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface
* creation then the new interface will be owned by the netlink socket
- * that created it and will be destroyed when the socket is closed
+ * that created it and will be destroyed when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
*
+ * @NL80211_ATTR_USE_RRM: flag for indicating whether the current connection
+ * shall support Radio Resource Measurements (11k). This attribute can be
+ * used with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests.
+ * User space applications are expected to use this flag only if the
+ * underlying device supports these minimal RRM features:
+ * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES,
+ * %NL80211_FEATURE_QUIET.
+ * If this flag is used, the driver must add the Power Capabilities IE to the
+ * association request. In addition, it must also set the RRM capability
+ * flag in the association request's Capability Info field.
+ *
+ * @NL80211_ATTR_WIPHY_DYN_ACK: flag attribute used to enable ACK timeout
+ * estimation algorithm (dynack). In order to activate dynack
+ * %NL80211_FEATURE_ACKTO_ESTIMATION feature flag must be set by lower
+ * drivers to indicate dynack capability. Dynack is automatically disabled
+ * when a valid value is set for the coverage class.
+ *
+ * @NL80211_ATTR_TSID: a TSID value (u8 attribute)
+ * @NL80211_ATTR_USER_PRIO: user priority value (u8 attribute)
+ * @NL80211_ATTR_ADMITTED_TIME: admitted time in units of 32 microseconds
+ * (per second) (u16 attribute)
+ *
+ * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see
+ * &enum nl80211_smps_mode.
+ *
+ * @NL80211_ATTR_OPER_CLASS: operating class
+ *
+ * @NL80211_ATTR_MAC_MASK: MAC address mask
+ *
+ * @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1929,22 +2024,38 @@ enum nl80211_attrs {
NL80211_ATTR_TDLS_PEER_CAPABILITY,
- NL80211_ATTR_IFACE_SOCKET_OWNER,
+ NL80211_ATTR_SOCKET_OWNER,
NL80211_ATTR_CSA_C_OFFSETS_TX,
NL80211_ATTR_MAX_CSA_COUNTERS,
NL80211_ATTR_TDLS_INITIATOR,
+ NL80211_ATTR_USE_RRM,
+
+ NL80211_ATTR_WIPHY_DYN_ACK,
+
+ NL80211_ATTR_TSID,
+ NL80211_ATTR_USER_PRIO,
+ NL80211_ATTR_ADMITTED_TIME,
+
+ NL80211_ATTR_SMPS_MODE,
+
+ NL80211_ATTR_OPER_CLASS,
+
+ NL80211_ATTR_MAC_MASK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
+ NUM_NL80211_ATTR = __NL80211_ATTR_AFTER_LAST,
NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
};
/* source-level API compatibility */
#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
+#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -2010,6 +2121,8 @@ enum nl80211_attrs {
* and therefore can't be created in the normal ways, use the
* %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
* commands to create and destroy one
+ * @NL80211_IFTYPE_OCB: Outside Context of a BSS
+ * This mode corresponds to the MIB variable dot11OCBActivated=true
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @NUM_NL80211_IFTYPES: number of defined interface types
*
@@ -2029,6 +2142,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_P2P_CLIENT,
NL80211_IFTYPE_P2P_GO,
NL80211_IFTYPE_P2P_DEVICE,
+ NL80211_IFTYPE_OCB,
/* keep last */
NUM_NL80211_IFTYPES,
@@ -2577,6 +2691,11 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
* base on contiguous rules and wider channels will be allowed to cross
* multiple contiguous/overlapping frequency ranges.
+ * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
+ * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
+ * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
+ * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -2589,11 +2708,18 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_IR = 1<<7,
__NL80211_RRF_NO_IBSS = 1<<8,
NL80211_RRF_AUTO_BW = 1<<11,
+ NL80211_RRF_GO_CONCURRENT = 1<<12,
+ NL80211_RRF_NO_HT40MINUS = 1<<13,
+ NL80211_RRF_NO_HT40PLUS = 1<<14,
+ NL80211_RRF_NO_80MHZ = 1<<15,
+ NL80211_RRF_NO_160MHZ = 1<<16,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
#define NL80211_RRF_NO_IBSS NL80211_RRF_NO_IR
#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR
+#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
+ NL80211_RRF_NO_HT40PLUS)
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
@@ -3055,14 +3181,20 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_BSSID: BSSID of the BSS (6 octets)
* @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
* @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
+ * (if @NL80211_BSS_PRESP_DATA is present then this is known to be
+ * from a probe response, otherwise it may be from the same beacon
+ * that the NL80211_BSS_BEACON_TSF will be from)
* @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
* @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
* @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
* raw information elements from the probe response/beacon (bin);
- * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are
- * from a Probe Response frame; otherwise they are from a Beacon frame.
+ * if the %NL80211_BSS_BEACON_IES attribute is present and the data is
+ * different then the IEs here are from a Probe Response frame; otherwise
+ * they are from a Beacon frame.
* However, if the driver does not indicate the source of the IEs, these
* IEs may be from either frame subtype.
+ * If present, the @NL80211_BSS_PRESP_DATA attribute indicates that the
+ * data here is known to be from a probe response, without any heuristics.
* @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon
* in mBm (100 * dBm) (s32)
* @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon
@@ -3074,6 +3206,10 @@ enum nl80211_bss_scan_width {
* yet been received
* @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
* (u32, enum nl80211_bss_scan_width)
+ * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64)
+ * (not present if no beacon frame has been received yet)
+ * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
+ * @NL80211_BSS_TSF is known to be from a probe response (flag attribute)
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3091,6 +3227,8 @@ enum nl80211_bss {
NL80211_BSS_SEEN_MS_AGO,
NL80211_BSS_BEACON_IES,
NL80211_BSS_CHAN_WIDTH,
+ NL80211_BSS_BEACON_TSF,
+ NL80211_BSS_PRESP_DATA,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -3313,6 +3451,8 @@ enum nl80211_ps_state {
* interval in which %NL80211_ATTR_CQM_TXE_PKTS and
* %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an
* %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting.
+ * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon
+ * loss event
* @__NL80211_ATTR_CQM_AFTER_LAST: internal
* @NL80211_ATTR_CQM_MAX: highest key attribute
*/
@@ -3325,6 +3465,7 @@ enum nl80211_attr_cqm {
NL80211_ATTR_CQM_TXE_RATE,
NL80211_ATTR_CQM_TXE_PKTS,
NL80211_ATTR_CQM_TXE_INTVL,
+ NL80211_ATTR_CQM_BEACON_LOSS_EVENT,
/* keep last */
__NL80211_ATTR_CQM_AFTER_LAST,
@@ -3337,9 +3478,7 @@ enum nl80211_attr_cqm {
* configured threshold
* @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
* configured threshold
- * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss.
- * (Note that deauth/disassoc will still follow if the AP is not
- * available. This event might get used as roaming event, etc.)
+ * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: (reserved, never sent)
*/
enum nl80211_cqm_rssi_threshold_event {
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
@@ -3479,6 +3618,25 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only,
* the TCP connection ran out of tokens to use for data to send to the
* service
+ * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network
+ * is detected. This is a nested attribute that contains the
+ * same attributes used with @NL80211_CMD_START_SCHED_SCAN. It
+ * specifies how the scan is performed (e.g. the interval and the
+ * channels to scan) as well as the scan results that will
+ * trigger a wake (i.e. the matchsets).
+ * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute
+ * containing an array with information about what triggered the
+ * wake up. If no elements are present in the array, it means
+ * that the information is not available. If more than one
+ * element is present, it means that more than one match
+ * occurred.
+ * Each element in the array is a nested attribute that contains
+ * one optional %NL80211_ATTR_SSID attribute and one optional
+ * %NL80211_ATTR_SCAN_FREQUENCIES attribute. At least one of
+ * these attributes must be present. If
+ * %NL80211_ATTR_SCAN_FREQUENCIES contains more than one
+ * frequency, it means that the match occurred in more than one
+ * channel.
* @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
* @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
*
@@ -3504,6 +3662,8 @@ enum nl80211_wowlan_triggers {
NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS,
+ NL80211_WOWLAN_TRIG_NET_DETECT,
+ NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS,
/* keep last */
NUM_NL80211_WOWLAN_TRIG,
@@ -3956,6 +4116,47 @@ enum nl80211_ap_sme_features {
* @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
* channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
* lifetime of a BSS.
+ * @NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES: This device adds a DS Parameter
+ * Set IE to probe requests.
+ * @NL80211_FEATURE_WFA_TPC_IE_IN_PROBES: This device adds a WFA TPC Report IE
+ * to probe requests.
+ * @NL80211_FEATURE_QUIET: This device, in client mode, supports Quiet Period
+ * requests sent to it by an AP.
+ * @NL80211_FEATURE_TX_POWER_INSERTION: This device is capable of inserting the
+ * current tx power value into the TPC Report IE in the spectrum
+ * management TPC Report action frame, and in the Radio Measurement Link
+ * Measurement Report action frame.
+ * @NL80211_FEATURE_ACKTO_ESTIMATION: This driver supports dynamic ACK timeout
+ * estimation (dynack). %NL80211_ATTR_WIPHY_DYN_ACK flag attribute is used
+ * to enable dynack.
+ * @NL80211_FEATURE_STATIC_SMPS: Device supports static spatial
+ * multiplexing powersave, ie. can turn off all but one chain
+ * even on HT connections that should be using more chains.
+ * @NL80211_FEATURE_DYNAMIC_SMPS: Device supports dynamic spatial
+ * multiplexing powersave, ie. can turn off all but one chain
+ * and then wake the rest up as required after, for example,
+ * rts/cts handshake.
+ * @NL80211_FEATURE_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM
+ * TSPEC sessions (TID aka TSID 0-7) with the %NL80211_CMD_ADD_TX_TS
+ * command. Standard IEEE 802.11 TSPEC setup is not yet supported; it
+ * needs to be able to handle Block-Ack agreements and other things.
+ * @NL80211_FEATURE_MAC_ON_CREATE: Device supports configuring
+ * the vif's MAC address upon creation.
+ * See 'macaddr' field in the vif_params (cfg80211.h).
+ * @NL80211_FEATURE_TDLS_CHANNEL_SWITCH: Driver supports channel switching when
+ * operating as a TDLS peer.
+ * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a
+ * random MAC address during scan (if the device is unassociated); the
+ * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC
+ * address mask/value will be used.
+ * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports
+ * using a random MAC address for every scan iteration during scheduled
+ * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may
+ * be set for scheduled scan and the MAC address mask/value will be used.
+ * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a
+ * random MAC address for every scan iteration during "net detect", i.e.
+ * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may
+ * be set for scheduled scan and the MAC address mask/value will be used.
*/
enum nl80211_feature_flags {
NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
@@ -3977,6 +4178,19 @@ enum nl80211_feature_flags {
NL80211_FEATURE_USERSPACE_MPM = 1 << 16,
NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17,
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18,
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 1 << 19,
+ NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1 << 20,
+ NL80211_FEATURE_QUIET = 1 << 21,
+ NL80211_FEATURE_TX_POWER_INSERTION = 1 << 22,
+ NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23,
+ NL80211_FEATURE_STATIC_SMPS = 1 << 24,
+ NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25,
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 1 << 26,
+ NL80211_FEATURE_MAC_ON_CREATE = 1 << 27,
+ NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28,
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29,
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30,
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31,
};
/**
@@ -4025,11 +4239,21 @@ enum nl80211_connect_failed_reason {
 * dangerous because it will destroy station performance, as a lot of frames
 * will be lost while scanning off-channel; therefore it must be used only
* when really needed
+ * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or
+ * for scheduled scan: a different one for every scan iteration). When the
+ * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and
+ * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only
+ * the masked bits will be preserved from the MAC address and the remainder
+ * randomised. If the attributes are not given, full randomisation (46 bits,
+ * locally administered 1, multicast 0) is assumed.
+ * This flag must not be requested when the feature isn't supported, check
+ * the nl80211 feature flags for the device.
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
NL80211_SCAN_FLAG_FLUSH = 1<<1,
NL80211_SCAN_FLAG_AP = 1<<2,
+ NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3,
};
/**
@@ -4051,6 +4275,25 @@ enum nl80211_acl_policy {
};
/**
+ * enum nl80211_smps_mode - SMPS mode
+ *
+ * Requested SMPS mode (for AP mode)
+ *
+ * @NL80211_SMPS_OFF: SMPS off (use all antennas).
+ * @NL80211_SMPS_STATIC: static SMPS (use a single antenna)
+ * @NL80211_SMPS_DYNAMIC: dynamic smps (start with a single antenna and
+ * turn on other antennas after CTS/RTS).
+ */
+enum nl80211_smps_mode {
+ NL80211_SMPS_OFF,
+ NL80211_SMPS_STATIC,
+ NL80211_SMPS_DYNAMIC,
+
+ __NL80211_SMPS_AFTER_LAST,
+ NL80211_SMPS_MAX = __NL80211_SMPS_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_radar_event - type of radar event for DFS operation
*
* Type of event to be used with NL80211_ATTR_RADAR_EVENT to inform userspace
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 29a7d8619d8d..26386cf3db44 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -181,6 +181,22 @@ enum {
NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
+struct nvme_reservation_status {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[13];
+ struct {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 resv3[5];
+ __le64 hostid;
+ __le64 rkey;
+ } regctl_ds[];
+};
+
/* I/O commands */
enum nvme_opcode {
@@ -189,7 +205,12 @@ enum nvme_opcode {
nvme_cmd_read = 0x02,
nvme_cmd_write_uncor = 0x04,
nvme_cmd_compare = 0x05,
+ nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_resv_register = 0x0d,
+ nvme_cmd_resv_report = 0x0e,
+ nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_resv_release = 0x15,
};
struct nvme_common_command {
@@ -305,7 +326,11 @@ enum {
NVME_FEAT_IRQ_CONFIG = 0x09,
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
- NVME_FEAT_SW_PROGRESS = 0x0c,
+ NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_SW_PROGRESS = 0x80,
+ NVME_FEAT_HOST_ID = 0x81,
+ NVME_FEAT_RESV_MASK = 0x82,
+ NVME_FEAT_RESV_PERSIST = 0x83,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -440,9 +465,15 @@ enum {
NVME_SC_FUSED_MISSING = 0xa,
NVME_SC_INVALID_NS = 0xb,
NVME_SC_CMD_SEQ_ERROR = 0xc,
+ NVME_SC_SGL_INVALID_LAST = 0xd,
+ NVME_SC_SGL_INVALID_COUNT = 0xe,
+ NVME_SC_SGL_INVALID_DATA = 0xf,
+ NVME_SC_SGL_INVALID_METADATA = 0x10,
+ NVME_SC_SGL_INVALID_TYPE = 0x11,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
+ NVME_SC_RESERVATION_CONFLICT = 0x83,
NVME_SC_CQ_INVALID = 0x100,
NVME_SC_QID_INVALID = 0x101,
NVME_SC_QUEUE_SIZE = 0x102,
@@ -454,7 +485,15 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
+ NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_INVALID_QUEUE = 0x10c,
+ NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
+ NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
+ NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
+ NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
NVME_SC_BAD_ATTRIBUTES = 0x180,
+ NVME_SC_INVALID_PI = 0x181,
+ NVME_SC_READ_ONLY = 0x182,
NVME_SC_WRITE_FAULT = 0x280,
NVME_SC_READ_ERROR = 0x281,
NVME_SC_GUARD_CHECK = 0x282,
@@ -489,7 +528,7 @@ struct nvme_user_io {
__u16 appmask;
};
-struct nvme_admin_cmd {
+struct nvme_passthru_cmd {
__u8 opcode;
__u8 flags;
__u16 rsvd1;
@@ -510,8 +549,11 @@ struct nvme_admin_cmd {
__u32 result;
};
+#define nvme_admin_cmd nvme_passthru_cmd
+
#define NVME_IOCTL_ID _IO('N', 0x40)
#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
+#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
#endif /* _UAPI_LINUX_NVME_H */
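/*
 * Illustrative sketch, not part of this patch: issuing an admin command via
 * the renamed struct nvme_passthru_cmd (the old NVME_IOCTL_ADMIN_CMD ioctl
 * still works through the nvme_admin_cmd alias). The Identify Controller
 * opcode and CNS value below come from the NVMe specification, not from
 * this header change.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

static int nvme_identify_ctrl(const char *dev, void *buf, unsigned int len)
{
	struct nvme_passthru_cmd cmd = {
		.opcode		= 0x06,			/* Identify */
		.addr		= (unsigned long)buf,	/* 4096-byte buffer */
		.data_len	= len,
		.cdw10		= 1,			/* CNS 1: controller data */
	};
	int fd, ret;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	close(fd);
	return ret;
}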
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a794d1dd7b40..3a6dcaa359b7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -157,6 +157,11 @@ enum ovs_packet_cmd {
* notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
* %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
* specified there.
+ * @OVS_PACKET_ATTR_EGRESS_TUN_KEY: Present for an %OVS_PACKET_CMD_ACTION
+ * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
+ * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
+ * output port is actually a tunnel port. Contains the output tunnel key
+ * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -167,6 +172,8 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */
OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
+ OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
+ attributes. */
__OVS_PACKET_ATTR_MAX
};
@@ -192,6 +199,7 @@ enum ovs_vport_type {
OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
OVS_VPORT_TYPE_GRE, /* GRE tunnel. */
OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */
+ OVS_VPORT_TYPE_GENEVE, /* Geneve tunnel. */
__OVS_VPORT_TYPE_MAX
};
@@ -289,9 +297,15 @@ enum ovs_key_attr {
OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */
OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */
OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */
+ OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash
+ is not computed by the datapath. */
+ OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
+ OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
+ * The implementation may restrict
+ * the accepted length of the array. */
#ifdef __KERNEL__
- OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
#endif
__OVS_KEY_ATTR_MAX
};
@@ -306,6 +320,10 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */
OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */
OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */
+ OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */
+ OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
+ OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */
+ OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -334,6 +352,10 @@ struct ovs_key_ethernet {
__u8 eth_dst[ETH_ALEN];
};
+struct ovs_key_mpls {
+ __be32 mpls_lse;
+};
+
struct ovs_key_ipv4 {
__be32 ipv4_src;
__be32 ipv4_dst;
@@ -387,9 +409,9 @@ struct ovs_key_arp {
};
struct ovs_key_nd {
- __u32 nd_target[4];
- __u8 nd_sll[ETH_ALEN];
- __u8 nd_tll[ETH_ALEN];
+ __be32 nd_target[4];
+ __u8 nd_sll[ETH_ALEN];
+ __u8 nd_tll[ETH_ALEN];
};
/**
@@ -435,6 +457,8 @@ enum ovs_flow_attr {
OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */
OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */
OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */
+ OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error
+ * logging should be suppressed. */
__OVS_FLOW_ATTR_MAX
};
@@ -467,17 +491,34 @@ enum ovs_sample_attr {
* message should be sent. Required.
* @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is
* copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
+ * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
+ * tunnel info.
*/
enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_UNSPEC,
OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */
OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
+ OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
+ * to get tunnel info. */
__OVS_USERSPACE_ATTR_MAX
};
#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
/**
+ * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument.
+ * @mpls_lse: MPLS label stack entry to push.
+ * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame.
+ *
+ * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and
+ * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected.
+ */
+struct ovs_action_push_mpls {
+ __be32 mpls_lse;
+ __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */
+};
+
+/**
* struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
* @vlan_tpid: Tag protocol identifier (TPID) to push.
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
@@ -493,6 +534,27 @@ struct ovs_action_push_vlan {
__be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
};
+/* Data path hash algorithm for computing Datapath hash.
+ *
+ * The algorithm type only specifies which fields in a flow
+ * will be used as part of the hash. Each datapath is free
+ * to use its own hash algorithm. The hash value will be
+ * opaque to the user space daemon.
+ */
+enum ovs_hash_alg {
+ OVS_HASH_ALG_L4,
+};
+
+/*
+ * struct ovs_action_hash - %OVS_ACTION_ATTR_HASH action argument.
+ * @hash_alg: Algorithm used to compute hash prior to recirculation.
+ * @hash_basis: basis used for computing hash.
+ */
+struct ovs_action_hash {
+ uint32_t hash_alg; /* One of ovs_hash_alg. */
+ uint32_t hash_basis;
+};
+
/**
* enum ovs_action_attr - Action types.
*
@@ -507,6 +569,15 @@ struct ovs_action_push_vlan {
* @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
* @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
+ * top of the packets MPLS label stack. Set the ethertype of the
+ * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to
+ * indicate the new packet contents.
+ * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the
+ * packet's MPLS label stack. Set the encapsulating frame's ethertype to
+ * indicate the new packet contents. This could potentially still be
+ * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there
+ * is no MPLS label stack, as determined by ethertype, no action is taken.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -521,6 +592,11 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */
OVS_ACTION_ATTR_POP_VLAN, /* No argument. */
OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
+ OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */
+ OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
+ OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */
+ OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */
+
__OVS_ACTION_ATTR_MAX
};
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 30db069bce62..4a1d0cc38ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -552,6 +552,7 @@
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
@@ -630,7 +631,7 @@
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
-#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
+#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9269de254874..9b79abbd1ab8 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -137,8 +137,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_DATA_SRC = 1U << 15,
PERF_SAMPLE_IDENTIFIER = 1U << 16,
PERF_SAMPLE_TRANSACTION = 1U << 17,
+ PERF_SAMPLE_REGS_INTR = 1U << 18,
- PERF_SAMPLE_MAX = 1U << 18, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
};
/*
@@ -238,6 +239,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
/* add: sample_stack_user */
+#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -334,6 +336,15 @@ struct perf_event_attr {
/* Align to u64. */
__u32 __reserved_2;
+ /*
+ * Defines set of regs to dump for each sample
+ * state captured on:
+ * - precise = 0: PMU interrupt
+ * - precise > 0: sampled instruction
+ *
+ * See asm/perf_regs.h for details.
+ */
+ __u64 sample_regs_intr;
};
#define perf_flags(attr) (*(&(attr)->read_format + 1))
@@ -364,7 +375,7 @@ struct perf_event_mmap_page {
/*
* Bits needed to read the hw events in user-space.
*
- * u32 seq, time_mult, time_shift, idx, width;
+ * u32 seq, time_mult, time_shift, index, width;
* u64 count, enabled, running;
* u64 cyc, time_offset;
* s64 pmc = 0;
@@ -383,11 +394,11 @@ struct perf_event_mmap_page {
* time_shift = pc->time_shift;
* }
*
- * idx = pc->index;
+ * index = pc->index;
* count = pc->offset;
- * if (pc->cap_usr_rdpmc && idx) {
+ * if (pc->cap_user_rdpmc && index) {
* width = pc->pmc_width;
- * pmc = rdpmc(idx - 1);
+ * pmc = rdpmc(index - 1);
* }
*
* barrier();
@@ -415,7 +426,7 @@ struct perf_event_mmap_page {
};
/*
- * If cap_usr_rdpmc this field provides the bit-width of the value
+ * If cap_user_rdpmc this field provides the bit-width of the value
* read using the rdpmc() or equivalent instruction. This can be used
* to sign extend the result like:
*
@@ -439,10 +450,10 @@ struct perf_event_mmap_page {
*
* Where time_offset,time_mult,time_shift and cyc are read in the
* seqcount loop described above. This delta can then be added to
- * enabled and possible running (if idx), improving the scaling:
+ * enabled and possible running (if index), improving the scaling:
*
* enabled += delta;
- * if (idx)
+ * if (index)
* running += delta;
*
* quot = count / running;
@@ -686,6 +697,8 @@ enum perf_event_type {
* { u64 weight; } && PERF_SAMPLE_WEIGHT
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* { u64 transaction; } && PERF_SAMPLE_TRANSACTION
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
* };
*/
PERF_RECORD_SAMPLE = 9,
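/*
 * Illustrative sketch, not part of this patch: the user-space counter read
 * loop documented in the perf_event_mmap_page comments above, for an event
 * whose first mmap page is mapped at 'pc'. x86-only, since it relies on the
 * rdpmc instruction; the barriers follow the seqlock protocol described above.
 */
#include <stdint.h>
#include <linux/perf_event.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}

static uint64_t read_self_count(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, index, width = 64;
	int64_t count, pmc = 0;

	do {
		seq = pc->lock;
		barrier();
		index = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && index) {
			width = pc->pmc_width;
			pmc = rdpmc(index - 1);
		}
		barrier();
	} while (pc->lock != seq);

	/* Sign-extend the raw counter value to 64 bits, as described above. */
	pmc <<= 64 - width;
	pmc >>= 64 - width;
	return count + pmc;
}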
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 58afc04c107e..89f63503f903 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_PRCTL_H
#define _LINUX_PRCTL_H
+#include <linux/types.h>
+
/* Values to pass as first argument to prctl() */
#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
@@ -119,6 +121,31 @@
# define PR_SET_MM_ENV_END 11
# define PR_SET_MM_AUXV 12
# define PR_SET_MM_EXE_FILE 13
+# define PR_SET_MM_MAP 14
+# define PR_SET_MM_MAP_SIZE 15
+
+/*
+ * This structure provides a new memory descriptor
+ * map which mostly modifies /proc/pid/stat[m]
+ * output for a task. This is mostly done for the
+ * sake of checkpoint/restore functionality.
+ */
+struct prctl_mm_map {
+ __u64 start_code; /* code section bounds */
+ __u64 end_code;
+ __u64 start_data; /* data section bounds */
+ __u64 end_data;
+ __u64 start_brk; /* heap for brk() syscall */
+ __u64 brk;
+ __u64 start_stack; /* stack starts at */
+ __u64 arg_start; /* command line arguments bounds */
+ __u64 arg_end;
+ __u64 env_start; /* environment variables bounds */
+ __u64 env_end;
+ __u64 *auxv; /* auxiliary vector */
+ __u32 auxv_size; /* vector size */
+ __u32 exe_fd; /* /proc/$pid/exe link file */
+};
/*
* Set specific pid that is allowed to ptrace the current task.
@@ -152,4 +179,10 @@
#define PR_SET_THP_DISABLE 41
#define PR_GET_THP_DISABLE 42
+/*
+ * Tell the kernel to start/stop helping userspace manage bounds tables.
+ */
+#define PR_MPX_ENABLE_MANAGEMENT 43
+#define PR_MPX_DISABLE_MANAGEMENT 44
+
#endif /* _LINUX_PRCTL_H */
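/*
 * Illustrative sketch, not part of this patch: installing a complete memory
 * map description in one call with the new PR_SET_MM_MAP sub-option, as a
 * checkpoint/restore tool might. All field values are caller-provided and
 * the call requires CAP_SYS_RESOURCE; PR_SET_MM_MAP_SIZE can be used first
 * to query the structure size the running kernel expects.
 */
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

static int install_mm_map(const struct prctl_mm_map *src)
{
	struct prctl_mm_map map;

	memcpy(&map, src, sizeof(map));	/* caller-filled bounds, auxv, exe_fd */
	return prctl(PR_SET_MM, PR_SET_MM_MAP,
		     (unsigned long)&map, sizeof(map), 0);
}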
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 4133e744e4e6..74e7c60c4716 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -39,7 +39,6 @@
#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
-#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t)
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index eb0f1a554d7b..9c9b8b4480cd 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -235,6 +235,7 @@ enum {
#define RTPROT_NTK 15 /* Netsukuku */
#define RTPROT_DHCP 16 /* DHCP client */
#define RTPROT_MROUTED 17 /* Multicast daemon */
+#define RTPROT_BABEL 42 /* Babel daemon */
/* rtm_scope
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 34f9d7387d13..cc89ddefa926 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -13,7 +13,7 @@
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
#define CLONE_THREAD 0x00010000 /* Same thread group? */
-#define CLONE_NEWNS 0x00020000 /* New namespace group? */
+#define CLONE_NEWNS 0x00020000 /* New mount namespace group */
#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
@@ -23,8 +23,8 @@
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
and is now available for re-use. */
-#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
-#define CLONE_NEWIPC 0x08000000 /* New ipcs */
+#define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
+#define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
#define CLONE_NEWUSER 0x10000000 /* New user namespace */
#define CLONE_NEWPID 0x20000000 /* New pid namespace */
#define CLONE_NEWNET 0x40000000 /* New network namespace */
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index 541fce03b50c..dd73b908b2f3 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -63,10 +63,22 @@ struct seminfo {
int semaem;
};
-#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */
-#define SEMMSL 250 /* <= 8 000 max num of semaphores per id */
+/*
+ * SEMMNI, SEMMSL and SEMMNS are default values which can be
+ * modified by sysctl.
+ * The values have been chosen to be larger than necessary for any
+ * known configuration.
+ *
+ * SEMOPM should not be increased beyond 1000, otherwise there is the
+ * risk that semop()/semtimedop() fails due to kernel memory fragmentation when
+ * allocating the sop array.
+ */
+
+
+#define SEMMNI 32000 /* <= IPCMNI max # of semaphore identifiers */
+#define SEMMSL 32000 /* <= INT_MAX max num of semaphores per id */
#define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */
-#define SEMOPM 32 /* <= 1 000 max num of ops per semop call */
+#define SEMOPM 500 /* <= 1 000 max num of ops per semop call */
#define SEMVMX 32767 /* <= 32767 semaphore maximum value */
#define SEMAEM SEMVMX /* adjust on exit max value */
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 5820269aa132..c17218094f18 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -54,7 +54,8 @@
#define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */
#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */
#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
-#define PORT_MAX_8250 28 /* max port ID */
+#define PORT_RT2880 29 /* Ralink RT2880 internal UART */
+#define PORT_MAX_8250 29 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -244,4 +245,7 @@
/* SC16IS74xx */
#define PORT_SC16IS7XX 108
+/* MESON */
+#define PORT_MESON 109
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index df6c9ab6b0cd..53af3b790129 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -359,6 +359,7 @@
#define UART_OMAP_SYSC 0x15 /* System configuration register */
#define UART_OMAP_SYSS 0x16 /* System status register */
#define UART_OMAP_WER 0x17 /* Wake-up enable register */
+#define UART_OMAP_TX_LVL 0x1a /* TX FIFO level register */
/*
* These are the definitions for the MDR1 register
diff --git a/include/uapi/linux/smiapp.h b/include/uapi/linux/smiapp.h
new file mode 100644
index 000000000000..53938f4412ee
--- /dev/null
+++ b/include/uapi/linux/smiapp.h
@@ -0,0 +1,29 @@
+/*
+ * include/uapi/linux/smiapp.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2014 Intel Corporation
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_LINUX_SMIAPP_H_
+#define __UAPI_LINUX_SMIAPP_H_
+
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_DISABLED 0
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR 1
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS 2
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS_GREY 3
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_PN9 4
+
+#endif /* __UAPI_LINUX_SMIAPP_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index df40137f33dd..b22224100011 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -156,6 +156,7 @@ enum
UDP_MIB_RCVBUFERRORS, /* RcvbufErrors */
UDP_MIB_SNDBUFERRORS, /* SndbufErrors */
UDP_MIB_CSUMERRORS, /* InCsumErrors */
+ UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */
__UDP_MIB_MAX
};
@@ -265,6 +266,10 @@ enum
LINUX_MIB_TCPWANTZEROWINDOWADV, /* TCPWantZeroWindowAdv */
LINUX_MIB_TCPSYNRETRANS, /* TCPSynRetrans */
LINUX_MIB_TCPORIGDATASENT, /* TCPOrigDataSent */
+ LINUX_MIB_TCPHYSTARTTRAINDETECT, /* TCPHystartTrainDetect */
+ LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */
+ LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */
+ LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 43aaba1cc037..0956373b56db 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -153,6 +153,7 @@ enum
KERN_MAX_LOCK_DEPTH=74, /* int: rtmutex's maximum lock depth */
KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */
};
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
new file mode 100644
index 000000000000..b483d1909d3e
--- /dev/null
+++ b/include/uapi/linux/target_core_user.h
@@ -0,0 +1,138 @@
+#ifndef __TARGET_CORE_USER_H
+#define __TARGET_CORE_USER_H
+
+/* This header will be used by applications too */
+
+#include <linux/types.h>
+#include <linux/uio.h>
+
+#define TCMU_VERSION "1.0"
+
+/*
+ * Ring Design
+ * -----------
+ *
+ * The mmaped area is divided into three parts:
+ * 1) The mailbox (struct tcmu_mailbox, below)
+ * 2) The command ring
+ * 3) Everything beyond the command ring (data)
+ *
+ * The mailbox tells userspace the offset of the command ring from the
+ * start of the shared memory region, and how big the command ring is.
+ *
+ * The kernel passes SCSI commands to userspace by putting a struct
+ * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
+ * userspace via uio's interrupt mechanism.
+ *
+ * tcmu_cmd_entry contains a header. If the header type is PAD,
+ * userspace should skip hdr->length bytes (mod cmdr_size) to find the
+ * next cmd_entry.
+ *
+ * Otherwise, the entry will contain offsets into the mmaped area that
+ * contain the cdb and data buffers -- the latter accessible via the
+ * iov array. iov addresses are also offsets into the shared area.
+ *
+ * When userspace has completed handling the command, it sets
+ * entry->rsp.scsi_status, fills in rsp.sense_buffer if appropriate,
+ * and also sets mailbox->cmd_tail equal to the old cmd_tail plus
+ * hdr->length, mod cmdr_size. If cmd_tail doesn't equal cmd_head, it
+ * should process the next entry the same way, and so on.
+ */
+
+#define TCMU_MAILBOX_VERSION 1
+#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+
+struct tcmu_mailbox {
+ __u16 version;
+ __u16 flags;
+ __u32 cmdr_off;
+ __u32 cmdr_size;
+
+ __u32 cmd_head;
+
+ /* Updated by user. On its own cacheline */
+ __u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));
+
+} __packed;
+
+enum tcmu_opcode {
+ TCMU_OP_PAD = 0,
+ TCMU_OP_CMD,
+};
+
+/*
+ * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
+ */
+struct tcmu_cmd_entry_hdr {
+ __u32 len_op;
+} __packed;
+
+#define TCMU_OP_MASK 0x7
+
+static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr)
+{
+ return hdr->len_op & TCMU_OP_MASK;
+}
+
+static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op)
+{
+ hdr->len_op &= ~TCMU_OP_MASK;
+ hdr->len_op |= (op & TCMU_OP_MASK);
+}
+
+static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr)
+{
+ return hdr->len_op & ~TCMU_OP_MASK;
+}
+
+static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
+{
+ hdr->len_op &= TCMU_OP_MASK;
+ hdr->len_op |= len;
+}
+
+/* Currently the same as SCSI_SENSE_BUFFERSIZE */
+#define TCMU_SENSE_BUFFERSIZE 96
+
+struct tcmu_cmd_entry {
+ struct tcmu_cmd_entry_hdr hdr;
+
+ uint16_t cmd_id;
+ uint16_t __pad1;
+
+ union {
+ struct {
+ uint64_t cdb_off;
+ uint64_t iov_cnt;
+ struct iovec iov[0];
+ } req;
+ struct {
+ uint8_t scsi_status;
+ uint8_t __pad1;
+ uint16_t __pad2;
+ uint32_t __pad3;
+ char sense_buffer[TCMU_SENSE_BUFFERSIZE];
+ } rsp;
+ };
+
+} __packed;
+
+#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t)
+
+enum tcmu_genl_cmd {
+ TCMU_CMD_UNSPEC,
+ TCMU_CMD_ADDED_DEVICE,
+ TCMU_CMD_REMOVED_DEVICE,
+ __TCMU_CMD_MAX,
+};
+#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
+
+enum tcmu_genl_attr {
+ TCMU_ATTR_UNSPEC,
+ TCMU_ATTR_DEVICE,
+ TCMU_ATTR_MINOR,
+ __TCMU_ATTR_MAX,
+};
+#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
+
+#endif
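
The ring protocol described in the header comment above maps to a fairly small
userspace loop. A minimal sketch, assuming the uio region is already mmap()ed
at `map`, and leaving out the uio interrupt handshake, memory barriers and the
data-area handling; handle_scsi_cmd() is a hypothetical application callback:

	#include <linux/target_core_user.h>

	/* Hypothetical application callback that services one SCSI command. */
	extern int handle_scsi_cmd(struct tcmu_cmd_entry *ent, void *map);

	/* Drain the command ring as described in the "Ring Design" comment. */
	static void tcmu_process_ring(void *map)
	{
		struct tcmu_mailbox *mb = map;
		char *cmdr = (char *)map + mb->cmdr_off;

		while (mb->cmd_tail != mb->cmd_head) {
			struct tcmu_cmd_entry *ent = (void *)(cmdr + mb->cmd_tail);

			if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD)
				/* req.cdb_off and req.iov[] are offsets into map */
				ent->rsp.scsi_status = handle_scsi_cmd(ent, map);
			/* PAD entries are skipped by just advancing cmd_tail. */
			mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(&ent->hdr))
					% mb->cmdr_size;
		}
	}
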
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 56f121605c99..b057da2b87a4 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -7,3 +7,4 @@ header-y += tc_mirred.h
header-y += tc_nat.h
header-y += tc_pedit.h
header-y += tc_skbedit.h
+header-y += tc_vlan.h
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
new file mode 100644
index 000000000000..f7b8d448b960
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_VLAN_H
+#define __LINUX_TC_VLAN_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_VLAN 12
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+
+struct tc_vlan {
+ tc_gen;
+ int v_action;
+};
+
+enum {
+ TCA_VLAN_UNSPEC,
+ TCA_VLAN_TM,
+ TCA_VLAN_PARMS,
+ TCA_VLAN_PUSH_VLAN_ID,
+ TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ __TCA_VLAN_MAX,
+};
+#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
new file mode 100644
index 000000000000..ac5535855982
--- /dev/null
+++ b/include/uapi/linux/thermal.h
@@ -0,0 +1,35 @@
+#ifndef _UAPI_LINUX_THERMAL_H
+#define _UAPI_LINUX_THERMAL_H
+
+#define THERMAL_NAME_LENGTH 20
+
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME "thermal_event"
+#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+
+/* Events supported by Thermal Netlink */
+enum events {
+ THERMAL_AUX0,
+ THERMAL_AUX1,
+ THERMAL_CRITICAL,
+ THERMAL_DEV_FAULT,
+};
+
+/* attributes of thermal_genl_family */
+enum {
+ THERMAL_GENL_ATTR_UNSPEC,
+ THERMAL_GENL_ATTR_EVENT,
+ __THERMAL_GENL_ATTR_MAX,
+};
+#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
+
+/* commands supported by the thermal_genl_family */
+enum {
+ THERMAL_GENL_CMD_UNSPEC,
+ THERMAL_GENL_CMD_EVENT,
+ __THERMAL_GENL_CMD_MAX,
+};
+#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_THERMAL_H */
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
new file mode 100644
index 000000000000..8d723824ad69
--- /dev/null
+++ b/include/uapi/linux/tipc_netlink.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2014, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_NETLINK_H_
+#define _LINUX_TIPC_NETLINK_H_
+
+#define TIPC_GENL_V2_NAME "TIPCv2"
+#define TIPC_GENL_V2_VERSION 0x1
+
+/* Netlink commands */
+enum {
+ TIPC_NL_UNSPEC,
+ TIPC_NL_LEGACY,
+ TIPC_NL_BEARER_DISABLE,
+ TIPC_NL_BEARER_ENABLE,
+ TIPC_NL_BEARER_GET,
+ TIPC_NL_BEARER_SET,
+ TIPC_NL_SOCK_GET,
+ TIPC_NL_PUBL_GET,
+ TIPC_NL_LINK_GET,
+ TIPC_NL_LINK_SET,
+ TIPC_NL_LINK_RESET_STATS,
+ TIPC_NL_MEDIA_GET,
+ TIPC_NL_MEDIA_SET,
+ TIPC_NL_NODE_GET,
+ TIPC_NL_NET_GET,
+ TIPC_NL_NET_SET,
+ TIPC_NL_NAME_TABLE_GET,
+
+ __TIPC_NL_CMD_MAX,
+ TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
+};
+
+/* Top level netlink attributes */
+enum {
+ TIPC_NLA_UNSPEC,
+ TIPC_NLA_BEARER, /* nest */
+ TIPC_NLA_SOCK, /* nest */
+ TIPC_NLA_PUBL, /* nest */
+ TIPC_NLA_LINK, /* nest */
+ TIPC_NLA_MEDIA, /* nest */
+ TIPC_NLA_NODE, /* nest */
+ TIPC_NLA_NET, /* nest */
+ TIPC_NLA_NAME_TABLE, /* nest */
+
+ __TIPC_NLA_MAX,
+ TIPC_NLA_MAX = __TIPC_NLA_MAX - 1
+};
+
+/* Bearer info */
+enum {
+ TIPC_NLA_BEARER_UNSPEC,
+ TIPC_NLA_BEARER_NAME, /* string */
+ TIPC_NLA_BEARER_PROP, /* nest */
+ TIPC_NLA_BEARER_DOMAIN, /* u32 */
+
+ __TIPC_NLA_BEARER_MAX,
+ TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
+};
+
+/* Socket info */
+enum {
+ TIPC_NLA_SOCK_UNSPEC,
+ TIPC_NLA_SOCK_ADDR, /* u32 */
+ TIPC_NLA_SOCK_REF, /* u32 */
+ TIPC_NLA_SOCK_CON, /* nest */
+ TIPC_NLA_SOCK_HAS_PUBL, /* flag */
+
+ __TIPC_NLA_SOCK_MAX,
+ TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
+};
+
+/* Link info */
+enum {
+ TIPC_NLA_LINK_UNSPEC,
+ TIPC_NLA_LINK_NAME, /* string */
+ TIPC_NLA_LINK_DEST, /* u32 */
+ TIPC_NLA_LINK_MTU, /* u32 */
+ TIPC_NLA_LINK_BROADCAST, /* flag */
+ TIPC_NLA_LINK_UP, /* flag */
+ TIPC_NLA_LINK_ACTIVE, /* flag */
+ TIPC_NLA_LINK_PROP, /* nest */
+ TIPC_NLA_LINK_STATS, /* nest */
+ TIPC_NLA_LINK_RX, /* u32 */
+ TIPC_NLA_LINK_TX, /* u32 */
+
+ __TIPC_NLA_LINK_MAX,
+ TIPC_NLA_LINK_MAX = __TIPC_NLA_LINK_MAX - 1
+};
+
+/* Media info */
+enum {
+ TIPC_NLA_MEDIA_UNSPEC,
+ TIPC_NLA_MEDIA_NAME, /* string */
+ TIPC_NLA_MEDIA_PROP, /* nest */
+
+ __TIPC_NLA_MEDIA_MAX,
+ TIPC_NLA_MEDIA_MAX = __TIPC_NLA_MEDIA_MAX - 1
+};
+
+/* Node info */
+enum {
+ TIPC_NLA_NODE_UNSPEC,
+ TIPC_NLA_NODE_ADDR, /* u32 */
+ TIPC_NLA_NODE_UP, /* flag */
+
+ __TIPC_NLA_NODE_MAX,
+ TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
+};
+
+/* Net info */
+enum {
+ TIPC_NLA_NET_UNSPEC,
+ TIPC_NLA_NET_ID, /* u32 */
+ TIPC_NLA_NET_ADDR, /* u32 */
+
+ __TIPC_NLA_NET_MAX,
+ TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1
+};
+
+/* Name table info */
+enum {
+ TIPC_NLA_NAME_TABLE_UNSPEC,
+ TIPC_NLA_NAME_TABLE_PUBL, /* nest */
+
+ __TIPC_NLA_NAME_TABLE_MAX,
+ TIPC_NLA_NAME_TABLE_MAX = __TIPC_NLA_NAME_TABLE_MAX - 1
+};
+
+/* Publication info */
+enum {
+ TIPC_NLA_PUBL_UNSPEC,
+
+ TIPC_NLA_PUBL_TYPE, /* u32 */
+ TIPC_NLA_PUBL_LOWER, /* u32 */
+ TIPC_NLA_PUBL_UPPER, /* u32 */
+ TIPC_NLA_PUBL_SCOPE, /* u32 */
+ TIPC_NLA_PUBL_NODE, /* u32 */
+ TIPC_NLA_PUBL_REF, /* u32 */
+ TIPC_NLA_PUBL_KEY, /* u32 */
+
+ __TIPC_NLA_PUBL_MAX,
+ TIPC_NLA_PUBL_MAX = __TIPC_NLA_PUBL_MAX - 1
+};
+
+/* Nest, connection info */
+enum {
+ TIPC_NLA_CON_UNSPEC,
+
+ TIPC_NLA_CON_FLAG, /* flag */
+ TIPC_NLA_CON_NODE, /* u32 */
+ TIPC_NLA_CON_SOCK, /* u32 */
+ TIPC_NLA_CON_TYPE, /* u32 */
+ TIPC_NLA_CON_INST, /* u32 */
+
+ __TIPC_NLA_CON_MAX,
+ TIPC_NLA_CON_MAX = __TIPC_NLA_CON_MAX - 1
+};
+
+/* Nest, link properties. Valid for link, media and bearer */
+enum {
+ TIPC_NLA_PROP_UNSPEC,
+
+ TIPC_NLA_PROP_PRIO, /* u32 */
+ TIPC_NLA_PROP_TOL, /* u32 */
+ TIPC_NLA_PROP_WIN, /* u32 */
+
+ __TIPC_NLA_PROP_MAX,
+ TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1
+};
+
+/* Nest, statistics info */
+enum {
+ TIPC_NLA_STATS_UNSPEC,
+
+ TIPC_NLA_STATS_RX_INFO, /* u32 */
+ TIPC_NLA_STATS_RX_FRAGMENTS, /* u32 */
+ TIPC_NLA_STATS_RX_FRAGMENTED, /* u32 */
+ TIPC_NLA_STATS_RX_BUNDLES, /* u32 */
+ TIPC_NLA_STATS_RX_BUNDLED, /* u32 */
+ TIPC_NLA_STATS_TX_INFO, /* u32 */
+ TIPC_NLA_STATS_TX_FRAGMENTS, /* u32 */
+ TIPC_NLA_STATS_TX_FRAGMENTED, /* u32 */
+ TIPC_NLA_STATS_TX_BUNDLES, /* u32 */
+ TIPC_NLA_STATS_TX_BUNDLED, /* u32 */
+ TIPC_NLA_STATS_MSG_PROF_TOT, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_CNT, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_TOT, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P0, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P1, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P2, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P3, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P4, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P5, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P6, /* u32 */
+ TIPC_NLA_STATS_RX_STATES, /* u32 */
+ TIPC_NLA_STATS_RX_PROBES, /* u32 */
+ TIPC_NLA_STATS_RX_NACKS, /* u32 */
+ TIPC_NLA_STATS_RX_DEFERRED, /* u32 */
+ TIPC_NLA_STATS_TX_STATES, /* u32 */
+ TIPC_NLA_STATS_TX_PROBES, /* u32 */
+ TIPC_NLA_STATS_TX_NACKS, /* u32 */
+ TIPC_NLA_STATS_TX_ACKS, /* u32 */
+ TIPC_NLA_STATS_RETRANSMITTED, /* u32 */
+ TIPC_NLA_STATS_DUPLICATES, /* u32 */
+ TIPC_NLA_STATS_LINK_CONGS, /* u32 */
+ TIPC_NLA_STATS_MAX_QUEUE, /* u32 */
+ TIPC_NLA_STATS_AVG_QUEUE, /* u32 */
+
+ __TIPC_NLA_STATS_MAX,
+ TIPC_NLA_STATS_MAX = __TIPC_NLA_STATS_MAX - 1
+};
+
+#endif
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index eefcb483a2c0..fae4864737fa 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -6,27 +6,31 @@
* shared by the tty_port flags structures.
*
* Define ASYNCB_* for convenient use with {test,set,clear}_bit.
+ *
+ * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
+ * [x] in the bit comments indicates the flag is defunct and no longer used.
*/
#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes
* on the callout port */
#define ASYNCB_FOURPORT 1 /* Set OUT1, OUT2 per AST Fourport settings */
#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
-#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */
+#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */
#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */
#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */
#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */
#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during
* autoconfiguration */
-#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */
-#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */
-#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */
+#define ASYNCB_SESSION_LOCKOUT 8 /* [x] Lock out cua opens based on session */
+#define ASYNCB_PGRP_LOCKOUT 9 /* [x] Lock out cua opens based on pgrp */
+#define ASYNCB_CALLOUT_NOHUP 10 /* [x] Don't do hangups for cua device */
#define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */
#define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */
#define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */
#define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety
* checks. Note: can be dangerous! */
-#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */
-#define ASYNCB_LAST_USER 15
+#define ASYNCB_AUTOPROBE 15 /* [x] Port was autoprobed by PCI/PNP code */
+#define ASYNCB_MAGIC_MULTIPLIER 16 /* Use special CLK or divisor */
+#define ASYNCB_LAST_USER 16
/* Internal flags used only by kernel */
#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
@@ -57,8 +61,11 @@
#define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY)
#define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART)
#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE)
+#define ASYNC_MAGIC_MULTIPLIER (1U << ASYNCB_MAGIC_MULTIPLIER)
#define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1)
+#define ASYNC_DEPRECATED (ASYNC_SESSION_LOCKOUT | ASYNC_PGRP_LOCKOUT | \
+ ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE)
#define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \
ASYNC_LOW_LATENCY)
#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
diff --git a/include/uapi/linux/uhid.h b/include/uapi/linux/uhid.h
index 1e3b09c191cd..aaa86d6bd1dd 100644
--- a/include/uapi/linux/uhid.h
+++ b/include/uapi/linux/uhid.h
@@ -24,35 +24,23 @@
#include <linux/hid.h>
enum uhid_event_type {
- UHID_CREATE,
+ __UHID_LEGACY_CREATE,
UHID_DESTROY,
UHID_START,
UHID_STOP,
UHID_OPEN,
UHID_CLOSE,
UHID_OUTPUT,
- UHID_OUTPUT_EV, /* obsolete! */
- UHID_INPUT,
- UHID_FEATURE,
- UHID_FEATURE_ANSWER,
+ __UHID_LEGACY_OUTPUT_EV,
+ __UHID_LEGACY_INPUT,
+ UHID_GET_REPORT,
+ UHID_GET_REPORT_REPLY,
UHID_CREATE2,
UHID_INPUT2,
+ UHID_SET_REPORT,
+ UHID_SET_REPORT_REPLY,
};
-struct uhid_create_req {
- __u8 name[128];
- __u8 phys[64];
- __u8 uniq[64];
- __u8 __user *rd_data;
- __u16 rd_size;
-
- __u16 bus;
- __u32 vendor;
- __u32 product;
- __u32 version;
- __u32 country;
-} __attribute__((__packed__));
-
struct uhid_create2_req {
__u8 name[128];
__u8 phys[64];
@@ -66,6 +54,16 @@ struct uhid_create2_req {
__u8 rd_data[HID_MAX_DESCRIPTOR_SIZE];
} __attribute__((__packed__));
+enum uhid_dev_flag {
+ UHID_DEV_NUMBERED_FEATURE_REPORTS = (1ULL << 0),
+ UHID_DEV_NUMBERED_OUTPUT_REPORTS = (1ULL << 1),
+ UHID_DEV_NUMBERED_INPUT_REPORTS = (1ULL << 2),
+};
+
+struct uhid_start_req {
+ __u64 dev_flags;
+};
+
#define UHID_DATA_MAX 4096
enum uhid_report_type {
@@ -74,36 +72,94 @@ enum uhid_report_type {
UHID_INPUT_REPORT,
};
-struct uhid_input_req {
+struct uhid_input2_req {
+ __u16 size;
+ __u8 data[UHID_DATA_MAX];
+} __attribute__((__packed__));
+
+struct uhid_output_req {
__u8 data[UHID_DATA_MAX];
__u16 size;
+ __u8 rtype;
} __attribute__((__packed__));
-struct uhid_input2_req {
+struct uhid_get_report_req {
+ __u32 id;
+ __u8 rnum;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_get_report_reply_req {
+ __u32 id;
+ __u16 err;
__u16 size;
__u8 data[UHID_DATA_MAX];
} __attribute__((__packed__));
-struct uhid_output_req {
+struct uhid_set_report_req {
+ __u32 id;
+ __u8 rnum;
+ __u8 rtype;
+ __u16 size;
+ __u8 data[UHID_DATA_MAX];
+} __attribute__((__packed__));
+
+struct uhid_set_report_reply_req {
+ __u32 id;
+ __u16 err;
+} __attribute__((__packed__));
+
+/*
+ * Compat Layer
+ * All these commands and requests are obsolete. You should avoid using them in
+ * new code. We support them for backwards compatibility, but you might not get
+ * access to new features if you use them.
+ */
+
+enum uhid_legacy_event_type {
+ UHID_CREATE = __UHID_LEGACY_CREATE,
+ UHID_OUTPUT_EV = __UHID_LEGACY_OUTPUT_EV,
+ UHID_INPUT = __UHID_LEGACY_INPUT,
+ UHID_FEATURE = UHID_GET_REPORT,
+ UHID_FEATURE_ANSWER = UHID_GET_REPORT_REPLY,
+};
+
+/* Obsolete! Use UHID_CREATE2. */
+struct uhid_create_req {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+ __u8 __user *rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+/* Obsolete! Use UHID_INPUT2. */
+struct uhid_input_req {
__u8 data[UHID_DATA_MAX];
__u16 size;
- __u8 rtype;
} __attribute__((__packed__));
-/* Obsolete! Newer kernels will no longer send these events but instead convert
- * it into raw output reports via UHID_OUTPUT. */
+/* Obsolete! Kernel uses UHID_OUTPUT exclusively now. */
struct uhid_output_ev_req {
__u16 type;
__u16 code;
__s32 value;
} __attribute__((__packed__));
+/* Obsolete! Kernel uses ABI compatible UHID_GET_REPORT. */
struct uhid_feature_req {
__u32 id;
__u8 rnum;
__u8 rtype;
} __attribute__((__packed__));
+/* Obsolete! Use ABI compatible UHID_GET_REPORT_REPLY. */
struct uhid_feature_answer_req {
__u32 id;
__u16 err;
@@ -111,6 +167,15 @@ struct uhid_feature_answer_req {
__u8 data[UHID_DATA_MAX];
} __attribute__((__packed__));
+/*
+ * UHID Events
+ * All UHID events from and to the kernel are encoded as "struct uhid_event".
+ * The "type" field contains a UHID_* type identifier. All payload depends on
+ * that type and can be accessed via ev->u.XYZ accordingly.
+ * If user-space writes short events, they're extended with 0s by the kernel. If
+ * the kernel writes short events, user-space shall extend them with 0s.
+ */
+
struct uhid_event {
__u32 type;
@@ -120,9 +185,14 @@ struct uhid_event {
struct uhid_output_req output;
struct uhid_output_ev_req output_ev;
struct uhid_feature_req feature;
+ struct uhid_get_report_req get_report;
struct uhid_feature_answer_req feature_answer;
+ struct uhid_get_report_reply_req get_report_reply;
struct uhid_create2_req create2;
struct uhid_input2_req input2;
+ struct uhid_set_report_req set_report;
+ struct uhid_set_report_reply_req set_report_reply;
+ struct uhid_start_req start;
} u;
} __attribute__((__packed__));
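
Since all traffic with /dev/uhid is framed as struct uhid_event, the reworked
request set above is exercised from userspace roughly as follows. A sketch
only: the report descriptor bytes and vendor/product IDs are placeholders and
error handling is minimal.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/uhid.h>

	int main(void)
	{
		/* Placeholder report descriptor; a real device provides its own. */
		static const unsigned char rdesc[] = { 0x05, 0x01, 0x09, 0x02 };
		struct uhid_event ev;
		int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);

		if (fd < 0)
			return 1;

		memset(&ev, 0, sizeof(ev));
		ev.type = UHID_CREATE2;
		strcpy((char *)ev.u.create2.name, "example-uhid-device");
		memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
		ev.u.create2.rd_size = sizeof(rdesc);
		ev.u.create2.bus = 0x03;		/* BUS_USB */
		ev.u.create2.vendor = 0x1234;		/* placeholder */
		ev.u.create2.product = 0x5678;		/* placeholder */

		/* Short events are zero-extended by the kernel, but writing the
		 * full structure keeps the sketch simple. */
		if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
			return 1;

		/* ...send reports with UHID_INPUT2, answer UHID_GET_REPORT... */
		close(fd);
		return 0;
	}
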
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 0154b2859fd7..295ba299e7bd 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -19,6 +19,7 @@ enum functionfs_flags {
FUNCTIONFS_HAS_HS_DESC = 2,
FUNCTIONFS_HAS_SS_DESC = 4,
FUNCTIONFS_HAS_MS_OS_DESC = 8,
+ FUNCTIONFS_VIRTUAL_ADDR = 16,
};
/* Descriptor of a non-audio endpoint */
@@ -32,6 +33,16 @@ struct usb_endpoint_descriptor_no_audio {
__u8 bInterval;
} __attribute__((packed));
+struct usb_functionfs_descs_head_v2 {
+ __le32 magic;
+ __le32 length;
+ __le32 flags;
+ /*
+	 * __le32 fs_count, hs_count, ss_count; must be included manually in
+	 * the structure, taking the flags field into consideration.
+ */
+} __attribute__((packed));
+
/* Legacy format, deprecated as of 3.14. */
struct usb_functionfs_descs_head {
__le32 magic;
@@ -92,7 +103,7 @@ struct usb_ext_prop_desc {
* structure. Any flags that are not recognised cause the whole block to be
* rejected with -ENOSYS.
*
- * Legacy descriptors format:
+ * Legacy descriptors format (deprecated as of 3.14):
*
* | off | name | type | description |
* |-----+-----------+--------------+--------------------------------------|
@@ -265,6 +276,12 @@ struct usb_functionfs_event {
*/
#define FUNCTIONFS_ENDPOINT_REVMAP _IO('g', 129)
+/*
+ * Returns the endpoint descriptor. If the function is not active, returns -ENODEV.
+ */
+#define FUNCTIONFS_ENDPOINT_DESC _IOR('g', 130, \
+ struct usb_endpoint_descriptor)
+
#endif /* _UAPI__LINUX_FUNCTIONFS_H__ */
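
Because usb_functionfs_descs_head_v2 no longer carries fixed count members,
userspace declares its own blob layout matching the flags it sets and writes
the result to the function's ep0 file. A sketch for a function advertising FS
and HS descriptors only; the struct name and counts are illustrative, and the
descriptors themselves are elided:

	#include <endian.h>
	#include <string.h>
	#include <linux/usb/functionfs.h>

	/* Counts follow the v2 header, one __le32 per speed selected in flags,
	 * in flag-bit order (fs, hs, ss); here only fs and hs are present. */
	struct my_descs {
		struct usb_functionfs_descs_head_v2 header;
		__le32 fs_count;
		__le32 hs_count;
		/* full-speed descriptors, then high-speed descriptors, follow */
	} __attribute__((packed));

	static void fill_descs(struct my_descs *d)
	{
		memset(d, 0, sizeof(*d));
		d->header.magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2);
		d->header.length = htole32(sizeof(*d));
		d->header.flags  = htole32(FUNCTIONFS_HAS_FS_DESC |
					   FUNCTIONFS_HAS_HS_DESC);
		d->fs_count = htole32(1);	/* one FS descriptor (not shown) */
		d->hs_count = htole32(1);	/* one HS descriptor (not shown) */
	}
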
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 2f6f8cafe773..15273987093e 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -43,6 +43,8 @@
#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
/* Cropping bounds */
#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
+/* Native frame size */
+#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003
/* Current composing area */
#define V4L2_SEL_TGT_COMPOSE 0x0100
/* Default composing area */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e946e43fb8d5..661f119a51b8 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -746,6 +746,8 @@ enum v4l2_auto_focus_range {
V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
};
+#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
+#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
/* FM Modulator class control IDs */
@@ -865,6 +867,10 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
+#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
+#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
+#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
+#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
/* Image processing controls */
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 1445e858854f..26db20647e6f 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -11,122 +11,10 @@
#ifndef __LINUX_V4L2_MEDIABUS_H
#define __LINUX_V4L2_MEDIABUS_H
+#include <linux/media-bus-format.h>
#include <linux/types.h>
#include <linux/videodev2.h>
-/*
- * These pixel codes uniquely identify data formats on the media bus. Mostly
- * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is
- * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the
- * data format is fixed. Additionally, "2X8" means that one pixel is transferred
- * in two 8-bit samples, "BE" or "LE" specify in which order those samples are
- * transferred over the bus: "LE" means that the least significant bits are
- * transferred first, "BE" means that the most significant bits are transferred
- * first, and "PADHI" and "PADLO" define which bits - low or high, in the
- * incomplete high byte, are filled with padding bits.
- *
- * The pixel codes are grouped by type, bus_width, bits per component, samples
- * per pixel and order of subsamples. Numerical values are sorted using generic
- * numerical sort order (8 thus comes before 10).
- *
- * As their value can't change when a new pixel code is inserted in the
- * enumeration, the pixel codes are explicitly given a numerical value. The next
- * free values for each category are listed below, update them when inserting
- * new pixel codes.
- */
-enum v4l2_mbus_pixelcode {
- V4L2_MBUS_FMT_FIXED = 0x0001,
-
- /* RGB - next is 0x100e */
- V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 0x1001,
- V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 0x1002,
- V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 0x1003,
- V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 0x1004,
- V4L2_MBUS_FMT_BGR565_2X8_BE = 0x1005,
- V4L2_MBUS_FMT_BGR565_2X8_LE = 0x1006,
- V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007,
- V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008,
- V4L2_MBUS_FMT_RGB666_1X18 = 0x1009,
- V4L2_MBUS_FMT_RGB888_1X24 = 0x100a,
- V4L2_MBUS_FMT_RGB888_2X12_BE = 0x100b,
- V4L2_MBUS_FMT_RGB888_2X12_LE = 0x100c,
- V4L2_MBUS_FMT_ARGB8888_1X32 = 0x100d,
-
- /* YUV (including grey) - next is 0x2024 */
- V4L2_MBUS_FMT_Y8_1X8 = 0x2001,
- V4L2_MBUS_FMT_UV8_1X8 = 0x2015,
- V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002,
- V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003,
- V4L2_MBUS_FMT_YUYV8_1_5X8 = 0x2004,
- V4L2_MBUS_FMT_YVYU8_1_5X8 = 0x2005,
- V4L2_MBUS_FMT_UYVY8_2X8 = 0x2006,
- V4L2_MBUS_FMT_VYUY8_2X8 = 0x2007,
- V4L2_MBUS_FMT_YUYV8_2X8 = 0x2008,
- V4L2_MBUS_FMT_YVYU8_2X8 = 0x2009,
- V4L2_MBUS_FMT_Y10_1X10 = 0x200a,
- V4L2_MBUS_FMT_UYVY10_2X10 = 0x2018,
- V4L2_MBUS_FMT_VYUY10_2X10 = 0x2019,
- V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b,
- V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c,
- V4L2_MBUS_FMT_Y12_1X12 = 0x2013,
- V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f,
- V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010,
- V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011,
- V4L2_MBUS_FMT_YVYU8_1X16 = 0x2012,
- V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 0x2014,
- V4L2_MBUS_FMT_UYVY10_1X20 = 0x201a,
- V4L2_MBUS_FMT_VYUY10_1X20 = 0x201b,
- V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d,
- V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e,
- V4L2_MBUS_FMT_YUV10_1X30 = 0x2016,
- V4L2_MBUS_FMT_AYUV8_1X32 = 0x2017,
- V4L2_MBUS_FMT_UYVY12_2X12 = 0x201c,
- V4L2_MBUS_FMT_VYUY12_2X12 = 0x201d,
- V4L2_MBUS_FMT_YUYV12_2X12 = 0x201e,
- V4L2_MBUS_FMT_YVYU12_2X12 = 0x201f,
- V4L2_MBUS_FMT_UYVY12_1X24 = 0x2020,
- V4L2_MBUS_FMT_VYUY12_1X24 = 0x2021,
- V4L2_MBUS_FMT_YUYV12_1X24 = 0x2022,
- V4L2_MBUS_FMT_YVYU12_1X24 = 0x2023,
-
- /* Bayer - next is 0x3019 */
- V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001,
- V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013,
- V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002,
- V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014,
- V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 0x3015,
- V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 0x3016,
- V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 0x3017,
- V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 0x3018,
- V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b,
- V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009,
- V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 0x300d,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 0x3003,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 0x3004,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE = 0x3005,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 0x3006,
- V4L2_MBUS_FMT_SBGGR10_1X10 = 0x3007,
- V4L2_MBUS_FMT_SGBRG10_1X10 = 0x300e,
- V4L2_MBUS_FMT_SGRBG10_1X10 = 0x300a,
- V4L2_MBUS_FMT_SRGGB10_1X10 = 0x300f,
- V4L2_MBUS_FMT_SBGGR12_1X12 = 0x3008,
- V4L2_MBUS_FMT_SGBRG12_1X12 = 0x3010,
- V4L2_MBUS_FMT_SGRBG12_1X12 = 0x3011,
- V4L2_MBUS_FMT_SRGGB12_1X12 = 0x3012,
-
- /* JPEG compressed formats - next is 0x4002 */
- V4L2_MBUS_FMT_JPEG_1X8 = 0x4001,
-
- /* Vendor specific formats - next is 0x5002 */
-
- /* S5C73M3 sensor specific interleaved UYVY and JPEG */
- V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 0x5001,
-
- /* HSV - next is 0x6002 */
- V4L2_MBUS_FMT_AHSV8888_1X32 = 0x6001,
-};
-
/**
* struct v4l2_mbus_framefmt - frame format on the media bus
* @width: frame width
@@ -134,6 +22,8 @@ enum v4l2_mbus_pixelcode {
* @code: data format code (from enum v4l2_mbus_pixelcode)
* @field: used interlacing type (from enum v4l2_field)
* @colorspace: colorspace of the data (from enum v4l2_colorspace)
+ * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
+ * @quantization: quantization of the data (from enum v4l2_quantization)
*/
struct v4l2_mbus_framefmt {
__u32 width;
@@ -141,7 +31,108 @@ struct v4l2_mbus_framefmt {
__u32 code;
__u32 field;
__u32 colorspace;
- __u32 reserved[7];
+ __u16 ycbcr_enc;
+ __u16 quantization;
+ __u32 reserved[6];
+};
+
+#ifndef __KERNEL__
+/*
+ * enum v4l2_mbus_pixelcode and its definitions are now deprecated, and
+ * MEDIA_BUS_FMT_ definitions (defined in media-bus-format.h) should be
+ * used instead.
+ *
+ * New defines should only be added to media-bus-format.h. The
+ * v4l2_mbus_pixelcode enum is frozen.
+ */
+
+#define V4L2_MBUS_FROM_MEDIA_BUS_FMT(name) \
+ V4L2_MBUS_FMT_ ## name = MEDIA_BUS_FMT_ ## name
+
+enum v4l2_mbus_pixelcode {
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(FIXED),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB666_1X18),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YDYUYDYV8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUV10_1X30),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(AYUV8_1X32),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB12_1X12),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(JPEG_1X8),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(S5C_UYVY_JPEG_1X8),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(AHSV8888_1X32),
};
+#endif /* __KERNEL__ */
#endif
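
The V4L2_MBUS_FROM_MEDIA_BUS_FMT() wrapper pins each legacy enumerator to its
MEDIA_BUS_FMT_ counterpart, so the deprecated names keep the numeric values
they had in the removed enum (0x1001 for the first RGB entry, for example).
That invariant can be checked with a one-line compile-time assertion:

	#include <linux/v4l2-mediabus.h>

	/* The legacy name must stay interchangeable with the new define. */
	_Static_assert(V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE ==
		       MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
		       "legacy mbus code changed value");
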
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index a619cdd300ac..e0a7e3da498a 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -68,7 +68,7 @@ struct v4l2_subdev_crop {
* struct v4l2_subdev_mbus_code_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
- * @code: format code (from enum v4l2_mbus_pixelcode)
+ * @code: format code (MEDIA_BUS_FMT_ definitions)
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
@@ -81,7 +81,7 @@ struct v4l2_subdev_mbus_code_enum {
* struct v4l2_subdev_frame_size_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
- * @code: format code (from enum v4l2_mbus_pixelcode)
+ * @code: format code (MEDIA_BUS_FMT_ definitions)
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -109,7 +109,7 @@ struct v4l2_subdev_frame_interval {
* struct v4l2_subdev_frame_interval_enum - Frame interval enumeration
* @pad: pad number, as reported by the media API
* @index: frame interval index during enumeration
- * @code: format code (from enum v4l2_mbus_pixelcode)
+ * @code: format code (MEDIA_BUS_FMT_ definitions)
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 6612974c64bf..29715d27548f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -33,6 +33,9 @@
/* Check if EEH is supported */
#define VFIO_EEH 5
+/* Two-stage IOMMU */
+#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 778a3298fb34..d279c1b75cf7 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -79,6 +79,7 @@
/* Four-character-code (FOURCC) */
#define v4l2_fourcc(a, b, c, d)\
((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
+#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31))
/*
* E N U M S
@@ -177,30 +178,103 @@ enum v4l2_memory {
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
- /* ITU-R 601 -- broadcast NTSC/PAL */
+ /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
V4L2_COLORSPACE_SMPTE170M = 1,
- /* 1125-Line (US) HDTV */
+ /* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */
V4L2_COLORSPACE_SMPTE240M = 2,
- /* HD and modern captures. */
+ /* Rec.709: used for HDTV */
V4L2_COLORSPACE_REC709 = 3,
- /* broken BT878 extents (601, luma range 16-253 instead of 16-235) */
+ /*
+ * Deprecated, do not use. No driver will ever return this. This was
+ * based on a misunderstanding of the bt878 datasheet.
+ */
V4L2_COLORSPACE_BT878 = 4,
- /* These should be useful. Assume 601 extents. */
+ /*
+ * NTSC 1953 colorspace. This only makes sense when dealing with
+ * really, really old NTSC recordings. Superseded by SMPTE 170M.
+ */
V4L2_COLORSPACE_470_SYSTEM_M = 5,
+
+ /*
+ * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
+ * dealing with really old PAL/SECAM recordings. Superseded by
+ * SMPTE 170M.
+ */
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
- /* I know there will be cameras that send this. So, this is
- * unspecified chromaticities and full 0-255 on each of the
- * Y'CbCr components
+ /*
+ * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601
+ * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG.
*/
V4L2_COLORSPACE_JPEG = 7,
- /* For RGB colourspaces, this is probably a good start. */
+	/* For RGB colorspaces such as those produced by most webcams. */
V4L2_COLORSPACE_SRGB = 8,
+
+ /* AdobeRGB colorspace */
+ V4L2_COLORSPACE_ADOBERGB = 9,
+
+ /* BT.2020 colorspace, used for UHDTV. */
+ V4L2_COLORSPACE_BT2020 = 10,
+};
+
+enum v4l2_ycbcr_encoding {
+ /*
+ * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
+ * various colorspaces:
+ *
+ * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
+ * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and
+ * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
+ *
+ * V4L2_COLORSPACE_REC709: V4L2_YCBCR_ENC_709
+ *
+ * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC
+ *
+ * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
+ *
+ * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
+ */
+ V4L2_YCBCR_ENC_DEFAULT = 0,
+
+ /* ITU-R 601 -- SDTV */
+ V4L2_YCBCR_ENC_601 = 1,
+
+ /* Rec. 709 -- HDTV */
+ V4L2_YCBCR_ENC_709 = 2,
+
+ /* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */
+ V4L2_YCBCR_ENC_XV601 = 3,
+
+ /* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
+ V4L2_YCBCR_ENC_XV709 = 4,
+
+ /* sYCC (Y'CbCr encoding of sRGB) */
+ V4L2_YCBCR_ENC_SYCC = 5,
+
+ /* BT.2020 Non-constant Luminance Y'CbCr */
+ V4L2_YCBCR_ENC_BT2020 = 6,
+
+ /* BT.2020 Constant Luminance Y'CbcCrc */
+ V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7,
+
+ /* SMPTE 240M -- Obsolete HDTV */
+ V4L2_YCBCR_ENC_SMPTE240M = 8,
+};
+
+enum v4l2_quantization {
+ /*
+ * The default for R'G'B' quantization is always full range. For
+ * Y'CbCr the quantization is always limited range, except for
+ * SYCC, XV601, XV709 or JPEG: those are full range.
+ */
+ V4L2_QUANTIZATION_DEFAULT = 0,
+ V4L2_QUANTIZATION_FULL_RANGE = 1,
+ V4L2_QUANTIZATION_LIM_RANGE = 2,
};
enum v4l2_priority {
@@ -293,6 +367,8 @@ struct v4l2_pix_format {
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
+ __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */
+ __u32 quantization; /* enum v4l2_quantization */
};
/* Pixel format FOURCC depth Description */
@@ -307,6 +383,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
+#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
+#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
@@ -1246,6 +1324,7 @@ struct v4l2_input {
#define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
#define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */
#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
/*
* V I D E O O U T P U T S
@@ -1269,6 +1348,7 @@ struct v4l2_output {
#define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
#define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */
#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
/*
* C O N T R O L S
@@ -1285,11 +1365,11 @@ struct v4l2_ext_control {
union {
__s32 value;
__s64 value64;
- char *string;
- __u8 *p_u8;
- __u16 *p_u16;
- __u32 *p_u32;
- void *ptr;
+ char __user *string;
+ __u8 __user *p_u8;
+ __u16 __user *p_u16;
+ __u32 __user *p_u32;
+ void __user *ptr;
};
} __attribute__ ((packed));
@@ -1774,6 +1854,8 @@ struct v4l2_plane_pix_format {
* @plane_fmt: per-plane information
* @num_planes: number of planes for this format
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
*/
struct v4l2_pix_format_mplane {
__u32 width;
@@ -1785,7 +1867,9 @@ struct v4l2_pix_format_mplane {
struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
__u8 num_planes;
__u8 flags;
- __u8 reserved[10];
+ __u8 ycbcr_enc;
+ __u8 quantization;
+ __u8 reserved[8];
} __attribute__ ((packed));
/**
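
The V4L2_YCBCR_ENC_DEFAULT mapping documented in the enum comment above is
something applications resolve themselves when a driver reports 0. A sketch of
that lookup (the helper name is ours, not part of the API):

	#include <linux/videodev2.h>

	/* Resolve V4L2_YCBCR_ENC_DEFAULT per the enum v4l2_ycbcr_encoding comment. */
	static enum v4l2_ycbcr_encoding
	default_ycbcr_enc(enum v4l2_colorspace cs)
	{
		switch (cs) {
		case V4L2_COLORSPACE_REC709:
			return V4L2_YCBCR_ENC_709;
		case V4L2_COLORSPACE_SRGB:
			return V4L2_YCBCR_ENC_SYCC;
		case V4L2_COLORSPACE_BT2020:
			return V4L2_YCBCR_ENC_BT2020;
		case V4L2_COLORSPACE_SMPTE240M:
			return V4L2_YCBCR_ENC_SMPTE240M;
		default:
			/* SMPTE170M, 470 SYSTEM M/BG, AdobeRGB and JPEG map to 601. */
			return V4L2_YCBCR_ENC_601;
		}
	}
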
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 5e26f61b5df5..be40f7059e93 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -31,6 +31,7 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
+#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 9ad67b267584..247c8ba8544a 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_types.h>
/* Feature bits */
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
@@ -114,18 +115,18 @@ struct virtio_blk_config {
/* This is the first element of the read scatter-gather list. */
struct virtio_blk_outhdr {
/* VIRTIO_BLK_T* */
- __u32 type;
+ __virtio32 type;
/* io priority. */
- __u32 ioprio;
+ __virtio32 ioprio;
/* Sector (ie. 512 byte offset) */
- __u64 sector;
+ __virtio64 sector;
};
struct virtio_scsi_inhdr {
- __u32 errors;
- __u32 data_len;
- __u32 sense_len;
- __u32 residual;
+ __virtio32 errors;
+ __virtio32 data_len;
+ __virtio32 sense_len;
+ __virtio32 residual;
};
/* And this is the final byte of the write scatter-gather list. */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 3ce768c6910d..a6d0cdeaacd4 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -38,14 +38,16 @@
#define VIRTIO_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK 4
+/* Driver has finished configuring features */
+#define VIRTIO_CONFIG_S_FEATURES_OK 8
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80
-/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
+/* Some virtio feature bits (currently bits 28 through 32) are reserved for the
* transport being used (eg. virtio_ring), the rest are per-device feature
* bits. */
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 32
+#define VIRTIO_TRANSPORT_F_END 33
/* Do we get callbacks when the ring is completely used, even if we've
* suppressed them? */
@@ -54,4 +56,7 @@
/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27
+/* v1.0 compliant. */
+#define VIRTIO_F_VERSION_1 32
+
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
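
With VIRTIO_CONFIG_S_FEATURES_OK added, a modern driver's status handshake
(per the virtio 1.0 spec) roughly becomes the sequence below. This is a sketch
of the ordering only; the status and feature accessors are hypothetical
transport hooks, not part of this header:

	#include <linux/virtio_config.h>

	/* Hypothetical transport accessors. */
	extern void set_status(unsigned int status);
	extern unsigned int get_status(void);
	extern int negotiate_features(void);

	static int virtio_device_init(void)
	{
		unsigned int status = 0;

		set_status(status |= VIRTIO_CONFIG_S_ACKNOWLEDGE); /* device seen */
		set_status(status |= VIRTIO_CONFIG_S_DRIVER);      /* driver present */

		if (negotiate_features())           /* may include VIRTIO_F_VERSION_1 */
			return -1;

		set_status(status |= VIRTIO_CONFIG_S_FEATURES_OK);
		if (!(get_status() & VIRTIO_CONFIG_S_FEATURES_OK))
			return -1;                  /* device rejected our features */

		set_status(status |= VIRTIO_CONFIG_S_DRIVER_OK);   /* ready for use */
		return 0;
	}
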
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index ba260dd0b33a..b7fb108c9310 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -32,6 +32,7 @@
#ifndef _UAPI_LINUX_VIRTIO_CONSOLE_H
#define _UAPI_LINUX_VIRTIO_CONSOLE_H
#include <linux/types.h>
+#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -58,9 +59,9 @@ struct virtio_console_config {
* particular port.
*/
struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
+ __virtio32 id; /* Port number */
+ __virtio16 event; /* The kind of control event (see below) */
+ __virtio16 value; /* Extra information for the key */
};
/* Some events for control messages */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 172a7f00780c..b5f1677b291c 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_types.h>
#include <linux/if_ether.h>
/* The feature bitmap for virtio net */
@@ -84,17 +85,17 @@ struct virtio_net_hdr {
#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
__u8 gso_type;
- __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
- __u16 gso_size; /* Bytes to append to hdr_len per frame */
- __u16 csum_start; /* Position to start checksumming from */
- __u16 csum_offset; /* Offset after that to place checksum */
+ __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
+ __virtio16 csum_start; /* Position to start checksumming from */
+ __virtio16 csum_offset; /* Offset after that to place checksum */
};
/* This is the version of the header to use when the MRG_RXBUF
* feature has been negotiated. */
struct virtio_net_hdr_mrg_rxbuf {
struct virtio_net_hdr hdr;
- __u16 num_buffers; /* Number of merged rx buffers */
+ __virtio16 num_buffers; /* Number of merged rx buffers */
};
/*
@@ -149,7 +150,7 @@ typedef __u8 virtio_net_ctrl_ack;
* VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
*/
struct virtio_net_ctrl_mac {
- __u32 entries;
+ __virtio32 entries;
__u8 macs[][ETH_ALEN];
} __attribute__((packed));
@@ -193,7 +194,7 @@ struct virtio_net_ctrl_mac {
* specified.
*/
struct virtio_net_ctrl_mq {
- __u16 virtqueue_pairs;
+ __virtio16 virtqueue_pairs;
};
#define VIRTIO_NET_CTRL_MQ 4
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index e5ec1caab82a..35b552c7f330 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -41,6 +41,8 @@
#include <linux/virtio_config.h>
+#ifndef VIRTIO_PCI_NO_LEGACY
+
/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0
@@ -67,16 +69,11 @@
* a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19
-/* The bit of the ISR which indicates a device configuration change. */
-#define VIRTIO_PCI_ISR_CONFIG 0x2
-
/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22
-/* Vector value used to disable MSI for queue */
-#define VIRTIO_MSI_NO_VECTOR 0xffff
/* The remaining space is defined by each driver as the per-driver
* configuration space */
@@ -94,4 +91,12 @@
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096
+
+#endif /* VIRTIO_PCI_NO_LEGACY */
+
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue */
+#define VIRTIO_MSI_NO_VECTOR 0xffff
+
#endif
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a99f9b7caa67..61c818a7fe70 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -32,6 +32,7 @@
*
* Copyright Rusty Russell IBM Corporation 2007. */
#include <linux/types.h>
+#include <linux/virtio_types.h>
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
@@ -61,32 +62,32 @@
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
- __u64 addr;
+ __virtio64 addr;
/* Length. */
- __u32 len;
+ __virtio32 len;
/* The flags as indicated above. */
- __u16 flags;
+ __virtio16 flags;
/* We chain unused descriptors via this, too */
- __u16 next;
+ __virtio16 next;
};
struct vring_avail {
- __u16 flags;
- __u16 idx;
- __u16 ring[];
+ __virtio16 flags;
+ __virtio16 idx;
+ __virtio16 ring[];
};
/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
/* Index of start of used descriptor chain. */
- __u32 id;
+ __virtio32 id;
/* Total length of the descriptor chain which was used (written to) */
- __u32 len;
+ __virtio32 len;
};
struct vring_used {
- __u16 flags;
- __u16 idx;
+ __virtio16 flags;
+ __virtio16 idx;
struct vring_used_elem ring[];
};
@@ -109,25 +110,25 @@ struct vring {
* struct vring_desc desc[num];
*
* // A ring of available descriptor heads with free-running index.
- * __u16 avail_flags;
- * __u16 avail_idx;
- * __u16 available[num];
- * __u16 used_event_idx;
+ * __virtio16 avail_flags;
+ * __virtio16 avail_idx;
+ * __virtio16 available[num];
+ * __virtio16 used_event_idx;
*
* // Padding to the next align boundary.
* char pad[];
*
* // A ring of used descriptor heads with free-running index.
- * __u16 used_flags;
- * __u16 used_idx;
+ * __virtio16 used_flags;
+ * __virtio16 used_idx;
* struct vring_used_elem used[num];
- * __u16 avail_event_idx;
+ * __virtio16 avail_event_idx;
* };
*/
/* We publish the used event index at the end of the available ring, and vice
* versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
-#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
+#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
unsigned long align)
@@ -135,15 +136,15 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
vr->num = num;
vr->desc = p;
vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16)
+ vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
static inline unsigned vring_size(unsigned int num, unsigned long align)
{
- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
+ align - 1) & ~(align - 1))
- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
+ + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
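
Following the layout shown in the comment above, a host-side implementation
sets a ring up with vring_size()/vring_init(). A sketch; the 4096 alignment
mirrors VIRTIO_PCI_VRING_ALIGN and posix_memalign() is simply our choice of
allocator:

	#include <stdlib.h>
	#include <string.h>
	#include <linux/virtio_ring.h>

	#define QUEUE_ALIGN 4096	/* matches VIRTIO_PCI_VRING_ALIGN */

	static struct vring *setup_vring(unsigned int num)
	{
		static struct vring vr;
		size_t bytes = vring_size(num, QUEUE_ALIGN);
		void *mem;

		/* One contiguous, aligned block holds desc[], avail and used. */
		if (posix_memalign(&mem, QUEUE_ALIGN, bytes))
			return NULL;
		memset(mem, 0, bytes);

		/* Fills vr->desc, vr->avail and vr->used per the documented layout. */
		vring_init(&vr, num, mem, QUEUE_ALIGN);
		return &vr;
	}
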
diff --git a/include/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index de429d1f4357..42b9370771b0 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -27,83 +27,85 @@
#ifndef _LINUX_VIRTIO_SCSI_H
#define _LINUX_VIRTIO_SCSI_H
+#include <linux/virtio_types.h>
+
#define VIRTIO_SCSI_CDB_SIZE 32
#define VIRTIO_SCSI_SENSE_SIZE 96
/* SCSI command request, followed by data-out */
struct virtio_scsi_cmd_req {
- u8 lun[8]; /* Logical Unit Number */
- u64 tag; /* Command identifier */
- u8 task_attr; /* Task attribute */
- u8 prio; /* SAM command priority field */
- u8 crn;
- u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-} __packed;
+ __u8 lun[8]; /* Logical Unit Number */
+ __virtio64 tag; /* Command identifier */
+ __u8 task_attr; /* Task attribute */
+ __u8 prio; /* SAM command priority field */
+ __u8 crn;
+ __u8 cdb[VIRTIO_SCSI_CDB_SIZE];
+} __attribute__((packed));
/* SCSI command request, followed by protection information */
struct virtio_scsi_cmd_req_pi {
- u8 lun[8]; /* Logical Unit Number */
- u64 tag; /* Command identifier */
- u8 task_attr; /* Task attribute */
- u8 prio; /* SAM command priority field */
- u8 crn;
- u32 pi_bytesout; /* DataOUT PI Number of bytes */
- u32 pi_bytesin; /* DataIN PI Number of bytes */
- u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-} __packed;
+ __u8 lun[8]; /* Logical Unit Number */
+ __virtio64 tag; /* Command identifier */
+ __u8 task_attr; /* Task attribute */
+ __u8 prio; /* SAM command priority field */
+ __u8 crn;
+ __virtio32 pi_bytesout; /* DataOUT PI Number of bytes */
+ __virtio32 pi_bytesin; /* DataIN PI Number of bytes */
+ __u8 cdb[VIRTIO_SCSI_CDB_SIZE];
+} __attribute__((packed));
/* Response, followed by sense data and data-in */
struct virtio_scsi_cmd_resp {
- u32 sense_len; /* Sense data length */
- u32 resid; /* Residual bytes in data buffer */
- u16 status_qualifier; /* Status qualifier */
- u8 status; /* Command completion status */
- u8 response; /* Response values */
- u8 sense[VIRTIO_SCSI_SENSE_SIZE];
-} __packed;
+ __virtio32 sense_len; /* Sense data length */
+ __virtio32 resid; /* Residual bytes in data buffer */
+ __virtio16 status_qualifier; /* Status qualifier */
+ __u8 status; /* Command completion status */
+ __u8 response; /* Response values */
+ __u8 sense[VIRTIO_SCSI_SENSE_SIZE];
+} __attribute__((packed));
/* Task Management Request */
struct virtio_scsi_ctrl_tmf_req {
- u32 type;
- u32 subtype;
- u8 lun[8];
- u64 tag;
-} __packed;
+ __virtio32 type;
+ __virtio32 subtype;
+ __u8 lun[8];
+ __virtio64 tag;
+} __attribute__((packed));
struct virtio_scsi_ctrl_tmf_resp {
- u8 response;
-} __packed;
+ __u8 response;
+} __attribute__((packed));
/* Asynchronous notification query/subscription */
struct virtio_scsi_ctrl_an_req {
- u32 type;
- u8 lun[8];
- u32 event_requested;
-} __packed;
+ __virtio32 type;
+ __u8 lun[8];
+ __virtio32 event_requested;
+} __attribute__((packed));
struct virtio_scsi_ctrl_an_resp {
- u32 event_actual;
- u8 response;
-} __packed;
+ __virtio32 event_actual;
+ __u8 response;
+} __attribute__((packed));
struct virtio_scsi_event {
- u32 event;
- u8 lun[8];
- u32 reason;
-} __packed;
+ __virtio32 event;
+ __u8 lun[8];
+ __virtio32 reason;
+} __attribute__((packed));
struct virtio_scsi_config {
- u32 num_queues;
- u32 seg_max;
- u32 max_sectors;
- u32 cmd_per_lun;
- u32 event_info_size;
- u32 sense_size;
- u32 cdb_size;
- u16 max_channel;
- u16 max_target;
- u32 max_lun;
-} __packed;
+ __u32 num_queues;
+ __u32 seg_max;
+ __u32 max_sectors;
+ __u32 cmd_per_lun;
+ __u32 event_info_size;
+ __u32 sense_size;
+ __u32 cdb_size;
+ __u16 max_channel;
+ __u16 max_target;
+ __u32 max_lun;
+} __attribute__((packed));
/* Feature Bits */
#define VIRTIO_SCSI_F_INOUT 0
diff --git a/include/uapi/linux/virtio_types.h b/include/uapi/linux/virtio_types.h
new file mode 100644
index 000000000000..e845e8c4cbee
--- /dev/null
+++ b/include/uapi/linux/virtio_types.h
@@ -0,0 +1,46 @@
+#ifndef _UAPI_LINUX_VIRTIO_TYPES_H
+#define _UAPI_LINUX_VIRTIO_TYPES_H
+/* Type definitions for virtio implementations.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 2014 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ */
+#include <linux/types.h>
+
+/*
+ * __virtio{16,32,64} have the following meaning:
+ * - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian
+ * - __le{16,32,64} for standard-compliant virtio devices
+ */
+
+typedef __u16 __bitwise__ __virtio16;
+typedef __u32 __bitwise__ __virtio32;
+typedef __u64 __bitwise__ __virtio64;
+
+#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */
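
The __virtio{16,32,64} types above change meaning with the device mode, so
implementations outside the kernel typically wrap the conversion in helpers
keyed on whether VIRTIO_F_VERSION_1 was negotiated. A userspace-style sketch
for the 16-bit case:

	#include <endian.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* __virtio16 is little-endian for 1.0 devices and native-endian for
	 * legacy devices, so the accessor needs to know which was negotiated. */
	static inline uint16_t virtio16_to_cpu(bool little_endian, uint16_t val)
	{
		return little_endian ? le16toh(val) : val;
	}

	static inline uint16_t cpu_to_virtio16(bool little_endian, uint16_t val)
	{
		return little_endian ? htole16(val) : val;
	}
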
diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h
index 4b59a26799a3..978578bd1895 100644
--- a/include/uapi/linux/vt.h
+++ b/include/uapi/linux/vt.h
@@ -84,7 +84,4 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
-
-#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
-
#endif /* _UAPI_LINUX_VT_H */
diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h
new file mode 100644
index 000000000000..6a3cddd156c4
--- /dev/null
+++ b/include/uapi/linux/wil6210_uapi.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_UAPI_H__
+#define __WIL6210_UAPI_H__
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#include <linux/sockios.h>
+
+/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1
+ * are used by Android devices to implement PNO (preferred network offload).
+ * Although this is a temporary solution, use different numbers to avoid conflicts
+ */
+
+/**
+ * Perform 32-bit I/O operation to the card memory
+ *
+ * User code should arrange data in memory like this:
+ *
+ * struct wil_memio io;
+ * struct ifreq ifr = {
+ * .ifr_data = &io,
+ * };
+ */
+#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2)
+
+/**
+ * Perform block I/O operation to the card memory
+ *
+ * User code should arrange data in memory like this:
+ *
+ * void *buf;
+ * struct wil_memio_block io = {
+ * .block = buf,
+ * };
+ * struct ifreq ifr = {
+ * .ifr_data = &io,
+ * };
+ */
+#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3)
+
+/**
+ * operation to perform
+ *
+ * @wil_mmio_op_mask - bits defining operation,
+ * @wil_mmio_addr_mask - bits defining addressing mode
+ */
+enum wil_memio_op {
+ wil_mmio_read = 0,
+ wil_mmio_write = 1,
+ wil_mmio_op_mask = 0xff,
+ wil_mmio_addr_linker = 0 << 8,
+ wil_mmio_addr_ahb = 1 << 8,
+ wil_mmio_addr_bar = 2 << 8,
+ wil_mmio_addr_mask = 0xff00,
+};
+
+struct wil_memio {
+ uint32_t op; /* enum wil_memio_op */
+ uint32_t addr; /* should be 32-bit aligned */
+ uint32_t val;
+};
+
+struct wil_memio_block {
+ uint32_t op; /* enum wil_memio_op */
+ uint32_t addr; /* should be 32-bit aligned */
+ uint32_t size; /* should be multiple of 4 */
+ void __user *block; /* block address */
+};
+
+#endif /* __WIL6210_UAPI_H__ */
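
For illustration, a minimal user-space sketch of the single-word read path described by the WIL_IOCTL_MEMIO comment above. This is a hedged sketch, not part of the patch: the interface name, the install path of the header, and the target address are placeholders.

/* Hypothetical user-space sketch: read one 32-bit word from card memory
 * via WIL_IOCTL_MEMIO. "wlan0" and the address are placeholders. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/wil6210_uapi.h>

static int wil_read_word(const char *ifname, uint32_t addr, uint32_t *val)
{
	struct wil_memio io = {
		.op = wil_mmio_read | wil_mmio_addr_linker,
		.addr = addr,			/* must be 32-bit aligned */
	};
	struct ifreq ifr;
	int fd, ret;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&io;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, WIL_IOCTL_MEMIO, &ifr);
	if (ret == 0)
		*val = io.val;
	close(fd);
	return ret;
}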
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 25e5dd916ba4..02d5125a5ee8 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -328,6 +328,8 @@ enum xfrm_spdattr_type_t {
XFRMA_SPD_UNSPEC,
XFRMA_SPD_INFO,
XFRMA_SPD_HINFO,
+ XFRMA_SPD_IPV4_HTHRESH,
+ XFRMA_SPD_IPV6_HTHRESH,
__XFRMA_SPD_MAX
#define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1)
@@ -347,6 +349,11 @@ struct xfrmu_spdhinfo {
__u32 spdhmcnt;
};
+struct xfrmu_spdhthresh {
+ __u8 lbits;
+ __u8 rbits;
+};
+
struct xfrm_usersa_info {
struct xfrm_selector sel;
struct xfrm_id id;
diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild
new file mode 100644
index 000000000000..e96cae7d58c9
--- /dev/null
+++ b/include/uapi/misc/Kbuild
@@ -0,0 +1,2 @@
+# misc Header export list
+header-y += cxl.h
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
new file mode 100644
index 000000000000..cd6d789b73ec
--- /dev/null
+++ b/include/uapi/misc/cxl.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_MISC_CXL_H
+#define _UAPI_MISC_CXL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+
+struct cxl_ioctl_start_work {
+ __u64 flags;
+ __u64 work_element_descriptor;
+ __u64 amr;
+ __s16 num_interrupts;
+ __s16 reserved1;
+ __s32 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
+#define CXL_START_WORK_AMR 0x0000000000000001ULL
+#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
+#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
+ CXL_START_WORK_NUM_IRQS)
+
+/* ioctl numbers */
+#define CXL_MAGIC 0xCA
+#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
+#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+
+#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
+
+/* Events from read() */
+enum cxl_event_type {
+ CXL_EVENT_RESERVED = 0,
+ CXL_EVENT_AFU_INTERRUPT = 1,
+ CXL_EVENT_DATA_STORAGE = 2,
+ CXL_EVENT_AFU_ERROR = 3,
+};
+
+struct cxl_event_header {
+ __u16 type;
+ __u16 size;
+ __u16 process_element;
+ __u16 reserved1;
+};
+
+struct cxl_event_afu_interrupt {
+ __u16 flags;
+ __u16 irq; /* Raised AFU interrupt number */
+ __u32 reserved1;
+};
+
+struct cxl_event_data_storage {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 addr;
+ __u64 dsisr;
+ __u64 reserved3;
+};
+
+struct cxl_event_afu_error {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 error;
+};
+
+struct cxl_event {
+ struct cxl_event_header header;
+ union {
+ struct cxl_event_afu_interrupt irq;
+ struct cxl_event_data_storage fault;
+ struct cxl_event_afu_error afu_error;
+ };
+};
+
+#endif /* _UAPI_MISC_CXL_H */
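
A minimal user-space sketch of the flow these definitions imply: start work on an AFU context, then read a single event. This is an illustration under stated assumptions, not the driver's documented usage; the device node path and the WED value passed by the caller are placeholders.

/* Hypothetical sketch: attach to an AFU context and wait for one event. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>

static int cxl_start_and_wait(const char *path, __u64 wed)
{
	struct cxl_ioctl_start_work work = {
		.flags = 0,			/* no AMR, default IRQ count */
		.work_element_descriptor = wed,
	};
	union {
		char raw[CXL_READ_MIN_SIZE];	/* reads must be at least 4K */
		struct cxl_event ev;
	} buf;
	int fd, ret = -1;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;
	if (ioctl(fd, CXL_IOCTL_START_WORK, &work) == 0 &&
	    read(fd, &buf, sizeof(buf)) > 0 &&
	    buf.ev.header.type == CXL_EVENT_AFU_INTERRUPT)
		ret = buf.ev.irq.irq;		/* which AFU interrupt fired */
	close(fd);
	return ret;
}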
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 26daf55ff76e..4275b961bf60 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,8 +90,9 @@ enum {
};
enum {
+ IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
- IB_USER_VERBS_EX_CMD_DESTROY_FLOW
+ IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
/*
@@ -201,6 +202,32 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
+enum {
+ IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
+};
+
+struct ib_uverbs_ex_query_device {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+ __u64 general_caps;
+ struct {
+ __u32 rc_odp_caps;
+ __u32 uc_odp_caps;
+ __u32 ud_odp_caps;
+ } per_transport_caps;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+ struct ib_uverbs_query_device_resp base;
+ __u32 comp_mask;
+ __u32 reserved;
+ struct ib_uverbs_odp_caps odp_caps;
+};
+
struct ib_uverbs_query_port {
__u64 response;
__u8 port_num;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 32168f7ffce3..1f23cd635957 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -96,9 +96,10 @@ enum {
SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */
SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based device */
SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */
+ SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */
/* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_BEBOB
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_OXFW
};
struct snd_hwdep_info {
@@ -219,7 +220,10 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */
#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
-#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U16_LE
+#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
+#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
+#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
+#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 1964026b5e09..22ed8cb7800b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -32,7 +32,7 @@
#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
/**
- * struct snd_compressed_buffer: compressed buffer
+ * struct snd_compressed_buffer - compressed buffer
* @fragment_size: size of buffer fragment in bytes
* @fragments: number of such fragments
*/
@@ -42,7 +42,7 @@ struct snd_compressed_buffer {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_params: compressed stream params
+ * struct snd_compr_params - compressed stream params
* @buffer: buffer description
* @codec: codec parameters
* @no_wake_mode: dont wake on fragment elapsed
@@ -54,7 +54,7 @@ struct snd_compr_params {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_tstamp: timestamp descriptor
+ * struct snd_compr_tstamp - timestamp descriptor
* @byte_offset: Byte offset in ring buffer to DSP
* @copied_total: Total number of bytes copied from/to ring buffer to/by DSP
* @pcm_frames: Frames decoded or encoded by DSP. This field will evolve by
@@ -73,7 +73,7 @@ struct snd_compr_tstamp {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_avail: avail descriptor
+ * struct snd_compr_avail - avail descriptor
* @avail: Number of bytes available in ring buffer for writing/reading
* @tstamp: timestamp information
*/
@@ -88,7 +88,7 @@ enum snd_compr_direction {
};
/**
- * struct snd_compr_caps: caps descriptor
+ * struct snd_compr_caps - caps descriptor
* @codecs: pointer to array of codecs
* @direction: direction supported. Of type snd_compr_direction
* @min_fragment_size: minimum fragment supported by DSP
@@ -110,7 +110,7 @@ struct snd_compr_caps {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_codec_caps: query capability of codec
+ * struct snd_compr_codec_caps - query capability of codec
* @codec: codec for which capability is queried
* @num_descriptors: number of codec descriptors
* @descriptor: array of codec capability descriptor
@@ -122,18 +122,19 @@ struct snd_compr_codec_caps {
} __attribute__((packed, aligned(4)));
/**
+ * enum sndrv_compress_encoder
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
* beginning of the track
*/
-enum {
+enum sndrv_compress_encoder {
SNDRV_COMPRESS_ENCODER_PADDING = 1,
SNDRV_COMPRESS_ENCODER_DELAY = 2,
};
/**
- * struct snd_compr_metadata: compressed stream metadata
+ * struct snd_compr_metadata - compressed stream metadata
* @key: key id
* @value: key value
*/
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index af4bd136c75d..49122df3b56b 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -55,7 +55,8 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
#define SNDRV_FIREWIRE_TYPE_BEBOB 3
-/* AV/C, RME, MOTU, ... */
+#define SNDRV_FIREWIRE_TYPE_OXFW 4
+/* RME, MOTU, ... */
struct snd_firewire_get_info {
unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index d956c3593f65..b357f1a5e29c 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -74,14 +74,14 @@ struct hdspm_config {
#define SNDRV_HDSPM_IOCTL_GET_CONFIG \
_IOR('H', 0x41, struct hdspm_config)
-/**
+/*
* If there's a TCO (TimeCode Option) board installed,
* there are further options and status data available.
* The hdspm_ltc structure contains the current SMPTE
* timecode and some status information and can be
* obtained via SNDRV_HDSPM_IOCTL_GET_LTC or in the
* hdspm_status struct.
- **/
+ */
enum hdspm_ltc_format {
format_invalid,
@@ -113,11 +113,11 @@ struct hdspm_ltc {
#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc)
-/**
+/*
* The status data reflects the device's current state
* as determined by the card's configuration and
* connection status.
- **/
+ */
enum hdspm_sync {
hdspm_sync_no_lock = 0,
@@ -171,9 +171,9 @@ struct hdspm_status {
#define SNDRV_HDSPM_IOCTL_GET_STATUS \
_IOR('H', 0x47, struct hdspm_status)
-/**
+/*
* Get information about the card and its add-ons.
- **/
+ */
#define HDSPM_ADDON_TCO 1
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 3e43e22cdff9..c74bf4a0520e 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -16,6 +16,7 @@
#include <linux/videodev2.h>
#include <linux/bitmap.h>
#include <linux/fb.h>
+#include <media/v4l2-mediabus.h>
struct ipu_soc;
@@ -61,6 +62,29 @@ struct ipu_di_signal_cfg {
u8 vsync_pin;
};
+/*
+ * Enumeration of CSI destinations
+ */
+enum ipu_csi_dest {
+ IPU_CSI_DEST_IDMAC, /* to memory via SMFC */
+ IPU_CSI_DEST_IC, /* to Image Converter */
+ IPU_CSI_DEST_VDIC, /* to VDIC */
+};
+
+/*
+ * Enumeration of IPU rotation modes
+ */
+enum ipu_rotate_mode {
+ IPU_ROTATE_NONE = 0,
+ IPU_ROTATE_VERT_FLIP,
+ IPU_ROTATE_HORIZ_FLIP,
+ IPU_ROTATE_180,
+ IPU_ROTATE_90_RIGHT,
+ IPU_ROTATE_90_RIGHT_VFLIP,
+ IPU_ROTATE_90_RIGHT_HFLIP,
+ IPU_ROTATE_90_LEFT,
+};
+
enum ipu_color_space {
IPUV3_COLORSPACE_RGB,
IPUV3_COLORSPACE_YUV,
@@ -76,6 +100,36 @@ enum ipu_channel_irq {
IPU_IRQ_EOS = 192,
};
+/*
+ * Enumeration of IDMAC channels
+ */
+#define IPUV3_CHANNEL_CSI0 0
+#define IPUV3_CHANNEL_CSI1 1
+#define IPUV3_CHANNEL_CSI2 2
+#define IPUV3_CHANNEL_CSI3 3
+#define IPUV3_CHANNEL_VDI_MEM_IC_VF 5
+#define IPUV3_CHANNEL_MEM_IC_PP 11
+#define IPUV3_CHANNEL_MEM_IC_PRP_VF 12
+#define IPUV3_CHANNEL_G_MEM_IC_PRP_VF 14
+#define IPUV3_CHANNEL_G_MEM_IC_PP 15
+#define IPUV3_CHANNEL_IC_PRP_ENC_MEM 20
+#define IPUV3_CHANNEL_IC_PRP_VF_MEM 21
+#define IPUV3_CHANNEL_IC_PP_MEM 22
+#define IPUV3_CHANNEL_MEM_BG_SYNC 23
+#define IPUV3_CHANNEL_MEM_BG_ASYNC 24
+#define IPUV3_CHANNEL_MEM_FG_SYNC 27
+#define IPUV3_CHANNEL_MEM_DC_SYNC 28
+#define IPUV3_CHANNEL_MEM_FG_ASYNC 29
+#define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA 31
+#define IPUV3_CHANNEL_MEM_DC_ASYNC 41
+#define IPUV3_CHANNEL_MEM_ROT_ENC 45
+#define IPUV3_CHANNEL_MEM_ROT_VF 46
+#define IPUV3_CHANNEL_MEM_ROT_PP 47
+#define IPUV3_CHANNEL_ROT_ENC_MEM 48
+#define IPUV3_CHANNEL_ROT_VF_MEM 49
+#define IPUV3_CHANNEL_ROT_PP_MEM 50
+#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51
+
int ipu_map_irq(struct ipu_soc *ipu, int irq);
int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
enum ipu_channel_irq irq);
@@ -93,6 +147,13 @@ int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
#define IPU_IRQ_VSYNC_PRE_1 (448 + 15)
/*
+ * IPU Common functions
+ */
+void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2);
+void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi);
+void ipu_dump(struct ipu_soc *ipu);
+
+/*
* IPU Image DMA Controller (idmac) functions
*/
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned channel);
@@ -100,12 +161,58 @@ void ipu_idmac_put(struct ipuv3_channel *);
int ipu_idmac_enable_channel(struct ipuv3_channel *channel);
int ipu_idmac_disable_channel(struct ipuv3_channel *channel);
+void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable);
+int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts);
int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms);
void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
bool doublebuffer);
int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
+bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
+void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
+
+/*
+ * IPU Channel Parameter Memory (cpmem) functions
+ */
+struct ipu_rgb {
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+ int bits_per_pixel;
+};
+
+struct ipu_image {
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ dma_addr_t phys0;
+ dma_addr_t phys1;
+};
+
+void ipu_cpmem_zero(struct ipuv3_channel *ch);
+void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres);
+void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
+void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
+void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
+void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
+void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
+void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
+void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
+void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
+ enum ipu_rotate_mode rot);
+int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
+ const struct ipu_rgb *rgb);
+int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
+void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
+void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
+ u32 pixel_format, int stride,
+ int u_offset, int v_offset);
+void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
+ u32 pixel_format, int stride, int height);
+int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
+int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
+void ipu_cpmem_dump(struct ipuv3_channel *ch);
/*
* IPU Display Controller (dc) functions
@@ -169,171 +276,78 @@ int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
/*
* IPU CMOS Sensor Interface (csi) functions
*/
-int ipu_csi_enable(struct ipu_soc *ipu, int csi);
-int ipu_csi_disable(struct ipu_soc *ipu, int csi);
+struct ipu_csi;
+int ipu_csi_init_interface(struct ipu_csi *csi,
+ struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_mbus_framefmt *mbus_fmt);
+bool ipu_csi_is_interlaced(struct ipu_csi *csi);
+void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
+ u32 r_value, u32 g_value, u32 b_value,
+ u32 pix_clk);
+int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
+ struct v4l2_mbus_framefmt *mbus_fmt);
+int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
+ u32 max_ratio, u32 id);
+int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest);
+int ipu_csi_enable(struct ipu_csi *csi);
+int ipu_csi_disable(struct ipu_csi *csi);
+struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id);
+void ipu_csi_put(struct ipu_csi *csi);
+void ipu_csi_dump(struct ipu_csi *csi);
/*
- * IPU Sensor Multiple FIFO Controller (SMFC) functions
+ * IPU Image Converter (ic) functions
*/
-int ipu_smfc_enable(struct ipu_soc *ipu);
-int ipu_smfc_disable(struct ipu_soc *ipu);
-int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id);
-int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize);
-
-#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
-
-#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
-#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22)
-#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4)
-#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1)
-#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1)
-#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14)
-#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14)
-
-#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10)
-#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9)
-#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13)
-#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12)
-#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1)
-#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1)
-#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12)
-#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11)
-#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10)
-#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7)
-#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10)
-#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1)
-#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1)
-#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7)
-#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1)
-#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1)
-#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3)
-#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2)
-#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1)
-#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
-#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
-#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
-#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
-#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
-#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
-#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1)
-#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1)
-#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13)
-#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12)
-#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29)
-#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29)
-#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20)
-#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7)
-#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4)
-#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1)
-#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3)
-#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2)
-#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7)
-#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14)
-#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3)
-#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3)
-#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3)
-#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3)
-#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 128, 5)
-#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5)
-#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5)
-#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5)
-#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1)
-#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1)
-#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1)
-
-struct ipu_cpmem_word {
- u32 data[5];
- u32 res[3];
-};
-
-struct ipu_ch_param {
- struct ipu_cpmem_word word[2];
-};
-
-void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v);
-u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs);
-struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel);
-void ipu_ch_param_dump(struct ipu_ch_param __iomem *p);
-
-static inline void ipu_ch_param_zero(struct ipu_ch_param __iomem *p)
-{
- int i;
- void __iomem *base = p;
-
- for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
- writel(0, base + i * sizeof(u32));
-}
-
-static inline void ipu_cpmem_set_buffer(struct ipu_ch_param __iomem *p,
- int bufnum, dma_addr_t buf)
-{
- if (bufnum)
- ipu_ch_param_write_field(p, IPU_FIELD_EBA1, buf >> 3);
- else
- ipu_ch_param_write_field(p, IPU_FIELD_EBA0, buf >> 3);
-}
-
-static inline void ipu_cpmem_set_resolution(struct ipu_ch_param __iomem *p,
- int xres, int yres)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_FW, xres - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_FH, yres - 1);
-}
-
-static inline void ipu_cpmem_set_stride(struct ipu_ch_param __iomem *p,
- int stride)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_SLY, stride - 1);
-}
-
-void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel);
-
-struct ipu_rgb {
- struct fb_bitfield red;
- struct fb_bitfield green;
- struct fb_bitfield blue;
- struct fb_bitfield transp;
- int bits_per_pixel;
-};
-
-struct ipu_image {
- struct v4l2_pix_format pix;
- struct v4l2_rect rect;
- dma_addr_t phys;
+enum ipu_ic_task {
+ IC_TASK_ENCODER,
+ IC_TASK_VIEWFINDER,
+ IC_TASK_POST_PROCESSOR,
+ IC_NUM_TASKS,
};
-int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
- int width);
-
-int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *,
- const struct ipu_rgb *rgb);
-
-static inline void ipu_cpmem_interlaced_scan(struct ipu_ch_param *p,
- int stride)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_SO, 1);
- ipu_ch_param_write_field(p, IPU_FIELD_ILO, stride / 8);
- ipu_ch_param_write_field(p, IPU_FIELD_SLY, (stride * 2) - 1);
-};
+struct ipu_ic;
+int ipu_ic_task_init(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs);
+int ipu_ic_task_graphics_init(struct ipu_ic *ic,
+ enum ipu_color_space in_g_cs,
+ bool galpha_en, u32 galpha,
+ bool colorkey_en, u32 colorkey);
+void ipu_ic_task_enable(struct ipu_ic *ic);
+void ipu_ic_task_disable(struct ipu_ic *ic);
+int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
+ u32 width, u32 height, int burst_size,
+ enum ipu_rotate_mode rot);
+int ipu_ic_enable(struct ipu_ic *ic);
+int ipu_ic_disable(struct ipu_ic *ic);
+struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task);
+void ipu_ic_put(struct ipu_ic *ic);
+void ipu_ic_dump(struct ipu_ic *ic);
-void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
- int stride, int height);
-void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
- u32 pixel_format);
-void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
- u32 pixel_format, int stride, int u_offset, int v_offset);
-int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat);
-int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
- struct ipu_image *image);
+/*
+ * IPU Sensor Multiple FIFO Controller (SMFC) functions
+ */
+struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno);
+void ipu_smfc_put(struct ipu_smfc *smfc);
+int ipu_smfc_enable(struct ipu_smfc *smfc);
+int ipu_smfc_disable(struct ipu_smfc *smfc);
+int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id);
+int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize);
+int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level);
enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
-
-static inline void ipu_cpmem_set_burstsize(struct ipu_ch_param __iomem *p,
- int burstsize)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_NPB, burstsize - 1);
-};
+enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code);
+int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat);
+bool ipu_pixelformat_is_planar(u32 pixelformat);
+int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
+ bool hflip, bool vflip);
+int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
+ bool hflip, bool vflip);
struct ipu_client_platformdata {
int csi;
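
As a rough illustration of how the new CSI/SMFC accessors declared above fit together, here is a hedged in-kernel sketch that routes CSI0 capture to memory through the SMFC. It assumes a valid struct ipu_soc pointer and omits error handling and the normal ipu_csi_init_interface() setup.

/* Hypothetical in-kernel sketch: capture from CSI0 to memory via the SMFC.
 * Error handling and the matching *_disable()/*_put() teardown are omitted. */
static void example_csi0_to_memory(struct ipu_soc *ipu)
{
	struct ipu_csi *csi = ipu_csi_get(ipu, 0);
	struct ipu_smfc *smfc = ipu_smfc_get(ipu, IPUV3_CHANNEL_CSI0);

	ipu_set_csi_src_mux(ipu, 0, false);		/* parallel input, not MIPI CSI-2 */
	ipu_csi_set_dest(csi, IPU_CSI_DEST_IDMAC);	/* capture to memory via SMFC */
	ipu_smfc_map_channel(smfc, 0, 0);		/* CSI id 0, MIPI vc 0 */
	ipu_smfc_set_burstsize(smfc, 8);
	ipu_smfc_enable(smfc);
	ipu_csi_enable(csi);
}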
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index 79e6697af6cf..ea755b5616d8 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -15,9 +15,25 @@ struct display_timings;
#define OF_USE_NATIVE_MODE -1
+#ifdef CONFIG_OF
int of_get_display_timing(struct device_node *np, const char *name,
struct display_timing *dt);
struct display_timings *of_get_display_timings(struct device_node *np);
int of_display_timings_exist(struct device_node *np);
+#else
+static inline int of_get_display_timing(struct device_node *np, const char *name,
+ struct display_timing *dt)
+{
+ return -ENOSYS;
+}
+static inline struct display_timings *of_get_display_timings(struct device_node *np)
+{
+ return NULL;
+}
+static inline int of_display_timings_exist(struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
#endif
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 069dfca9549a..6a84498ea513 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -166,13 +166,6 @@ enum omap_dss_display_state {
OMAP_DSS_DISPLAY_ACTIVE,
};
-enum omap_dss_audio_state {
- OMAP_DSS_AUDIO_DISABLED = 0,
- OMAP_DSS_AUDIO_ENABLED,
- OMAP_DSS_AUDIO_CONFIGURED,
- OMAP_DSS_AUDIO_PLAYING,
-};
-
struct omap_dss_audio {
struct snd_aes_iec958 *iec;
struct snd_cea_861_aud_if *cea;
@@ -635,19 +628,6 @@ struct omapdss_hdmi_ops {
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
-
- /*
- * Note: These functions might sleep. Do not call while
- * holding a spinlock/readlock.
- */
- int (*audio_enable)(struct omap_dss_device *dssdev);
- void (*audio_disable)(struct omap_dss_device *dssdev);
- bool (*audio_supported)(struct omap_dss_device *dssdev);
- int (*audio_config)(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio);
- /* Note: These functions may not sleep */
- int (*audio_start)(struct omap_dss_device *dssdev);
- void (*audio_stop)(struct omap_dss_device *dssdev);
};
struct omapdss_dsi_ops {
@@ -783,8 +763,6 @@ struct omap_dss_device {
enum omap_dss_display_state state;
- enum omap_dss_audio_state audio_state;
-
/* OMAP DSS output specific fields */
struct list_head list;
@@ -795,6 +773,9 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
+ /* the port number in the DT node */
+ int port_num;
+
/* dynamic fields */
struct omap_overlay_manager *manager;
@@ -858,24 +839,6 @@ struct omap_dss_driver {
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
-
- /*
- * For display drivers that support audio. This encompasses
- * HDMI and DisplayPort at the moment.
- */
- /*
- * Note: These functions might sleep. Do not call while
- * holding a spinlock/readlock.
- */
- int (*audio_enable)(struct omap_dss_device *dssdev);
- void (*audio_disable)(struct omap_dss_device *dssdev);
- bool (*audio_supported)(struct omap_dss_device *dssdev);
- int (*audio_config)(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio);
- /* Note: These functions may not sleep */
- int (*audio_start)(struct omap_dss_device *dssdev);
- void (*audio_stop)(struct omap_dss_device *dssdev);
-
};
enum omapdss_version omapdss_get_version(void);
@@ -918,7 +881,7 @@ int omapdss_register_output(struct omap_dss_device *output);
void omapdss_unregister_output(struct omap_dss_device *output);
struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
struct omap_dss_device *omap_dss_find_output(const char *name);
-struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
int omapdss_output_set_device(struct omap_dss_device *out,
struct omap_dss_device *dssdev);
int omapdss_output_unset_device(struct omap_dss_device *out);
diff --git a/include/xen/events.h b/include/xen/events.h
index 8bee7a75e850..5321cd9636e6 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -28,6 +28,8 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
+int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ unsigned int remote_port);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
unsigned int remote_port,
irq_handler_t handler,
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 6f4eae328ca7..f90b03454659 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -3,6 +3,24 @@
*
* Definitions used for the Xen ELF notes.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/
@@ -18,12 +36,13 @@
*
* LEGACY indicated the fields in the legacy __xen_guest string which
* this a note type replaces.
+ *
+ * String values (for non-legacy) are NULL-terminated ASCII, also known
+ * as ASCIZ type.
*/
/*
* NAME=VALUE pair (string).
- *
- * LEGACY: FEATURES and PAE
*/
#define XEN_ELFNOTE_INFO 0
@@ -137,10 +156,30 @@
/*
* Whether or not the guest supports cooperative suspend cancellation.
+ * This is a numeric value.
+ *
+ * Default is 0
*/
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
/*
+ * The (non-default) location the initial phys-to-machine map should be
+ * placed at by the hypervisor (Dom0) or the tools (DomU).
+ * The kernel must be prepared for this mapping to be established using
+ * large pages, despite such otherwise not being available to guests.
+ * The kernel must also be able to handle the page table pages used for
+ * this mapping not being accessible through the initial mapping.
+ * (Only x86-64 supports this at present.)
+ */
+#define XEN_ELFNOTE_INIT_P2M 15
+
+/*
+ * Whether or not the guest can deal with being passed an initrd not
+ * mapped through its initial page tables.
+ */
+#define XEN_ELFNOTE_MOD_START_PFN 16
+
+/*
* The features supported by this kernel (numeric).
*
* Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a
@@ -153,6 +192,11 @@
*/
#define XEN_ELFNOTE_SUPPORTED_FEATURES 17
+/*
+ * The number of the highest elfnote defined.
+ */
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES
+
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
/*
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 14334d0161d5..131a6ccdba25 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,9 +53,6 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
-/* Xen also maps grant references at pfn = mfn */
-#define XENFEAT_grant_map_identity 12
-
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index e40fae9bf11a..bcce56439d64 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -479,6 +479,25 @@ struct gnttab_get_version {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+#define GNTTABOP_cache_flush 12
+struct gnttab_cache_flush {
+ union {
+ uint64_t dev_bus_addr;
+ grant_ref_t ref;
+ } a;
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1<<0)
+#define GNTTAB_CACHE_INVAL (1<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+ uint32_t op;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
+
+/*
* Bitfield values for update_pin_status.flags.
*/
/* Map the grant entry for access by I/O devices. */
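
To make the new GNTTABOP_cache_flush operation concrete, a hedged in-kernel sketch of a single clean-by-grant-reference flush; "gref" is assumed to be a grant reference received from the foreign domain.

/* Hypothetical sketch: clean the first 64 bytes of a granted page,
 * addressing it by grant reference (GNTTAB_CACHE_SOURCE_GREF) rather
 * than by bus address. */
static int example_cache_clean(grant_ref_t gref)
{
	struct gnttab_cache_flush flush = {
		.a.ref  = gref,
		.offset = 0,
		.length = 64,
		.op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_SOURCE_GREF,
	};

	return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &flush, 1);
}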
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 000000000000..d07d7aca8d1c
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,229 @@
+/******************************************************************************
+ * vscsiif.h
+ *
+ * Based on the blkif.h code.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * p-devname
+ * Values: string
+ *
+ * A free string used to identify the physical device (e.g. a disk name).
+ *
+ * p-dev
+ * Values: string
+ *
+ * A string specifying the backend device: either a 4-tuple "h:c:t:l"
+ * (host, controller, target, lun, all integers), or a WWN (e.g.
+ * "naa.60014054ac780582").
+ *
+ * v-dev
+ * Values: string
+ *
+ * A string specifying the frontend device in form of a 4-tuple "h:c:t:l"
+ * (host, controller, target, lun, all integers).
+ *
+ *--------------------------------- Features ---------------------------------
+ *
+ * feature-sg-grant
+ * Values: unsigned [VSCSIIF_SG_TABLESIZE...65535]
+ * Default Value: 0
+ *
+ * Specifies the maximum number of scatter/gather elements in grant pages
+ * supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE
+ * SG elements specified directly in the request.
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ */
+
+/* Requests from the frontend to the backend */
+
+/*
+ * Request a SCSI operation specified via a CDB in vscsiif_request.cmnd.
+ * The target is specified via channel, id and lun.
+ *
+ * The operation to be performed is specified via a CDB in cmnd[], the length
+ * of the CDB is in cmd_len. sc_data_direction specifies the direction of data
+ * (to the device, from the device, or none at all).
+ *
+ * If data is to be transferred to or from the device the buffer(s) in the
+ * guest memory is/are specified via one or multiple scsiif_request_segment
+ * descriptors each specifying a memory page via a grant_ref_t, a offset into
+ * the page and the length of the area in that page. All scsiif_request_segment
+ * areas concatenated form the resulting data buffer used by the operation.
+ * If the number of scsiif_request_segment areas is not too large (less than
+ * or equal to VSCSIIF_SG_TABLESIZE), the areas can be specified directly in the
+ * seg[] array and the number of valid scsiif_request_segment elements is to be
+ * set in nr_segments.
+ *
+ * If "feature-sg-grant" in the Xenstore is set it is possible to specify more
+ * than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection.
+ * The maximum number of allowed scsiif_request_segment elements is the value
+ * of the "feature-sg-grant" entry from Xenstore. When using indirection the
+ * seg[] array doesn't contain specifications of the data buffers, but
+ * references to scsiif_request_segment arrays, which in turn reference the
+ * data buffers. While nr_segments holds the number of populated seg[] entries
+ * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
+ * elements referencing the target data buffers is calculated from the lengths
+ * of the seg[] elements (the sum of all valid seg[].length divided by the
+ * size of one scsiif_request_segment structure).
+ */
+#define VSCSIIF_ACT_SCSI_CDB 1
+
+/*
+ * Request abort of a running operation for the specified target given by
+ * channel, id, lun and the operation's rqid in ref_rqid.
+ */
+#define VSCSIIF_ACT_SCSI_ABORT 2
+
+/*
+ * Request a device reset of the specified target (channel and id).
+ */
+#define VSCSIIF_ACT_SCSI_RESET 3
+
+/*
+ * Preset scatter/gather elements for a following request. Deprecated.
+ * Keeping the define only to avoid usage of the value "4" for other actions.
+ */
+#define VSCSIIF_ACT_SCSI_SG_PRESET 4
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * Considering the balance between allocating at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter/gather
+ * elements needed, we decided to use 26 as a magic number.
+ *
+ * If "feature-sg-grant" is set, more scatter/gather elements can be specified
+ * by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages.
+ * In this case the vscsiif_request seg elements don't contain references to
+ * the user data, but to the SG elements referencing the user data.
+ */
+#define VSCSIIF_SG_TABLESIZE 26
+
+/*
+ * based on Linux kernel 2.6.18, still valid
+ * Changing these values requires support of multiple protocols via the rings
+ * as "old clients" will blindly use these values and the resulting structure
+ * sizes.
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE 16
+#define VSCSIIF_SENSE_BUFFERSIZE 96
+
+struct scsiif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
+
+/* Size of one request is 252 bytes */
+struct vscsiif_request {
+ uint16_t rqid; /* private guest value, echoed in resp */
+ uint8_t act; /* command between backend and frontend */
+ uint8_t cmd_len; /* valid CDB bytes */
+
+ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */
+ uint16_t timeout_per_command; /* deprecated */
+ uint16_t channel, id, lun; /* (virtual) device specification */
+ uint16_t ref_rqid; /* command abort reference */
+ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
+ DMA_FROM_DEVICE(2)
+ DMA_NONE(3) requests */
+ uint8_t nr_segments; /* Number of pieces of scatter-gather */
+/*
+ * flag in nr_segments: SG elements via grant page
+ *
+ * If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number
+ * of grant pages containing SG elements. Usable if "feature-sg-grant" is set.
+ */
+#define VSCSIIF_SG_GRANT 0x80
+
+ struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
+ uint32_t reserved[3];
+};
+
+/* Size of one response is 252 bytes */
+struct vscsiif_response {
+ uint16_t rqid; /* identifies request */
+ uint8_t padding;
+ uint8_t sense_len;
+ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+ int32_t rslt;
+ uint32_t residual_len; /* request bufflen -
+ return the value from physical device */
+ uint32_t reserved[36];
+};
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
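
A hedged frontend-side sketch of queuing one request on the shared ring, using the standard ring macros from ring.h. The ring, request id and event-channel irq are assumed to come from the frontend's existing connection setup; this is an illustration, not the pvSCSI frontend's actual code.

/* Hypothetical frontend sketch: queue one TEST UNIT READY (no data phase)
 * for the virtual device 0:0:1:0 on an already-connected ring. */
static void example_queue_tur(struct vscsiif_front_ring *ring,
			      uint16_t rqid, int irq)
{
	struct vscsiif_request *req;
	int notify;

	req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
	memset(req, 0, sizeof(*req));
	req->rqid = rqid;
	req->act = VSCSIIF_ACT_SCSI_CDB;
	req->cmd_len = 6;
	req->cmnd[0] = 0x00;			/* TEST UNIT READY */
	req->channel = 0;
	req->id = 1;
	req->lun = 0;
	req->sc_data_direction = 3;		/* DMA_NONE */
	req->nr_segments = 0;

	ring->req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(irq);
}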
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index de082130ba4b..f68719f405af 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -3,6 +3,24 @@
*
* Guest OS interface to Xen.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2004, K A Fraser
*/
@@ -73,13 +91,23 @@
* VIRTUAL INTERRUPTS
*
* Virtual interrupts that a guest OS may receive from Xen.
+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
+ * The latter can be allocated only once per guest: they must initially be
+ * allocated to VCPU0 but can subsequently be re-bound.
*/
-#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
-#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
-#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
-#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
-#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
-#define VIRQ_PCPU_STATE 9 /* (DOM0) PCPU state changed */
+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
+#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
+#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
+#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -92,24 +120,68 @@
#define VIRQ_ARCH_7 23
#define NR_VIRQS 24
+
/*
- * MMU-UPDATE REQUESTS
- *
- * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
- * A foreigndom (FD) can be specified (or DOMID_SELF for none).
- * Where the FD has some effect, it is described below.
- * ptr[1:0] specifies the appropriate MMU_* command.
+ * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * unsigned count, unsigned *done_out,
+ * unsigned foreigndom)
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @pdone is an output parameter indicating number of completed operations
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ * hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ * in this hypercall invocation. The value of this field
+ * (x) encodes the PFD as follows:
+ * x == 0 => PFD == DOMID_SELF
+ * x != 0 => PFD == x - 1
*
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
- * Updates an entry in a page table. If updating an L1 table, and the new
- * table entry is valid/present, the mapped frame must belong to the FD, if
- * an FD has been specified. If attempting to map an I/O page then the
- * caller assumes the privilege of the FD.
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
* FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
* FD == DOMID_XEN: Map restricted areas of Xen's heap space.
* ptr[:2] -- Machine address of the page-table entry to modify.
* val -- Value to write.
*
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which would end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice this means that when constructing a page table for a
+ * process, thread, etc., we MUST be very diligent in following these rules:
+ * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ * the entries.
+ * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ * or L2).
+ * 3). Start filling out the PTE table (L1) with the PTE entries. Once
+ * done, make sure to set each of those entries to RO (so writeable bit
+ * is unset). Once that has been completed, set the PMD (L2) for this
+ * PTE table as RO.
+ * 4). When completed with all of the PMD (L2) entries, and all of them have
+ * been set to RO, make sure to set RO the PUD (L3). Do the same
+ * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ * 5). Now before you can use those pages (so setting the cr3), you MUST also
+ * pin them so that the hypervisor can verify the entries. This is done
+ * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ * number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ * issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetables), so
+ * instead use L3.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. Also, if so desired, the OS can try to write to the PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST not be in use (meaning that the cr3 is not set to it).
+ *
* ptr[1:0] == MMU_MACHPHYS_UPDATE:
* Updates an entry in the machine->pseudo-physical mapping table.
* ptr[:2] -- Machine address within the frame whose mapping to modify.
@@ -119,6 +191,72 @@
* ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
* As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
* with those in @val.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture defined bits. Meaning that
+ * if this is an X86_64 machine and the four-level page table layout is used,
+ * the layout of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cached disabled
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit the default layout is PAGE_PSE (also
+ * called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
+ * HYPERVISOR_mmuext_op serve as mechanism to set a pagetable to be 4MB
+ * (or 2MB) instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen
+ * using it as the Page Attribute Table (PAT) bit - for details on it please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
+ *
+ * The lookup of this index table translates to looking up
+ * Bit 7, Bit 4, and Bit 3 of val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
+ *
+ * If all bits are off, then we are using PAT0. If bit 3 is turned on,
+ * then we are using PAT1; if bit 3 and bit 4 are on, then PAT2.
+ *
+ * As you can see, the Linux PAT1 translates to PAT4 under Xen, which means
+ * that a guest which follows Linux's PAT setup and would like to set Write
+ * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
+ * set. For example, Linux only uses PAT0, PAT1, and PAT2 for the
+ * caching as:
+ *
+ * WB = none (so PAT0)
+ * WC = PWT (bit 3 on)
+ * UC = PWT | PCD (bit 3 and 4 are on).
+ *
+ * To make this work with Xen, the guest needs to translate the WC bit as follows:
+ *
+ * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back it would:
+ *
+ * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
@@ -127,7 +265,12 @@
/*
* MMU EXTENDED OPERATIONS
*
- * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * unsigned int count,
+ * unsigned int *pdone,
+ * unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
* A foreigndom (FD) can be specified (or DOMID_SELF for none).
* Where the FD has some effect, it is described below.
*
@@ -164,9 +307,23 @@
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
*
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
*/
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
@@ -183,12 +340,18 @@
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
+#define MMUEXT_MARK_SUPER 19
+#define MMUEXT_UNMARK_SUPER 20
#ifndef __ASSEMBLY__
struct mmuext_op {
unsigned int cmd;
union {
- /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
xen_pfn_t mfn;
/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
unsigned long linear_addr;
@@ -198,6 +361,8 @@ struct mmuext_op {
unsigned int nr_ents;
/* TLB_FLUSH_MULTI, INVLPG_MULTI */
void *vcpumask;
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
} arg2;
};
DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
@@ -225,10 +390,23 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
*/
#define VMASST_CMD_enable 0
#define VMASST_CMD_disable 1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
#define VMASST_TYPE_4gb_segments 0
+
+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
#define VMASST_TYPE_4gb_segments_notify 1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
#define VMASST_TYPE_writable_pagetables 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
+
#define MAX_VMASST_TYPE 3
#ifndef __ASSEMBLY__
@@ -260,6 +438,15 @@ typedef uint16_t domid_t;
*/
#define DOMID_XEN (0x7FF2U)
+/* DOMID_COW is used as the owner of sharable pages */
+#define DOMID_COW (0x7FF3U)
+
+/* DOMID_INVALID is used to identify pages with unknown owner. */
+#define DOMID_INVALID (0x7FF4U)
+
+/* Idle domain. */
+#define DOMID_IDLE (0x7FFFU)
+
/*
* Send an array of these to HYPERVISOR_mmu_update().
* NB. The fields are natural pointer/address size for this architecture.
@@ -272,7 +459,9 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
/*
* Send an array of these to HYPERVISOR_multicall().
- * NB. The fields are natural register size for this architecture.
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this then
+ * any unused bits in the upper portion must be zero.
*/
struct multicall_entry {
xen_ulong_t op;
@@ -442,8 +631,48 @@ struct start_info {
unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
int8_t cmd_line[MAX_GUEST_CMDLINE];
+ /* The pfn range here covers both page table and p->m table frames. */
+ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
+ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
};
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
+
+/*
+ * A multiboot module is a package containing modules very similar to a
+ * multiboot module array. The only differences are:
+ * - the array of module descriptors is by convention simply at the beginning
+ * of the multiboot module,
+ * - addresses in the module descriptors are based on the beginning of the
+ * multiboot module,
+ * - the number of modules is determined by a termination descriptor that has
+ * mod_start == 0.
+ *
+ * This permits both building it statically and referencing it in a
+ * configuration file, and lets the PV guest easily rebase the addresses to
+ * virtual addresses and at the same time count the number of modules.
+ */
+struct xen_multiboot_mod_list {
+ /* Address of first byte of the module */
+ uint32_t mod_start;
+ /* Address of last byte of the module (inclusive) */
+ uint32_t mod_end;
+ /* Address of zero-terminated command line */
+ uint32_t cmdline;
+ /* Unused, must be zero */
+ uint32_t pad;
+};
+/*
+ * The console structure in start_info.console.dom0
+ *
+ * This structure includes a variety of information required to
+ * have a working VGA/VESA console.
+ */
struct dom0_vga_console_info {
uint8_t video_type;
#define XEN_VGATYPE_TEXT_MODE_3 0x03
@@ -484,11 +713,6 @@ struct dom0_vga_console_info {
} u;
};
-/* These flags are passed in the 'flags' field of start_info_t. */
-#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
-
typedef uint64_t cpumap_t;
typedef uint8_t xen_domain_handle_t[16];
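
To ground the MMU_NORMAL_PT_UPDATE description above, a hedged PV-guest sketch of a single PTE update. The machine address and the new PTE value are assumed to have been prepared following the RO/pinning rules described in the comment; this is an illustration rather than the kernel's actual pvops path.

/* Hypothetical sketch: write one PTE through the hypervisor.
 * ptr[1:0] selects the sub-command, here MMU_NORMAL_PT_UPDATE (0). */
static int example_update_pte(uint64_t pte_ma, uint64_t pte_val)
{
	struct mmu_update u = {
		.ptr = pte_ma | MMU_NORMAL_PT_UPDATE,	/* machine address of the PTE */
		.val = pte_val,		/* MFN plus attribute bits, per the layout above */
	};

	return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}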
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 0324c6d340c1..b78f21caf55a 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -86,6 +86,7 @@ struct xenbus_device_id
/* A xenbus driver. */
struct xenbus_driver {
+ const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
@@ -100,20 +101,22 @@ struct xenbus_driver {
int (*is_ready)(struct xenbus_device *dev);
};
-#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \
-struct xenbus_driver var ## _driver = { \
- .driver.name = drvname + 0 ?: var ## _ids->devicetype, \
- .driver.owner = THIS_MODULE, \
- .ids = var ## _ids, ## methods \
-}
-
static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
{
return container_of(drv, struct xenbus_driver, driver);
}
-int __must_check xenbus_register_frontend(struct xenbus_driver *);
-int __must_check xenbus_register_backend(struct xenbus_driver *);
+int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+
+#define xenbus_register_frontend(drv) \
+ __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+#define xenbus_register_backend(drv) \
+ __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
void xenbus_unregister_driver(struct xenbus_driver *drv);
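
For comparison with the removed DEFINE_XENBUS_DRIVER() macro, a hedged sketch of how a frontend driver now registers; all "example*" identifiers are hypothetical. The module owner and name are filled in by the xenbus_register_frontend() wrapper, and the driver name defaults to ids[0].devicetype.

/* Hypothetical sketch: open-coded replacement for DEFINE_XENBUS_DRIVER(). */
#include <linux/module.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

static int examplefront_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	dev_info(&dev->dev, "probed\n");
	return 0;
}

static const struct xenbus_device_id examplefront_ids[] = {
	{ "vexample" },
	{ "" }
};

static struct xenbus_driver examplefront_driver = {
	.ids = examplefront_ids,
	.probe = examplefront_probe,
};

static int __init examplefront_init(void)
{
	if (!xen_domain())
		return -ENODEV;
	return xenbus_register_frontend(&examplefront_driver);
}
module_init(examplefront_init);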